Merge branch 'main' into main

This commit is contained in:
GatewayJ 2026-04-18 19:43:37 +08:00 committed by GitHub
commit 03e2ca1efe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
759 changed files with 114681 additions and 11073 deletions

View file

@ -1,17 +1,9 @@
"""Tests for API-key provider support (z.ai/GLM, Kimi, MiniMax, AI Gateway)."""
import os
import sys
import types
import pytest
# Ensure dotenv doesn't interfere
if "dotenv" not in sys.modules:
fake_dotenv = types.ModuleType("dotenv")
fake_dotenv.load_dotenv = lambda *args, **kwargs: None
sys.modules["dotenv"] = fake_dotenv
from hermes_cli.auth import (
PROVIDER_REGISTRY,
ProviderConfig,
@ -41,6 +33,7 @@ class TestProviderRegistry:
("huggingface", "Hugging Face", "api_key"),
("zai", "Z.AI / GLM", "api_key"),
("xai", "xAI", "api_key"),
("nvidia", "NVIDIA NIM", "api_key"),
("kimi-coding", "Kimi / Moonshot", "api_key"),
("minimax", "MiniMax", "api_key"),
("minimax-cn", "MiniMax (China)", "api_key"),
@ -65,6 +58,12 @@ class TestProviderRegistry:
assert pconfig.base_url_env_var == "XAI_BASE_URL"
assert pconfig.inference_base_url == "https://api.x.ai/v1"
def test_nvidia_env_vars(self):
pconfig = PROVIDER_REGISTRY["nvidia"]
assert pconfig.api_key_env_vars == ("NVIDIA_API_KEY",)
assert pconfig.base_url_env_var == "NVIDIA_BASE_URL"
assert pconfig.inference_base_url == "https://integrate.api.nvidia.com/v1"
def test_copilot_env_vars(self):
pconfig = PROVIDER_REGISTRY["copilot"]
assert pconfig.api_key_env_vars == ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN")

View file

@ -1,15 +1,9 @@
"""Tests for Arcee AI provider support — standard direct API provider."""
import sys
import types
import pytest
if "dotenv" not in sys.modules:
fake_dotenv = types.ModuleType("dotenv")
fake_dotenv.load_dotenv = lambda *args, **kwargs: None
sys.modules["dotenv"] = fake_dotenv
from hermes_cli.auth import (
PROVIDER_REGISTRY,
resolve_provider,

View file

@ -57,85 +57,6 @@ def _build_parser():
return parser
class TestFlagBeforeSubcommand:
"""Flags placed before 'chat' must propagate through."""
def test_yolo_before_chat(self):
parser = _build_parser()
args = parser.parse_args(["--yolo", "chat"])
assert getattr(args, "yolo", False) is True
def test_worktree_before_chat(self):
parser = _build_parser()
args = parser.parse_args(["-w", "chat"])
assert getattr(args, "worktree", False) is True
def test_skills_before_chat(self):
parser = _build_parser()
args = parser.parse_args(["-s", "myskill", "chat"])
assert getattr(args, "skills", None) == ["myskill"]
def test_pass_session_id_before_chat(self):
parser = _build_parser()
args = parser.parse_args(["--pass-session-id", "chat"])
assert getattr(args, "pass_session_id", False) is True
def test_resume_before_chat(self):
parser = _build_parser()
args = parser.parse_args(["-r", "abc123", "chat"])
assert getattr(args, "resume", None) == "abc123"
class TestFlagAfterSubcommand:
"""Flags placed after 'chat' must still work."""
def test_yolo_after_chat(self):
parser = _build_parser()
args = parser.parse_args(["chat", "--yolo"])
assert getattr(args, "yolo", False) is True
def test_worktree_after_chat(self):
parser = _build_parser()
args = parser.parse_args(["chat", "-w"])
assert getattr(args, "worktree", False) is True
def test_skills_after_chat(self):
parser = _build_parser()
args = parser.parse_args(["chat", "-s", "myskill"])
assert getattr(args, "skills", None) == ["myskill"]
def test_resume_after_chat(self):
parser = _build_parser()
args = parser.parse_args(["chat", "-r", "abc123"])
assert getattr(args, "resume", None) == "abc123"
class TestNoSubcommandDefaults:
"""When no subcommand is given, flags must work and defaults must hold."""
def test_yolo_no_subcommand(self):
parser = _build_parser()
args = parser.parse_args(["--yolo"])
assert args.yolo is True
assert args.command is None
def test_defaults_no_flags(self):
parser = _build_parser()
args = parser.parse_args([])
assert getattr(args, "yolo", False) is False
assert getattr(args, "worktree", False) is False
assert getattr(args, "skills", None) is None
assert getattr(args, "resume", None) is None
def test_defaults_chat_no_flags(self):
parser = _build_parser()
args = parser.parse_args(["chat"])
# With SUPPRESS, these fall through to parent defaults
assert getattr(args, "yolo", False) is False
assert getattr(args, "worktree", False) is False
assert getattr(args, "skills", None) is None
class TestYoloEnvVar:
"""Verify --yolo sets HERMES_YOLO_MODE regardless of flag position.

View file

@ -141,13 +141,93 @@ def test_auth_add_nous_oauth_persists_pool_entry(tmp_path, monkeypatch):
auth_add_command(_Args())
payload = json.loads((tmp_path / "hermes" / "auth.json").read_text())
# Pool has exactly one canonical `device_code` entry — not a duplicate
# pair of `manual:device_code` + `device_code` (the latter would be
# materialised by _seed_from_singletons on every load_pool).
entries = payload["credential_pool"]["nous"]
entry = next(item for item in entries if item["source"] == "manual:device_code")
assert entry["label"] == "nous@example.com"
assert entry["source"] == "manual:device_code"
device_code_entries = [
item for item in entries if item["source"] == "device_code"
]
assert len(device_code_entries) == 1, entries
assert not any(item["source"] == "manual:device_code" for item in entries)
entry = device_code_entries[0]
assert entry["source"] == "device_code"
assert entry["agent_key"] == "ak-test"
assert entry["portal_base_url"] == "https://portal.example.com"
# `hermes auth add nous` must also populate providers.nous so the
# 401-recovery path (resolve_nous_runtime_credentials) can mint a fresh
# agent_key when the 24h TTL expires. If this mirror is missing, recovery
# raises "Hermes is not logged into Nous Portal" and the agent dies.
singleton = payload["providers"]["nous"]
assert singleton["access_token"] == token
assert singleton["refresh_token"] == "refresh-token"
assert singleton["agent_key"] == "ak-test"
assert singleton["portal_base_url"] == "https://portal.example.com"
assert singleton["inference_base_url"] == "https://inference.example.com/v1"
def test_auth_add_nous_oauth_honors_custom_label(tmp_path, monkeypatch):
"""`hermes auth add nous --type oauth --label <name>` must preserve the
custom label end-to-end it was silently dropped in the first cut of the
persist_nous_credentials helper because `--label` wasn't threaded through.
"""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
_write_auth_store(tmp_path, {"version": 1, "providers": {}})
token = _jwt_with_email("nous@example.com")
monkeypatch.setattr(
"hermes_cli.auth._nous_device_code_login",
lambda **kwargs: {
"portal_base_url": "https://portal.example.com",
"inference_base_url": "https://inference.example.com/v1",
"client_id": "hermes-cli",
"scope": "inference:mint_agent_key",
"token_type": "Bearer",
"access_token": token,
"refresh_token": "refresh-token",
"obtained_at": "2026-03-23T10:00:00+00:00",
"expires_at": "2026-03-23T11:00:00+00:00",
"expires_in": 3600,
"agent_key": "ak-test",
"agent_key_id": "ak-id",
"agent_key_expires_at": "2026-03-23T10:30:00+00:00",
"agent_key_expires_in": 1800,
"agent_key_reused": False,
"agent_key_obtained_at": "2026-03-23T10:00:10+00:00",
"tls": {"insecure": False, "ca_bundle": None},
},
)
from hermes_cli.auth_commands import auth_add_command
class _Args:
provider = "nous"
auth_type = "oauth"
api_key = None
label = "my-nous"
portal_url = None
inference_url = None
client_id = None
scope = None
no_browser = False
timeout = None
insecure = False
ca_bundle = None
auth_add_command(_Args())
payload = json.loads((tmp_path / "hermes" / "auth.json").read_text())
# Custom label reaches the pool entry …
pool_entry = payload["credential_pool"]["nous"][0]
assert pool_entry["source"] == "device_code"
assert pool_entry["label"] == "my-nous"
# … and survives in providers.nous so a subsequent load_pool() re-seeds
# it without reverting to the auto-derived fingerprint.
assert payload["providers"]["nous"]["label"] == "my-nous"
def test_auth_add_codex_oauth_persists_pool_entry(tmp_path, monkeypatch):
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
@ -703,3 +783,231 @@ def test_auth_remove_claude_code_suppresses_reseed(tmp_path, monkeypatch):
suppressed = updated.get("suppressed_sources", {})
assert "anthropic" in suppressed
assert "claude_code" in suppressed["anthropic"]
def test_unsuppress_credential_source_clears_marker(tmp_path, monkeypatch):
"""unsuppress_credential_source() removes a previously-set marker."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
_write_auth_store(tmp_path, {"version": 1})
from hermes_cli.auth import suppress_credential_source, unsuppress_credential_source, is_source_suppressed
suppress_credential_source("openai-codex", "device_code")
assert is_source_suppressed("openai-codex", "device_code") is True
cleared = unsuppress_credential_source("openai-codex", "device_code")
assert cleared is True
assert is_source_suppressed("openai-codex", "device_code") is False
payload = json.loads((tmp_path / "hermes" / "auth.json").read_text())
# Empty suppressed_sources dict should be cleaned up entirely
assert "suppressed_sources" not in payload
def test_unsuppress_credential_source_returns_false_when_absent(tmp_path, monkeypatch):
"""unsuppress_credential_source() returns False if no marker exists."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
_write_auth_store(tmp_path, {"version": 1})
from hermes_cli.auth import unsuppress_credential_source
assert unsuppress_credential_source("openai-codex", "device_code") is False
assert unsuppress_credential_source("nonexistent", "whatever") is False
def test_unsuppress_credential_source_preserves_other_markers(tmp_path, monkeypatch):
"""Clearing one marker must not affect unrelated markers."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
_write_auth_store(tmp_path, {"version": 1})
from hermes_cli.auth import (
suppress_credential_source,
unsuppress_credential_source,
is_source_suppressed,
)
suppress_credential_source("openai-codex", "device_code")
suppress_credential_source("anthropic", "claude_code")
assert unsuppress_credential_source("openai-codex", "device_code") is True
assert is_source_suppressed("anthropic", "claude_code") is True
def test_auth_remove_codex_device_code_suppresses_reseed(tmp_path, monkeypatch):
"""Removing an auto-seeded openai-codex credential must mark the source as suppressed."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
monkeypatch.setattr(
"agent.credential_pool._seed_from_singletons",
lambda provider, entries: (False, {"device_code"}),
)
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
auth_store = {
"version": 1,
"providers": {
"openai-codex": {
"tokens": {
"access_token": "acc-1",
"refresh_token": "ref-1",
},
},
},
"credential_pool": {
"openai-codex": [{
"id": "cx1",
"label": "codex-auto",
"auth_type": "oauth",
"priority": 0,
"source": "device_code",
"access_token": "acc-1",
"refresh_token": "ref-1",
}]
},
}
(hermes_home / "auth.json").write_text(json.dumps(auth_store))
from types import SimpleNamespace
from hermes_cli.auth_commands import auth_remove_command
auth_remove_command(SimpleNamespace(provider="openai-codex", target="1"))
updated = json.loads((hermes_home / "auth.json").read_text())
suppressed = updated.get("suppressed_sources", {})
assert "openai-codex" in suppressed
assert "device_code" in suppressed["openai-codex"]
# Tokens in providers state should also be cleared
assert "openai-codex" not in updated.get("providers", {})
def test_auth_remove_codex_manual_source_suppresses_reseed(tmp_path, monkeypatch):
"""Removing a manually-added (`manual:device_code`) openai-codex credential must also suppress."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
monkeypatch.setattr(
"agent.credential_pool._seed_from_singletons",
lambda provider, entries: (False, set()),
)
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
auth_store = {
"version": 1,
"providers": {
"openai-codex": {
"tokens": {
"access_token": "acc-2",
"refresh_token": "ref-2",
},
},
},
"credential_pool": {
"openai-codex": [{
"id": "cx2",
"label": "manual-codex",
"auth_type": "oauth",
"priority": 0,
"source": "manual:device_code",
"access_token": "acc-2",
"refresh_token": "ref-2",
}]
},
}
(hermes_home / "auth.json").write_text(json.dumps(auth_store))
from types import SimpleNamespace
from hermes_cli.auth_commands import auth_remove_command
auth_remove_command(SimpleNamespace(provider="openai-codex", target="1"))
updated = json.loads((hermes_home / "auth.json").read_text())
suppressed = updated.get("suppressed_sources", {})
# Critical: manual:device_code source must also trigger the suppression path
assert "openai-codex" in suppressed
assert "device_code" in suppressed["openai-codex"]
assert "openai-codex" not in updated.get("providers", {})
def test_auth_add_codex_clears_suppression_marker(tmp_path, monkeypatch):
"""Re-linking codex via `hermes auth add openai-codex` must clear any suppression marker."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
monkeypatch.setattr(
"agent.credential_pool._seed_from_singletons",
lambda provider, entries: (False, set()),
)
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
# Pre-existing suppression (simulating a prior `hermes auth remove`)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1,
"providers": {},
"suppressed_sources": {"openai-codex": ["device_code"]},
}))
token = _jwt_with_email("codex@example.com")
monkeypatch.setattr(
"hermes_cli.auth._codex_device_code_login",
lambda: {
"tokens": {
"access_token": token,
"refresh_token": "refreshed",
},
"base_url": "https://chatgpt.com/backend-api/codex",
"last_refresh": "2026-01-01T00:00:00Z",
},
)
from hermes_cli.auth_commands import auth_add_command
class _Args:
provider = "openai-codex"
auth_type = "oauth"
api_key = None
label = None
auth_add_command(_Args())
payload = json.loads((hermes_home / "auth.json").read_text())
# Suppression marker must be cleared
assert "openai-codex" not in payload.get("suppressed_sources", {})
# New pool entry must be present
entries = payload["credential_pool"]["openai-codex"]
assert any(e["source"] == "manual:device_code" for e in entries)
def test_seed_from_singletons_respects_codex_suppression(tmp_path, monkeypatch):
"""_seed_from_singletons() for openai-codex must skip auto-import when suppressed."""
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
# Suppression marker in place
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1,
"providers": {},
"suppressed_sources": {"openai-codex": ["device_code"]},
}))
# Make _import_codex_cli_tokens return tokens — these would normally trigger
# a re-seed, but suppression must skip it.
def _fake_import():
return {
"access_token": "would-be-reimported",
"refresh_token": "would-be-reimported",
}
monkeypatch.setattr("hermes_cli.auth._import_codex_cli_tokens", _fake_import)
from agent.credential_pool import _seed_from_singletons
entries = []
changed, active_sources = _seed_from_singletons("openai-codex", entries)
# With suppression in place: nothing changes, no entries added, no sources
assert changed is False
assert entries == []
assert active_sources == set()
# Verify the auth store was NOT modified (no auto-import happened)
after = json.loads((hermes_home / "auth.json").read_text())
assert "openai-codex" not in after.get("providers", {})

View file

@ -299,3 +299,415 @@ def test_mint_retry_uses_latest_rotated_refresh_token(tmp_path, monkeypatch):
assert creds["api_key"] == "agent-key"
assert refresh_calls == ["refresh-old", "refresh-1"]
# =============================================================================
# _login_nous: "Skip (keep current)" must preserve prior provider + model
# =============================================================================
class TestLoginNousSkipKeepsCurrent:
"""When a user runs `hermes model` → Nous Portal → Skip (keep current) after
a successful OAuth login, the prior provider and model MUST be preserved.
Regression: previously, _update_config_for_provider was called
unconditionally after login, which flipped model.provider to "nous" while
keeping the old model.default (e.g. anthropic/claude-opus-4.6 from
OpenRouter), leaving the user with a mismatched provider/model pair.
"""
def _setup_home_with_openrouter(self, tmp_path, monkeypatch):
import yaml
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
config_path = hermes_home / "config.yaml"
config_path.write_text(yaml.safe_dump({
"model": {
"provider": "openrouter",
"default": "anthropic/claude-opus-4.6",
},
}, sort_keys=False))
auth_path = hermes_home / "auth.json"
auth_path.write_text(json.dumps({
"version": 1,
"active_provider": "openrouter",
"providers": {"openrouter": {"api_key": "sk-or-fake"}},
}))
return hermes_home, config_path, auth_path
def _patch_login_internals(self, monkeypatch, *, prompt_returns):
"""Patch OAuth + model-list + prompt so _login_nous doesn't hit network."""
import hermes_cli.auth as auth_mod
import hermes_cli.models as models_mod
import hermes_cli.nous_subscription as ns
fake_auth_state = {
"access_token": "fake-nous-token",
"agent_key": "fake-agent-key",
"inference_base_url": "https://inference-api.nousresearch.com",
"portal_base_url": "https://portal.nousresearch.com",
"refresh_token": "fake-refresh",
"token_expires_at": 9999999999,
}
monkeypatch.setattr(
auth_mod, "_nous_device_code_login",
lambda **kwargs: dict(fake_auth_state),
)
monkeypatch.setattr(
auth_mod, "_prompt_model_selection",
lambda *a, **kw: prompt_returns,
)
monkeypatch.setattr(models_mod, "get_pricing_for_provider", lambda p: {})
monkeypatch.setattr(models_mod, "filter_nous_free_models", lambda ids, p: ids)
monkeypatch.setattr(models_mod, "check_nous_free_tier", lambda: None)
monkeypatch.setattr(
models_mod, "partition_nous_models_by_tier",
lambda ids, p, free_tier=False: (ids, []),
)
monkeypatch.setattr(ns, "prompt_enable_tool_gateway", lambda cfg: None)
def test_skip_keep_current_preserves_provider_and_model(self, tmp_path, monkeypatch):
"""User picks Skip → config.yaml untouched, Nous creds still saved."""
import argparse
import yaml
from hermes_cli.auth import PROVIDER_REGISTRY, _login_nous
hermes_home, config_path, auth_path = self._setup_home_with_openrouter(
tmp_path, monkeypatch,
)
self._patch_login_internals(monkeypatch, prompt_returns=None)
args = argparse.Namespace(
portal_url=None, inference_url=None, client_id=None, scope=None,
no_browser=True, timeout=15.0, ca_bundle=None, insecure=False,
)
_login_nous(args, PROVIDER_REGISTRY["nous"])
# config.yaml model section must be unchanged
cfg_after = yaml.safe_load(config_path.read_text())
assert cfg_after["model"]["provider"] == "openrouter"
assert cfg_after["model"]["default"] == "anthropic/claude-opus-4.6"
assert "base_url" not in cfg_after["model"]
# auth.json: active_provider restored to openrouter, but Nous creds saved
auth_after = json.loads(auth_path.read_text())
assert auth_after["active_provider"] == "openrouter"
assert "nous" in auth_after["providers"]
assert auth_after["providers"]["nous"]["access_token"] == "fake-nous-token"
# Existing openrouter creds still intact
assert auth_after["providers"]["openrouter"]["api_key"] == "sk-or-fake"
def test_picking_model_switches_to_nous(self, tmp_path, monkeypatch):
"""User picks a Nous model → provider flips to nous with that model."""
import argparse
import yaml
from hermes_cli.auth import PROVIDER_REGISTRY, _login_nous
hermes_home, config_path, auth_path = self._setup_home_with_openrouter(
tmp_path, monkeypatch,
)
self._patch_login_internals(
monkeypatch, prompt_returns="xiaomi/mimo-v2-pro",
)
args = argparse.Namespace(
portal_url=None, inference_url=None, client_id=None, scope=None,
no_browser=True, timeout=15.0, ca_bundle=None, insecure=False,
)
_login_nous(args, PROVIDER_REGISTRY["nous"])
cfg_after = yaml.safe_load(config_path.read_text())
assert cfg_after["model"]["provider"] == "nous"
assert cfg_after["model"]["default"] == "xiaomi/mimo-v2-pro"
auth_after = json.loads(auth_path.read_text())
assert auth_after["active_provider"] == "nous"
def test_skip_with_no_prior_active_provider_clears_it(self, tmp_path, monkeypatch):
"""Fresh install (no prior active_provider) → Skip clears active_provider
instead of leaving it as nous."""
import argparse
import yaml
from hermes_cli.auth import PROVIDER_REGISTRY, _login_nous
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
config_path = hermes_home / "config.yaml"
config_path.write_text(yaml.safe_dump({"model": {}}, sort_keys=False))
# No auth.json yet — simulates first-run before any OAuth
self._patch_login_internals(monkeypatch, prompt_returns=None)
args = argparse.Namespace(
portal_url=None, inference_url=None, client_id=None, scope=None,
no_browser=True, timeout=15.0, ca_bundle=None, insecure=False,
)
_login_nous(args, PROVIDER_REGISTRY["nous"])
auth_path = hermes_home / "auth.json"
auth_after = json.loads(auth_path.read_text())
# active_provider should NOT be set to "nous" after Skip
assert auth_after.get("active_provider") in (None, "")
# But Nous creds are still saved
assert "nous" in auth_after.get("providers", {})
# =============================================================================
# persist_nous_credentials: shared helper for CLI + web dashboard login paths
# =============================================================================
def _full_state_fixture() -> dict:
"""Shape of the dict returned by _nous_device_code_login /
refresh_nous_oauth_from_state. Used as helper input."""
return {
"portal_base_url": "https://portal.example.com",
"inference_base_url": "https://inference.example.com/v1",
"client_id": "hermes-cli",
"scope": "inference:mint_agent_key",
"token_type": "Bearer",
"access_token": "access-tok",
"refresh_token": "refresh-tok",
"obtained_at": "2026-04-17T22:00:00+00:00",
"expires_at": "2026-04-17T22:15:00+00:00",
"expires_in": 900,
"agent_key": "agent-key-value",
"agent_key_id": "ak-id",
"agent_key_expires_at": "2026-04-18T22:00:00+00:00",
"agent_key_expires_in": 86400,
"agent_key_reused": False,
"agent_key_obtained_at": "2026-04-17T22:00:10+00:00",
"tls": {"insecure": False, "ca_bundle": None},
}
def test_persist_nous_credentials_writes_both_pool_and_providers(tmp_path, monkeypatch):
"""Helper must populate BOTH credential_pool.nous AND providers.nous.
Regression guard: before this helper existed, `hermes auth add nous`
wrote only the pool. After the Nous agent_key's 24h TTL expired, the
401-recovery path in run_agent.py called resolve_nous_runtime_credentials
which reads providers.nous, found it empty, raised AuthError, and the
agent failed with "Non-retryable client error". Both stores must stay
in sync at write time.
"""
from hermes_cli.auth import persist_nous_credentials, NOUS_DEVICE_CODE_SOURCE
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
entry = persist_nous_credentials(_full_state_fixture())
assert entry is not None
assert entry.provider == "nous"
assert entry.source == NOUS_DEVICE_CODE_SOURCE
payload = json.loads((hermes_home / "auth.json").read_text())
# providers.nous populated with the full state (new behaviour)
singleton = payload["providers"]["nous"]
assert singleton["access_token"] == "access-tok"
assert singleton["refresh_token"] == "refresh-tok"
assert singleton["agent_key"] == "agent-key-value"
assert singleton["agent_key_expires_at"] == "2026-04-18T22:00:00+00:00"
# credential_pool.nous has exactly one canonical device_code entry
pool_entries = payload["credential_pool"]["nous"]
assert len(pool_entries) == 1, pool_entries
pool_entry = pool_entries[0]
assert pool_entry["source"] == NOUS_DEVICE_CODE_SOURCE
assert pool_entry["agent_key"] == "agent-key-value"
assert pool_entry["inference_base_url"] == "https://inference.example.com/v1"
def test_persist_nous_credentials_allows_recovery_from_401(tmp_path, monkeypatch):
"""End-to-end: after persisting via the helper, resolve_nous_runtime_credentials
must succeed (not raise "Hermes is not logged into Nous Portal").
This is the exact path that run_agent.py's `_try_refresh_nous_client_credentials`
calls after a Nous 401 before the fix it would raise AuthError because
providers.nous was empty.
"""
from hermes_cli.auth import persist_nous_credentials, resolve_nous_runtime_credentials
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
persist_nous_credentials(_full_state_fixture())
# Stub the network-touching steps so we don't actually contact the
# portal — the point of this test is that state lookup succeeds and
# doesn't raise "Hermes is not logged into Nous Portal".
def _fake_refresh_access_token(*, client, portal_base_url, client_id, refresh_token):
return {
"access_token": "access-new",
"refresh_token": "refresh-new",
"expires_in": 900,
"token_type": "Bearer",
}
def _fake_mint_agent_key(*, client, portal_base_url, access_token, min_ttl_seconds):
return _mint_payload(api_key="new-agent-key")
monkeypatch.setattr("hermes_cli.auth._refresh_access_token", _fake_refresh_access_token)
monkeypatch.setattr("hermes_cli.auth._mint_agent_key", _fake_mint_agent_key)
creds = resolve_nous_runtime_credentials(min_key_ttl_seconds=300, force_mint=True)
assert creds["api_key"] == "new-agent-key"
def test_persist_nous_credentials_idempotent_no_duplicate_pool_entries(tmp_path, monkeypatch):
"""Re-running persist must upsert — not accumulate duplicate device_code rows.
Regression guard for the review comment on PR #11858: before normalisation,
the helper wrote `manual:device_code` while `_seed_from_singletons` wrote
`device_code`, so the pool grew a second duplicate entry on every
``load_pool()``. The helper now writes providers.nous and lets seeding
materialise the pool entry under the canonical ``device_code`` source, so
two persists still leave the pool with exactly one row.
"""
from hermes_cli.auth import persist_nous_credentials, NOUS_DEVICE_CODE_SOURCE
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
first = _full_state_fixture()
persist_nous_credentials(first)
second = _full_state_fixture()
second["access_token"] = "access-second"
second["agent_key"] = "agent-key-second"
persist_nous_credentials(second)
payload = json.loads((hermes_home / "auth.json").read_text())
# providers.nous reflects the latest write (singleton semantics)
assert payload["providers"]["nous"]["access_token"] == "access-second"
assert payload["providers"]["nous"]["agent_key"] == "agent-key-second"
# credential_pool.nous has exactly one entry, carrying the latest agent_key
pool_entries = payload["credential_pool"]["nous"]
assert len(pool_entries) == 1, pool_entries
assert pool_entries[0]["source"] == NOUS_DEVICE_CODE_SOURCE
assert pool_entries[0]["agent_key"] == "agent-key-second"
# And no stray `manual:device_code` / `manual:dashboard_device_code` rows
assert not any(
e["source"].startswith("manual:") for e in pool_entries
)
def test_persist_nous_credentials_reloads_pool_after_singleton_write(tmp_path, monkeypatch):
"""The entry returned by the helper must come from a fresh ``load_pool`` so
callers observe the canonical seeded state, including any legacy entries
that ``_seed_from_singletons`` pruned or upserted.
"""
from hermes_cli.auth import persist_nous_credentials, NOUS_DEVICE_CODE_SOURCE
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
entry = persist_nous_credentials(_full_state_fixture())
assert entry is not None
assert entry.source == NOUS_DEVICE_CODE_SOURCE
# Label derived by _seed_from_singletons via label_from_token; we don't
# assert its exact value, just that the helper returned a real entry.
assert entry.access_token == "access-tok"
assert entry.agent_key == "agent-key-value"
def test_persist_nous_credentials_embeds_custom_label(tmp_path, monkeypatch):
"""User-supplied ``--label`` round-trips through providers.nous and the pool.
Previously `hermes auth add nous --type oauth --label <name>` silently
dropped the label because persist_nous_credentials() ignored it and
_seed_from_singletons always auto-derived via label_from_token(). The
fix stashes the label inside providers.nous so seeding prefers it.
"""
from hermes_cli.auth import persist_nous_credentials, NOUS_DEVICE_CODE_SOURCE
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
entry = persist_nous_credentials(_full_state_fixture(), label="my-personal")
assert entry is not None
assert entry.source == NOUS_DEVICE_CODE_SOURCE
assert entry.label == "my-personal"
# providers.nous carries the label so re-seeding on the next load_pool
# doesn't overwrite it with the auto-derived fingerprint.
payload = json.loads((hermes_home / "auth.json").read_text())
assert payload["providers"]["nous"]["label"] == "my-personal"
def test_persist_nous_credentials_custom_label_survives_reseed(tmp_path, monkeypatch):
"""Reopening the pool (which re-runs _seed_from_singletons) must keep the
user-chosen label instead of clobbering it with label_from_token output.
"""
from hermes_cli.auth import persist_nous_credentials
from agent.credential_pool import load_pool
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
persist_nous_credentials(_full_state_fixture(), label="work-acct")
# Second load_pool triggers _seed_from_singletons again. Without the
# fix, this call overwrote the label with label_from_token(access_token).
pool = load_pool("nous")
entries = pool.entries()
assert len(entries) == 1
assert entries[0].label == "work-acct"
def test_persist_nous_credentials_no_label_uses_auto_derived(tmp_path, monkeypatch):
"""When the caller doesn't pass ``label``, the auto-derived fingerprint
is used (unchanged default behaviour regression guard).
"""
from hermes_cli.auth import persist_nous_credentials
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "auth.json").write_text(json.dumps({
"version": 1, "providers": {},
}))
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
entry = persist_nous_credentials(_full_state_fixture())
assert entry is not None
# label_from_token derives from the access_token; exact value depends on
# the fingerprinter but it must not be empty and must not equal an
# arbitrary user string we never passed.
assert entry.label
assert entry.label != "my-personal"
# No "label" key embedded in providers.nous when the caller didn't supply one.
payload = json.loads((hermes_home / "auth.json").read_text())
assert "label" not in payload["providers"]["nous"]

View file

@ -0,0 +1,294 @@
"""Tests for the auxiliary-model configuration UI in ``hermes model``.
Covers the helper functions:
- ``_save_aux_choice`` writes to config.yaml without touching main model config
- ``_reset_aux_to_auto`` clears routing fields but preserves timeouts
- ``_format_aux_current`` renders current task config for the menu
- ``_AUX_TASKS`` stays in sync with ``DEFAULT_CONFIG["auxiliary"]``
These are pure-function tests the interactive menu loops are not covered
here (they're stdin-driven curses prompts).
"""
from __future__ import annotations
import pytest
from hermes_cli.config import DEFAULT_CONFIG, load_config
from hermes_cli.main import (
_AUX_TASKS,
_format_aux_current,
_reset_aux_to_auto,
_save_aux_choice,
)
# ── Default config ──────────────────────────────────────────────────────────
def test_title_generation_present_in_default_config():
    """`title_generation` task must be defined in DEFAULT_CONFIG.

    Regression for an existing gap: title_generator.py calls
    ``call_llm(task="title_generation", ...)`` but the task was missing
    from DEFAULT_CONFIG["auxiliary"], so the config-backed timeout/provider
    overrides never worked for that task.
    """
    aux = DEFAULT_CONFIG["auxiliary"]
    assert "title_generation" in aux
    task = aux["title_generation"]
    assert task["provider"] == "auto"
    assert task["model"] == ""
    assert task["timeout"] > 0
def test_aux_tasks_keys_all_exist_in_default_config():
    """Every task the menu offers must be defined in DEFAULT_CONFIG."""
    offered = {task_key for task_key, _name, _desc in _AUX_TASKS}
    missing = offered - set(DEFAULT_CONFIG["auxiliary"])
    assert not missing, (
        f"_AUX_TASKS references tasks not in DEFAULT_CONFIG.auxiliary: {missing}"
    )
# ── _format_aux_current ─────────────────────────────────────────────────────
@pytest.mark.parametrize(
    "task_cfg,expected",
    [
        # Empty or explicitly-auto configs all collapse to the bare "auto" label.
        ({}, "auto"),
        ({"provider": "", "model": ""}, "auto"),
        ({"provider": "auto", "model": ""}, "auto"),
        ({"provider": "auto", "model": "gpt-4o"}, "auto · gpt-4o"),
        # A named provider renders alone, or with " · <model>" appended.
        ({"provider": "openrouter", "model": ""}, "openrouter"),
        (
            {"provider": "openrouter", "model": "google/gemini-2.5-flash"},
            "openrouter · google/gemini-2.5-flash",
        ),
        ({"provider": "nous", "model": "gemini-3-flash"}, "nous · gemini-3-flash"),
        # Custom providers show a scheme-less base_url; the trailing slash in
        # the second case is stripped from the rendered host string.
        (
            {"provider": "custom", "base_url": "http://localhost:11434/v1", "model": ""},
            "custom (localhost:11434/v1)",
        ),
        (
            {
                "provider": "custom",
                "base_url": "http://localhost:11434/v1/",
                "model": "qwen2.5:32b",
            },
            "custom (localhost:11434/v1) · qwen2.5:32b",
        ),
    ],
)
def test_format_aux_current(task_cfg, expected):
    """_format_aux_current renders a task config for the aux-model menu."""
    assert _format_aux_current(task_cfg) == expected
def test_format_aux_current_handles_non_dict():
    """Non-dict task configs degrade gracefully to the "auto" label."""
    for bogus in (None, "string"):
        assert _format_aux_current(bogus) == "auto"
# ── _save_aux_choice ────────────────────────────────────────────────────────
def test_save_aux_choice_persists_to_config_yaml(tmp_path, monkeypatch):
    """Saving a task writes provider/model/base_url/api_key to auxiliary.<task>."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    _save_aux_choice(
        "vision", provider="openrouter", model="google/gemini-2.5-flash",
    )

    vision = load_config()["auxiliary"]["vision"]
    assert vision["provider"] == "openrouter"
    assert vision["model"] == "google/gemini-2.5-flash"
    assert vision["base_url"] == ""
    assert vision["api_key"] == ""
def test_save_aux_choice_preserves_timeout(tmp_path, monkeypatch):
    """Saving must NOT clobber user-tuned timeout values."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    # The stock vision timeout is 120 seconds.
    baseline = load_config()["auxiliary"]["vision"]["timeout"]
    assert baseline == 120

    _save_aux_choice("vision", provider="nous", model="gemini-3-flash")

    vision = load_config()["auxiliary"]["vision"]
    assert vision["timeout"] == baseline
    # The vision-specific download_timeout must survive the save too.
    assert vision.get("download_timeout") == 30
def test_save_aux_choice_does_not_touch_main_model(tmp_path, monkeypatch):
    """Aux config must never mutate model.default / model.provider / model.base_url."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    # Pretend the user already configured a main model.
    from hermes_cli.config import save_config

    cfg = load_config()
    cfg["model"] = {
        "default": "claude-sonnet-4.6",
        "provider": "anthropic",
        "base_url": "",
    }
    save_config(cfg)

    _save_aux_choice(
        "compression", provider="custom",
        base_url="http://localhost:11434/v1", model="qwen2.5:32b",
    )

    cfg = load_config()
    # The main model section is untouched...
    assert cfg["model"]["default"] == "claude-sonnet-4.6"
    assert cfg["model"]["provider"] == "anthropic"
    # ...while the aux task was written exactly as requested.
    compression = cfg["auxiliary"]["compression"]
    assert compression["provider"] == "custom"
    assert compression["model"] == "qwen2.5:32b"
    assert compression["base_url"] == "http://localhost:11434/v1"
def test_save_aux_choice_creates_missing_task_entry(tmp_path, monkeypatch):
    """Saving a task that was wiped from config.yaml should recreate it."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    # Drop "vision" from the persisted config entirely.
    from hermes_cli.config import save_config

    cfg = load_config()
    cfg.setdefault("auxiliary", {}).pop("vision", None)
    save_config(cfg)

    _save_aux_choice("vision", provider="nous", model="gemini-3-flash")

    vision = load_config()["auxiliary"]["vision"]
    assert vision["provider"] == "nous"
    assert vision["model"] == "gemini-3-flash"
# ── _reset_aux_to_auto ──────────────────────────────────────────────────────
def test_reset_aux_to_auto_clears_routing_preserves_timeouts(tmp_path, monkeypatch):
    """Reset clears provider/model/base_url/api_key but keeps timeout tuning."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    # Route two tasks away from auto, then user-tune one timeout.
    _save_aux_choice("vision", provider="openrouter", model="gpt-4o")
    _save_aux_choice("compression", provider="nous", model="gemini-3-flash")
    from hermes_cli.config import save_config

    cfg = load_config()
    cfg["auxiliary"]["vision"]["timeout"] = 300  # user-tuned
    save_config(cfg)

    assert _reset_aux_to_auto() == 2  # both routed tasks changed

    cfg = load_config()
    for task in ("vision", "compression"):
        routed = cfg["auxiliary"][task]
        assert routed["provider"] == "auto"
        assert routed["model"] == ""
        assert routed["base_url"] == ""
        assert routed["api_key"] == ""
    # The user-tuned timeout survives the reset...
    assert cfg["auxiliary"]["vision"]["timeout"] == 300
    # ...and the untouched default is preserved too.
    assert cfg["auxiliary"]["compression"]["timeout"] == 120
def test_reset_aux_to_auto_idempotent(tmp_path, monkeypatch):
    """Second reset on already-auto config returns 0 without errors."""
    from pathlib import Path

    hermes_home = tmp_path / ".hermes"
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)

    assert _reset_aux_to_auto() == 0  # fresh config: nothing to clear
    _save_aux_choice("vision", provider="nous", model="gemini-3-flash")
    assert _reset_aux_to_auto() == 1  # one routed task cleared
    assert _reset_aux_to_auto() == 0  # clearing again is a no-op
# ── Menu dispatch ───────────────────────────────────────────────────────────
def test_select_provider_and_model_dispatches_to_aux_menu(tmp_path, monkeypatch):
    """Picking 'Configure auxiliary models...' in the provider list calls _aux_config_menu."""
    from pathlib import Path

    monkeypatch.setenv("HERMES_HOME", str(tmp_path / ".hermes"))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    (tmp_path / ".hermes").mkdir(exist_ok=True)

    from hermes_cli import main as main_mod

    calls = {"aux": 0, "flow": 0}

    def pick_aux_entry(choices, *, default=0):
        # Return the index of the aux-config entry, located by label text.
        for idx, label in enumerate(choices):
            if "Configure auxiliary models" in label:
                return idx
        raise AssertionError("aux entry not in provider list")

    monkeypatch.setattr(main_mod, "_prompt_provider_choice", pick_aux_entry)
    monkeypatch.setattr(
        main_mod, "_aux_config_menu",
        lambda: calls.__setitem__("aux", calls["aux"] + 1),
    )
    # Guard: the main provider flow must never run from this menu entry.
    monkeypatch.setattr(
        main_mod, "_model_flow_openrouter",
        lambda *a, **kw: calls.__setitem__("flow", calls["flow"] + 1),
    )

    main_mod.select_provider_and_model()

    assert calls["aux"] == 1, "aux menu not invoked"
    assert calls["flow"] == 0, "main provider flow should not run"
def test_leave_unchanged_replaces_cancel_label(tmp_path, monkeypatch):
    """The bottom cancel entry now reads 'Leave unchanged' (UX polish)."""
    from pathlib import Path

    monkeypatch.setenv("HERMES_HOME", str(tmp_path / ".hermes"))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    (tmp_path / ".hermes").mkdir(exist_ok=True)

    from hermes_cli import main as main_mod

    captured: list[list[str]] = []

    def pick_leave_unchanged(choices, *, default=0):
        captured.append(list(choices))
        # Choose 'Leave unchanged' so the menu exits without side effects.
        for idx, label in enumerate(choices):
            if label == "Leave unchanged":
                return idx
        raise AssertionError("Leave unchanged not in provider list")

    monkeypatch.setattr(main_mod, "_prompt_provider_choice", pick_leave_unchanged)
    main_mod.select_provider_and_model()

    assert captured, "provider menu never rendered"
    labels = captured[0]
    assert "Leave unchanged" in labels
    assert "Cancel" not in labels, "Cancel label should be replaced"
    assert any("Configure auxiliary models" in label for label in labels)

View file

@ -106,6 +106,43 @@ class TestCmdUpdateBranchFallback:
pull_cmds = [c for c in commands if "pull" in c]
assert len(pull_cmds) == 0
    @patch("shutil.which")
    @patch("subprocess.run")
    def test_update_refreshes_repo_and_tui_node_dependencies(
        self, mock_run, mock_which, mock_args
    ):
        """cmd_update refreshes Node deps in repo root, ui-tui/, and web/.

        Note: @patch decorators apply bottom-up, so ``mock_run`` is
        ``subprocess.run`` and ``mock_which`` is ``shutil.which``.
        """
        # Pretend both uv and npm binaries are installed.
        mock_which.side_effect = {"uv": "/usr/bin/uv", "npm": "/usr/bin/npm"}.get
        mock_run.side_effect = _make_run_side_effect(
            branch="main", verify_ok=True, commit_count="1"
        )
        cmd_update(mock_args)
        # Collect every npm invocation as an (argv, cwd) pair, in call order.
        npm_calls = [
            (call.args[0], call.kwargs.get("cwd"))
            for call in mock_run.call_args_list
            if call.args and call.args[0][0] == "/usr/bin/npm"
        ]
        # cmd_update runs npm commands in three locations:
        # 1. repo root — slash-command / TUI bridge deps
        # 2. ui-tui/ — Ink TUI deps
        # 3. web/ — install + "npm run build" for the web frontend
        full_flags = [
            "/usr/bin/npm",
            "install",
            "--silent",
            "--no-fund",
            "--no-audit",
            "--progress=false",
        ]
        assert npm_calls == [
            (full_flags, PROJECT_ROOT),
            (full_flags, PROJECT_ROOT / "ui-tui"),
            (["/usr/bin/npm", "install", "--silent"], PROJECT_ROOT / "web"),
            (["/usr/bin/npm", "run", "build"], PROJECT_ROOT / "web"),
        ]
def test_update_non_interactive_skips_migration_prompt(self, mock_args, capsys):
"""When stdin/stdout aren't TTYs, config migration prompt is skipped."""
with patch("shutil.which", return_value=None), patch(

View file

@ -93,15 +93,18 @@ class TestResolveCommand:
def test_canonical_name_resolves(self):
assert resolve_command("help").name == "help"
assert resolve_command("background").name == "background"
assert resolve_command("copy").name == "copy"
assert resolve_command("agents").name == "agents"
def test_alias_resolves_to_canonical(self):
assert resolve_command("bg").name == "background"
assert resolve_command("reset").name == "new"
assert resolve_command("q").name == "quit"
assert resolve_command("q").name == "queue"
assert resolve_command("exit").name == "quit"
assert resolve_command("gateway").name == "platforms"
assert resolve_command("set-home").name == "sethome"
assert resolve_command("reload_mcp").name == "reload-mcp"
assert resolve_command("tasks").name == "agents"
def test_leading_slash_stripped(self):
assert resolve_command("/help").name == "help"

View file

@ -459,7 +459,7 @@ class TestCustomProviderCompatibility:
migrate_config(interactive=False, quiet=True)
raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert raw["_config_version"] == 17
assert raw["_config_version"] == 19
assert raw["providers"]["openai-direct"] == {
"api": "https://api.openai.com/v1",
"api_key": "test-key",
@ -606,6 +606,26 @@ class TestInterimAssistantMessageConfig:
migrate_config(interactive=False, quiet=True)
raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert raw["_config_version"] == 17
assert raw["_config_version"] == 19
assert raw["display"]["tool_progress"] == "off"
assert raw["display"]["interim_assistant_messages"] is True
class TestDiscordChannelPromptsConfig:
    """discord.channel_prompts: default value and config migration."""

    def test_default_config_includes_discord_channel_prompts(self):
        assert DEFAULT_CONFIG["discord"]["channel_prompts"] == {}

    def test_migrate_adds_discord_channel_prompts_default(self, tmp_path):
        # Start from a v17 config that predates channel_prompts.
        config_path = tmp_path / "config.yaml"
        config_path.write_text(
            yaml.safe_dump({"_config_version": 17, "discord": {"auto_thread": True}}),
            encoding="utf-8",
        )
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            migrate_config(interactive=False, quiet=True)
        migrated = yaml.safe_load(config_path.read_text(encoding="utf-8"))
        # Migration bumps the version, keeps user settings, adds the default.
        assert migrated["_config_version"] == 19
        assert migrated["discord"]["auto_thread"] is True
        assert migrated["discord"]["channel_prompts"] == {}

View file

@ -0,0 +1,169 @@
import textwrap
from hermes_cli.config import load_config, save_config
def _write_config(tmp_path, body: str):
(tmp_path / "config.yaml").write_text(textwrap.dedent(body), encoding="utf-8")
def _read_config(tmp_path) -> str:
return (tmp_path / "config.yaml").read_text(encoding="utf-8")
def test_save_config_preserves_env_refs_on_unrelated_change(monkeypatch, tmp_path):
    """An unrelated edit must not expand ${VAR} secrets into the saved file."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("TU_ZI_API_KEY", "sk-realsecret")
    monkeypatch.setenv("ALT_SECRET", "alt-secret")
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: tuzi
            base_url: https://api.tu-zi.com
            api_key: ${TU_ZI_API_KEY}
            headers:
              Authorization: Bearer ${ALT_SECRET}
            model: claude-opus-4-6
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    config["model"]["default"] = "doubao-pro"
    save_config(config)

    saved = _read_config(tmp_path)
    # Templates survive verbatim; resolved secret values never hit disk.
    assert "api_key: ${TU_ZI_API_KEY}" in saved
    assert "Authorization: Bearer ${ALT_SECRET}" in saved
    assert "sk-realsecret" not in saved
    assert "alt-secret" not in saved
def test_save_config_preserves_unresolved_env_refs(monkeypatch, tmp_path):
    """A ${VAR} that resolves to nothing is still written back verbatim."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.delenv("MISSING_SECRET", raising=False)
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: unresolved
            api_key: ${MISSING_SECRET}
            model: claude-opus-4-6
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    config["display"]["compact"] = True
    save_config(config)

    assert "api_key: ${MISSING_SECRET}" in _read_config(tmp_path)
def test_save_config_allows_intentional_secret_value_change(monkeypatch, tmp_path):
    """Explicitly overwriting a templated secret replaces the template."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("TU_ZI_API_KEY", "sk-old-secret")
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: tuzi
            api_key: ${TU_ZI_API_KEY}
            model: claude-opus-4-6
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    config["custom_providers"][0]["api_key"] = "sk-new-secret"
    save_config(config)

    saved = _read_config(tmp_path)
    assert "api_key: sk-new-secret" in saved
    assert "${TU_ZI_API_KEY}" not in saved
def test_save_config_preserves_template_when_env_rotates_after_load(monkeypatch, tmp_path):
    """Rotating the env var between load and save keeps the template intact."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("TU_ZI_API_KEY", "sk-old-secret")
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: tuzi
            api_key: ${TU_ZI_API_KEY}
            model: claude-opus-4-6
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    # The secret rotates after load but before an unrelated save.
    monkeypatch.setenv("TU_ZI_API_KEY", "sk-rotated-secret")
    config["model"]["default"] = "doubao-pro"
    save_config(config)

    saved = _read_config(tmp_path)
    assert "api_key: ${TU_ZI_API_KEY}" in saved
    assert "sk-old-secret" not in saved
    assert "sk-rotated-secret" not in saved
def test_save_config_keeps_edited_partial_template_strings_literal(monkeypatch, tmp_path):
    """Editing a value that embedded a ${VAR} stores the new literal string."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("ALT_SECRET", "alt-secret")
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: tuzi
            headers:
              Authorization: Bearer ${ALT_SECRET}
            model: claude-opus-4-6
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    config["custom_providers"][0]["headers"]["Authorization"] = "Token alt-secret"
    save_config(config)

    saved = _read_config(tmp_path)
    assert "Authorization: Token alt-secret" in saved
    assert "Authorization: Bearer ${ALT_SECRET}" not in saved
def test_save_config_falls_back_to_positional_matching_for_duplicate_names(monkeypatch, tmp_path):
    """Duplicate provider names fall back to positional matching, keeping both templates."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("FIRST_SECRET", "first-secret")
    monkeypatch.setenv("SECOND_SECRET", "second-secret")
    _write_config(
        tmp_path,
        """\
        custom_providers:
          - name: duplicate
            api_key: ${FIRST_SECRET}
            model: claude-opus-4-6
          - name: duplicate
            api_key: ${SECOND_SECRET}
            model: doubao-pro
        model:
          default: claude-opus-4-6
        """,
    )

    config = load_config()
    config["display"]["compact"] = True
    save_config(config)

    saved = _read_config(tmp_path)
    assert saved.count("name: duplicate") == 2
    assert "api_key: ${FIRST_SECRET}" in saved
    assert "api_key: ${SECOND_SECRET}" in saved
    assert "first-secret" not in saved
    assert "second-secret" not in saved

View file

@ -428,7 +428,9 @@ class TestRunDebug:
run_debug(args)
out = capsys.readouterr().out
assert "hermes debug share" in out
assert "hermes debug" in out
assert "share" in out
assert "delete" in out
def test_share_subcommand_routes(self, hermes_home):
from hermes_cli.debug import run_debug
@ -447,15 +449,417 @@ class TestRunDebug:
# Argparse integration
# ---------------------------------------------------------------------------
class TestArgparseIntegration:
    """Smoke checks that the debug CLI entry points import cleanly."""

    def test_module_imports_clean(self):
        from hermes_cli.debug import run_debug, run_debug_share
        assert callable(run_debug)
        assert callable(run_debug_share)
# ---------------------------------------------------------------------------
# Delete / auto-delete
# ---------------------------------------------------------------------------
    def test_cmd_debug_dispatches(self):
        # NOTE(review): this method only imports cmd_debug and asserts
        # nothing — it looks like a truncated merge leftover; confirm
        # whether the rest of its body was lost and restore or remove it.
        from hermes_cli.main import cmd_debug
class TestExtractPasteId:
    """URL → paste-id extraction, which only recognises paste.rs links."""

    @staticmethod
    def _extract(url):
        from hermes_cli.debug import _extract_paste_id
        return _extract_paste_id(url)

    def test_paste_rs_url(self):
        assert self._extract("https://paste.rs/abc123") == "abc123"

    def test_paste_rs_trailing_slash(self):
        assert self._extract("https://paste.rs/abc123/") == "abc123"

    def test_http_variant(self):
        assert self._extract("http://paste.rs/xyz") == "xyz"

    def test_non_paste_rs_returns_none(self):
        assert self._extract("https://dpaste.com/ABCDEF") is None

    def test_empty_returns_none(self):
        assert self._extract("") is None
class TestDeletePaste:
    """delete_paste issues an HTTP DELETE, and only for paste.rs URLs."""

    def test_delete_sends_delete_request(self):
        from hermes_cli.debug import delete_paste

        # A context-manager-capable fake response reporting HTTP 200.
        fake_resp = MagicMock()
        fake_resp.status = 200
        fake_resp.__enter__ = lambda s: s
        fake_resp.__exit__ = MagicMock(return_value=False)

        with patch("hermes_cli.debug.urllib.request.urlopen",
                   return_value=fake_resp) as mock_open:
            assert delete_paste("https://paste.rs/abc123") is True

        request = mock_open.call_args[0][0]
        assert request.method == "DELETE"
        assert "paste.rs/abc123" in request.full_url

    def test_delete_rejects_non_paste_rs(self):
        from hermes_cli.debug import delete_paste

        with pytest.raises(ValueError, match="only paste.rs"):
            delete_paste("https://dpaste.com/something")
class TestScheduleAutoDelete:
    """``_schedule_auto_delete`` used to spawn a detached Python subprocess
    per call (one per paste URL batch). Those subprocesses slept 6 hours
    and accumulated forever under repeated use — 15+ orphaned interpreters
    were observed in production.
    The new implementation is stateless: it records pending deletions to
    ``~/.hermes/pastes/pending.json`` and lets ``_sweep_expired_pastes``
    handle the DELETE requests synchronously on the next ``hermes debug``
    invocation.
    """

    def test_does_not_spawn_subprocess(self, hermes_home):
        """Regression guard: _schedule_auto_delete must NEVER spawn subprocesses.
        We assert this structurally rather than by mocking Popen: the new
        implementation doesn't even import ``subprocess`` at module scope,
        so a mock patch wouldn't find it.
        """
        import ast
        import inspect
        from hermes_cli.debug import _schedule_auto_delete
        # Strip the docstring before scanning so the regression-rationale
        # prose inside it doesn't trigger our banned-word checks.
        source = inspect.getsource(_schedule_auto_delete)
        tree = ast.parse(source)
        func_node = tree.body[0]
        # A leading Expr holding a string constant is the docstring node.
        if (
            func_node.body
            and isinstance(func_node.body[0], ast.Expr)
            and isinstance(func_node.body[0].value, ast.Constant)
            and isinstance(func_node.body[0].value.value, str)
        ):
            func_node.body = func_node.body[1:]
        code_only = ast.unparse(func_node)
        assert "Popen" not in code_only, (
            "_schedule_auto_delete must not spawn subprocesses — "
            "use pending.json + _sweep_expired_pastes instead"
        )
        assert "subprocess" not in code_only, (
            "_schedule_auto_delete must not reference subprocess at all"
        )
        assert "time.sleep" not in code_only, (
            "Regression: sleeping in _schedule_auto_delete is the bug being fixed"
        )
        # And verify that calling it doesn't produce any orphaned children
        # (it should just write pending.json synchronously).
        import os as _os
        # /proc only exists on Linux; elsewhere the process scan is skipped.
        before = set(_os.listdir("/proc")) if _os.path.exists("/proc") else None
        _schedule_auto_delete(
            ["https://paste.rs/abc", "https://paste.rs/def"],
            delay_seconds=10,
        )
        if before is not None:
            after = set(_os.listdir("/proc"))
            new = after - before
            # Filter to only integer-named entries (process PIDs)
            new_pids = [p for p in new if p.isdigit()]
            # It's fine if unrelated processes appeared — we just need to make
            # sure we didn't spawn a long-sleeping one. The old bug spawned
            # a python interpreter whose cmdline contained "time.sleep".
            for pid in new_pids:
                try:
                    with open(f"/proc/{pid}/cmdline", "rb") as f:
                        cmdline = f.read().decode("utf-8", errors="replace")
                        assert "time.sleep" not in cmdline, (
                            f"Leaked sleeper subprocess PID {pid}: {cmdline}"
                        )
                except OSError:
                    pass  # process exited already

    def test_records_pending_to_json(self, hermes_home):
        """Scheduled URLs are persisted to pending.json with expiration."""
        from hermes_cli.debug import _schedule_auto_delete, _pending_file
        import json
        _schedule_auto_delete(
            ["https://paste.rs/abc", "https://paste.rs/def"],
            delay_seconds=10,
        )
        pending_path = _pending_file()
        assert pending_path.exists()
        entries = json.loads(pending_path.read_text())
        assert len(entries) == 2
        urls = {e["url"] for e in entries}
        assert urls == {"https://paste.rs/abc", "https://paste.rs/def"}
        # expire_at is ~now + delay_seconds
        import time
        for e in entries:
            assert e["expire_at"] > time.time()
            assert e["expire_at"] <= time.time() + 15

    def test_skips_non_paste_rs_urls(self, hermes_home):
        """dpaste.com URLs auto-expire — don't track them."""
        from hermes_cli.debug import _schedule_auto_delete, _pending_file
        _schedule_auto_delete(["https://dpaste.com/something"])
        # pending.json should not be created for non-paste.rs URLs
        assert not _pending_file().exists()

    def test_merges_with_existing_pending(self, hermes_home):
        """Subsequent calls merge into existing pending.json."""
        from hermes_cli.debug import _schedule_auto_delete, _load_pending
        _schedule_auto_delete(["https://paste.rs/first"], delay_seconds=10)
        _schedule_auto_delete(["https://paste.rs/second"], delay_seconds=10)
        entries = _load_pending()
        urls = {e["url"] for e in entries}
        assert urls == {"https://paste.rs/first", "https://paste.rs/second"}

    def test_dedupes_same_url(self, hermes_home):
        """Same URL recorded twice → one entry with the later expire_at."""
        from hermes_cli.debug import _schedule_auto_delete, _load_pending
        _schedule_auto_delete(["https://paste.rs/dup"], delay_seconds=10)
        _schedule_auto_delete(["https://paste.rs/dup"], delay_seconds=100)
        entries = _load_pending()
        assert len(entries) == 1
        assert entries[0]["url"] == "https://paste.rs/dup"
class TestSweepExpiredPastes:
    """Test the opportunistic sweep that replaces the sleeping subprocess."""

    def test_sweep_empty_is_noop(self, hermes_home):
        # No pending.json at all → nothing deleted, nothing remaining.
        from hermes_cli.debug import _sweep_expired_pastes
        deleted, remaining = _sweep_expired_pastes()
        assert deleted == 0
        assert remaining == 0

    def test_sweep_deletes_expired_entries(self, hermes_home):
        """Only entries past their expire_at are DELETEd and dropped."""
        from hermes_cli.debug import (
            _sweep_expired_pastes,
            _save_pending,
            _load_pending,
        )
        import time
        # Seed pending.json with one expired + one future entry
        _save_pending([
            {"url": "https://paste.rs/expired", "expire_at": time.time() - 100},
            {"url": "https://paste.rs/future", "expire_at": time.time() + 3600},
        ])
        delete_calls = []
        def fake_delete(url):
            delete_calls.append(url)
            return True
        with patch("hermes_cli.debug.delete_paste", side_effect=fake_delete):
            deleted, remaining = _sweep_expired_pastes()
        # Only the expired URL was deleted; the future one is retained.
        assert delete_calls == ["https://paste.rs/expired"]
        assert deleted == 1
        assert remaining == 1
        entries = _load_pending()
        urls = {e["url"] for e in entries}
        assert urls == {"https://paste.rs/future"}

    def test_sweep_leaves_future_entries_alone(self, hermes_home):
        """Entries that have not expired yet trigger no DELETE at all."""
        from hermes_cli.debug import _sweep_expired_pastes, _save_pending
        import time
        _save_pending([
            {"url": "https://paste.rs/future1", "expire_at": time.time() + 3600},
            {"url": "https://paste.rs/future2", "expire_at": time.time() + 7200},
        ])
        with patch("hermes_cli.debug.delete_paste") as mock_delete:
            deleted, remaining = _sweep_expired_pastes()
        mock_delete.assert_not_called()
        assert deleted == 0
        assert remaining == 2

    def test_sweep_survives_network_failure(self, hermes_home):
        """Failed DELETEs stay in pending.json until the 24h grace window."""
        from hermes_cli.debug import (
            _sweep_expired_pastes,
            _save_pending,
            _load_pending,
        )
        import time
        _save_pending([
            {"url": "https://paste.rs/flaky", "expire_at": time.time() - 100},
        ])
        with patch(
            "hermes_cli.debug.delete_paste",
            side_effect=Exception("network down"),
        ):
            deleted, remaining = _sweep_expired_pastes()
        # Failure within 24h grace → kept for retry
        assert deleted == 0
        assert remaining == 1
        assert len(_load_pending()) == 1

    def test_sweep_drops_entries_past_grace_window(self, hermes_home):
        """After 24h past expiration, give up even on network failures."""
        from hermes_cli.debug import (
            _sweep_expired_pastes,
            _save_pending,
            _load_pending,
        )
        import time
        # Expired 25 hours ago → past the 24h grace window
        very_old = time.time() - (25 * 3600)
        _save_pending([
            {"url": "https://paste.rs/ancient", "expire_at": very_old},
        ])
        with patch(
            "hermes_cli.debug.delete_paste",
            side_effect=Exception("network down"),
        ):
            deleted, remaining = _sweep_expired_pastes()
        # A given-up entry counts as handled (deleted) and leaves the file.
        assert deleted == 1
        assert remaining == 0
        assert _load_pending() == []
class TestRunDebugSweepsOnInvocation:
    """``run_debug`` must sweep expired pastes on every invocation."""

    def test_run_debug_calls_sweep(self, hermes_home):
        """The sweep runs exactly once per ``run_debug`` call."""
        from hermes_cli.debug import run_debug
        args = MagicMock()
        args.debug_command = None  # default → prints help
        with patch("hermes_cli.debug._sweep_expired_pastes") as mock_sweep:
            run_debug(args)
        mock_sweep.assert_called_once()

    def test_run_debug_survives_sweep_failure(self, hermes_home, capsys):
        """If the sweep throws, the subcommand still runs.

        Fix: dropped a stray ``cmd_debug(args)`` call — the name was never
        imported in this scope (merge artifact), so the test raised
        NameError before exercising the behaviour under test.
        """
        from hermes_cli.debug import run_debug
        args = MagicMock()
        args.debug_command = None
        with patch(
            "hermes_cli.debug._sweep_expired_pastes",
            side_effect=RuntimeError("boom"),
        ):
            run_debug(args)  # must not raise
        # Default subcommand still printed help
        out = capsys.readouterr().out
        assert "Usage: hermes debug" in out
class TestRunDebugDelete:
    """CLI ``hermes debug delete`` — success, failure, and usage paths."""

    @staticmethod
    def _args_for(urls):
        # Minimal argparse-namespace stand-in carrying just .urls.
        args = MagicMock()
        args.urls = urls
        return args

    def test_deletes_valid_url(self, capsys):
        from hermes_cli.debug import run_debug_delete
        with patch("hermes_cli.debug.delete_paste", return_value=True):
            run_debug_delete(self._args_for(["https://paste.rs/abc"]))
        out = capsys.readouterr().out
        assert "Deleted" in out
        assert "paste.rs/abc" in out

    def test_handles_delete_failure(self, capsys):
        from hermes_cli.debug import run_debug_delete
        with patch("hermes_cli.debug.delete_paste",
                   side_effect=Exception("network error")):
            run_debug_delete(self._args_for(["https://paste.rs/abc"]))
        assert "Could not delete" in capsys.readouterr().out

    def test_no_urls_shows_usage(self, capsys):
        from hermes_cli.debug import run_debug_delete
        run_debug_delete(self._args_for([]))
        assert "Usage" in capsys.readouterr().out
class TestShareIncludesAutoDelete:
    """Verify that run_debug_share schedules auto-deletion and prints TTL."""

    @staticmethod
    def _share_args(local):
        # Minimal argparse-namespace stand-in for `hermes debug share`.
        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = local
        return args

    def test_share_schedules_auto_delete(self, hermes_home, capsys):
        from hermes_cli.debug import run_debug_share
        with patch("hermes_cli.dump.run_dump"), \
             patch("hermes_cli.debug.upload_to_pastebin",
                   return_value="https://paste.rs/test1"), \
             patch("hermes_cli.debug._schedule_auto_delete") as mock_sched:
            run_debug_share(self._share_args(local=False))
        # auto-delete was scheduled with the uploaded URLs
        mock_sched.assert_called_once()
        urls_arg = mock_sched.call_args[0][0]
        assert "https://paste.rs/test1" in urls_arg
        assert "auto-delete" in capsys.readouterr().out

    def test_share_shows_privacy_notice(self, hermes_home, capsys):
        from hermes_cli.debug import run_debug_share
        with patch("hermes_cli.dump.run_dump"), \
             patch("hermes_cli.debug.upload_to_pastebin",
                   return_value="https://paste.rs/test"), \
             patch("hermes_cli.debug._schedule_auto_delete"):
            run_debug_share(self._share_args(local=False))
        assert "public paste service" in capsys.readouterr().out

    def test_local_no_privacy_notice(self, hermes_home, capsys):
        from hermes_cli.debug import run_debug_share
        with patch("hermes_cli.dump.run_dump"):
            run_debug_share(self._share_args(local=True))
        assert "public paste service" not in capsys.readouterr().out

View file

@ -0,0 +1,64 @@
"""Tests for warn_deprecated_cwd_env_vars() migration warning."""
import os
import pytest
class TestDeprecatedCwdWarning:
    """Warn when MESSAGING_CWD or TERMINAL_CWD is set in .env."""

    @staticmethod
    def _warn_with_env(monkeypatch, *, messaging=None, terminal=None, config=None):
        # Set or clear each deprecated var, then invoke the warning helper.
        for var, value in (("MESSAGING_CWD", messaging), ("TERMINAL_CWD", terminal)):
            if value is None:
                monkeypatch.delenv(var, raising=False)
            else:
                monkeypatch.setenv(var, value)
        from hermes_cli.config import warn_deprecated_cwd_env_vars
        warn_deprecated_cwd_env_vars(config={} if config is None else config)

    def test_messaging_cwd_triggers_warning(self, monkeypatch, capsys):
        self._warn_with_env(monkeypatch, messaging="/some/path")
        err = capsys.readouterr().err
        assert "MESSAGING_CWD" in err
        assert "deprecated" in err.lower()
        assert "config.yaml" in err

    def test_terminal_cwd_triggers_warning_when_config_placeholder(self, monkeypatch, capsys):
        # A placeholder cwd ('.') means TERMINAL_CWD likely came from .env.
        self._warn_with_env(
            monkeypatch, terminal="/project", config={"terminal": {"cwd": "."}},
        )
        err = capsys.readouterr().err
        assert "TERMINAL_CWD" in err
        assert "deprecated" in err.lower()

    def test_no_warning_when_config_has_explicit_cwd(self, monkeypatch, capsys):
        # An explicit cwd means TERMINAL_CWD could be the config bridge.
        self._warn_with_env(
            monkeypatch, terminal="/project", config={"terminal": {"cwd": "/project"}},
        )
        assert "TERMINAL_CWD" not in capsys.readouterr().err

    def test_no_warning_when_env_clean(self, monkeypatch, capsys):
        self._warn_with_env(monkeypatch)
        assert capsys.readouterr().err == ""

    def test_both_deprecated_vars_warn(self, monkeypatch, capsys):
        self._warn_with_env(monkeypatch, messaging="/msg/path", terminal="/term/path")
        err = capsys.readouterr().err
        assert "MESSAGING_CWD" in err
        assert "TERMINAL_CWD" in err

View file

@ -0,0 +1,217 @@
"""Unit tests for hermes_cli/dingtalk_auth.py (QR device-flow registration)."""
from __future__ import annotations
import sys
from unittest.mock import MagicMock, patch
import pytest
# ---------------------------------------------------------------------------
# API layer — _api_post + error mapping
# ---------------------------------------------------------------------------
class TestApiPost:
    """``_api_post`` wraps ``requests.post`` and maps failures to ``RegistrationError``."""

    def test_raises_on_network_error(self):
        # Transport-level failures surface as RegistrationError matching "Network error".
        import requests
        from hermes_cli.dingtalk_auth import _api_post, RegistrationError
        with patch("hermes_cli.dingtalk_auth.requests.post",
                   side_effect=requests.ConnectionError("nope")):
            with pytest.raises(RegistrationError, match="Network error"):
                _api_post("/app/registration/init", {"source": "hermes"})

    def test_raises_on_nonzero_errcode(self):
        # API-level failures (errcode != 0) raise with errmsg and errcode in the message.
        from hermes_cli.dingtalk_auth import _api_post, RegistrationError
        mock_resp = MagicMock()
        mock_resp.raise_for_status = MagicMock()
        mock_resp.json.return_value = {"errcode": 42, "errmsg": "boom"}
        with patch("hermes_cli.dingtalk_auth.requests.post", return_value=mock_resp):
            with pytest.raises(RegistrationError, match=r"boom \(errcode=42\)"):
                _api_post("/app/registration/init", {"source": "hermes"})

    def test_returns_data_on_success(self):
        # errcode == 0 -> the parsed JSON payload is returned to the caller.
        from hermes_cli.dingtalk_auth import _api_post
        mock_resp = MagicMock()
        mock_resp.raise_for_status = MagicMock()
        mock_resp.json.return_value = {"errcode": 0, "nonce": "abc"}
        with patch("hermes_cli.dingtalk_auth.requests.post", return_value=mock_resp):
            result = _api_post("/app/registration/init", {"source": "hermes"})
        assert result["nonce"] == "abc"
# ---------------------------------------------------------------------------
# begin_registration — 2-step nonce → device_code chain
# ---------------------------------------------------------------------------
class TestBeginRegistration:
    """``begin_registration`` chains the init (nonce) and begin (device_code) calls."""

    def test_chains_init_then_begin(self):
        from hermes_cli.dingtalk_auth import begin_registration
        # First _api_post response: init -> nonce; second: begin -> device-flow payload.
        responses = [
            {"errcode": 0, "nonce": "nonce123"},
            {
                "errcode": 0,
                "device_code": "dev-xyz",
                "verification_uri_complete": "https://open-dev.dingtalk.com/openapp/registration/openClaw?user_code=ABCD",
                "expires_in": 7200,
                "interval": 2,
            },
        ]
        with patch("hermes_cli.dingtalk_auth._api_post", side_effect=responses):
            result = begin_registration()
        assert result["device_code"] == "dev-xyz"
        assert "verification_uri_complete" in result
        assert result["interval"] == 2
        assert result["expires_in"] == 7200

    def test_missing_nonce_raises(self):
        # An empty nonce from the init call is a hard failure, not a silent continue.
        from hermes_cli.dingtalk_auth import begin_registration, RegistrationError
        with patch("hermes_cli.dingtalk_auth._api_post",
                   return_value={"errcode": 0, "nonce": ""}):
            with pytest.raises(RegistrationError, match="missing nonce"):
                begin_registration()

    def test_missing_device_code_raises(self):
        # Second response lacking device_code must raise.
        from hermes_cli.dingtalk_auth import begin_registration, RegistrationError
        responses = [
            {"errcode": 0, "nonce": "n1"},
            {"errcode": 0, "verification_uri_complete": "http://x"},  # no device_code
        ]
        with patch("hermes_cli.dingtalk_auth._api_post", side_effect=responses):
            with pytest.raises(RegistrationError, match="missing device_code"):
                begin_registration()

    def test_missing_verification_uri_raises(self):
        # Second response lacking verification_uri_complete must raise.
        from hermes_cli.dingtalk_auth import begin_registration, RegistrationError
        responses = [
            {"errcode": 0, "nonce": "n1"},
            {"errcode": 0, "device_code": "dev"},  # no verification_uri_complete
        ]
        with patch("hermes_cli.dingtalk_auth._api_post", side_effect=responses):
            with pytest.raises(RegistrationError,
                              match="missing verification_uri_complete"):
                begin_registration()
# ---------------------------------------------------------------------------
# wait_for_registration_success — polling loop
# ---------------------------------------------------------------------------
class TestWaitForSuccess:
    """``wait_for_registration_success`` polls until SUCCESS (or fails)."""

    def test_returns_credentials_on_success(self):
        from hermes_cli.dingtalk_auth import wait_for_registration_success
        # Two WAITING polls, then SUCCESS carrying the credentials.
        responses = [
            {"status": "WAITING"},
            {"status": "WAITING"},
            {"status": "SUCCESS", "client_id": "cid-1", "client_secret": "sec-1"},
        ]
        with patch("hermes_cli.dingtalk_auth.poll_registration", side_effect=responses), \
             patch("hermes_cli.dingtalk_auth.time.sleep"):
            cid, secret = wait_for_registration_success(
                device_code="dev", interval=0, expires_in=60
            )
        assert cid == "cid-1"
        assert secret == "sec-1"

    def test_success_without_credentials_raises(self):
        # SUCCESS with empty credentials must raise rather than return blanks.
        from hermes_cli.dingtalk_auth import wait_for_registration_success, RegistrationError
        with patch("hermes_cli.dingtalk_auth.poll_registration",
                   return_value={"status": "SUCCESS", "client_id": "", "client_secret": ""}), \
             patch("hermes_cli.dingtalk_auth.time.sleep"):
            with pytest.raises(RegistrationError, match="credentials are missing"):
                wait_for_registration_success(
                    device_code="dev", interval=0, expires_in=60
                )

    def test_invokes_waiting_callback(self):
        # on_waiting fires once per WAITING poll (two here).
        from hermes_cli.dingtalk_auth import wait_for_registration_success
        callback = MagicMock()
        responses = [
            {"status": "WAITING"},
            {"status": "WAITING"},
            {"status": "SUCCESS", "client_id": "cid", "client_secret": "sec"},
        ]
        with patch("hermes_cli.dingtalk_auth.poll_registration", side_effect=responses), \
             patch("hermes_cli.dingtalk_auth.time.sleep"):
            wait_for_registration_success(
                device_code="dev", interval=0, expires_in=60, on_waiting=callback
            )
        assert callback.call_count == 2
# ---------------------------------------------------------------------------
# QR rendering — terminal output
# ---------------------------------------------------------------------------
class TestRenderQR:
    """``render_qr_to_terminal`` degrades gracefully without the qrcode lib."""

    def test_returns_false_when_qrcode_missing(self, monkeypatch):
        from hermes_cli import dingtalk_auth
        # Simulate qrcode import failure
        monkeypatch.setitem(sys.modules, "qrcode", None)
        assert dingtalk_auth.render_qr_to_terminal("https://example.com") is False

    def test_prints_when_qrcode_available(self, capsys):
        """End-to-end: render a real QR and verify SOMETHING got printed."""
        try:
            import qrcode  # noqa: F401
        except ImportError:
            pytest.skip("qrcode library not available")
        from hermes_cli.dingtalk_auth import render_qr_to_terminal
        result = render_qr_to_terminal("https://example.com/test")
        captured = capsys.readouterr()
        assert result is True
        assert len(captured.out) > 100  # rendered matrix is non-trivial
# ---------------------------------------------------------------------------
# Configuration — env var overrides
# ---------------------------------------------------------------------------
class TestConfigOverrides:
    """Module-level constants honor env-var overrides (requires a reload)."""

    def test_base_url_default(self, monkeypatch):
        monkeypatch.delenv("DINGTALK_REGISTRATION_BASE_URL", raising=False)
        # Force module reload to pick up current env
        import importlib
        import hermes_cli.dingtalk_auth as mod
        importlib.reload(mod)
        assert mod.REGISTRATION_BASE_URL == "https://oapi.dingtalk.com"

    def test_base_url_override_via_env(self, monkeypatch):
        monkeypatch.setenv("DINGTALK_REGISTRATION_BASE_URL",
                           "https://test.example.com/")
        import importlib
        import hermes_cli.dingtalk_auth as mod
        importlib.reload(mod)
        # Trailing slash stripped
        assert mod.REGISTRATION_BASE_URL == "https://test.example.com"

    def test_source_default(self, monkeypatch):
        monkeypatch.delenv("DINGTALK_REGISTRATION_SOURCE", raising=False)
        import importlib
        import hermes_cli.dingtalk_auth as mod
        importlib.reload(mod)
        assert mod.REGISTRATION_SOURCE == "openClaw"

View file

@ -343,3 +343,57 @@ def test_run_doctor_kimi_cn_env_is_detected_and_probe_is_null_safe(monkeypatch,
assert "Kimi / Moonshot (China)" in out
assert "str expected, not NoneType" not in out
assert any(url == "https://api.moonshot.cn/v1/models" for url, _, _ in calls)
@pytest.mark.parametrize("base_url", [None, "https://opencode.ai/zen/go/v1"])
def test_run_doctor_opencode_go_skips_invalid_models_probe(monkeypatch, tmp_path, base_url):
    """Doctor must not probe an OpenCode Go ``/models`` URL, with or without a base URL set."""
    home = tmp_path / ".hermes"
    home.mkdir(parents=True, exist_ok=True)
    (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8")
    (home / ".env").write_text("OPENCODE_GO_API_KEY=***\n", encoding="utf-8")
    project = tmp_path / "project"
    project.mkdir(exist_ok=True)
    monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
    monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project)
    monkeypatch.setattr(doctor_mod, "_DHH", str(home))
    monkeypatch.setenv("OPENCODE_GO_API_KEY", "sk-test")
    # Parametrized: run once with no base URL and once with it explicitly set.
    if base_url:
        monkeypatch.setenv("OPENCODE_GO_BASE_URL", base_url)
    else:
        monkeypatch.delenv("OPENCODE_GO_BASE_URL", raising=False)
    # Stub model_tools so importing doctor's dependencies can't fail.
    fake_model_tools = types.SimpleNamespace(
        check_tool_availability=lambda *a, **kw: ([], []),
        TOOLSET_REQUIREMENTS={},
    )
    monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
    # Stub auth status lookups; tolerate the module being absent.
    try:
        from hermes_cli import auth as _auth_mod
        monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
        monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
    except ImportError:
        pass
    # Record every HTTP GET doctor attempts so we can assert on probed URLs.
    calls = []
    def fake_get(url, headers=None, timeout=None):
        calls.append((url, headers, timeout))
        return types.SimpleNamespace(status_code=200)
    import httpx
    monkeypatch.setattr(httpx, "get", fake_get)
    import io, contextlib
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        doctor_mod.run_doctor(Namespace(fix=False))
    out = buf.getvalue()
    # The key is still reported as configured...
    assert any(
        "OpenCode Go" in line and "(key configured)" in line
        for line in out.splitlines()
    )
    # ...but no OpenCode "models" endpoint was ever probed.
    assert not any(url == "https://opencode.ai/zen/go/v1/models" for url, _, _ in calls)
    assert not any("opencode" in url.lower() and "models" in url.lower() for url, _, _ in calls)

View file

@ -0,0 +1,275 @@
"""Tests for the Command Installation check in hermes doctor."""
import os
import sys
import types
from argparse import Namespace
from pathlib import Path
import pytest
import hermes_cli.doctor as doctor_mod
def _setup_doctor_env(monkeypatch, tmp_path, venv_name="venv"):
    """Create a minimal HERMES_HOME + PROJECT_ROOT for doctor tests.

    Returns ``(home, project, hermes_bin)`` where ``hermes_bin`` is an
    executable fake entry point at ``<project>/<venv_name>/bin/hermes``.
    Network and auth lookups are stubbed so run_doctor stays hermetic.
    """
    home = tmp_path / ".hermes"
    home.mkdir(parents=True, exist_ok=True)
    (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8")
    project = tmp_path / "project"
    project.mkdir(exist_ok=True)
    # Create a fake venv entry point
    venv_bin_dir = project / venv_name / "bin"
    venv_bin_dir.mkdir(parents=True, exist_ok=True)
    hermes_bin = venv_bin_dir / "hermes"
    hermes_bin.write_text("#!/usr/bin/env python\n# entry point\n")
    hermes_bin.chmod(0o755)
    monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
    monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project)
    monkeypatch.setattr(doctor_mod, "_DHH", str(home))
    # Stub model_tools so doctor doesn't fail on import
    fake_model_tools = types.SimpleNamespace(
        check_tool_availability=lambda *a, **kw: ([], []),
        TOOLSET_REQUIREMENTS={},
    )
    monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
    # Stub auth checks
    try:
        from hermes_cli import auth as _auth_mod
        monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
        monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
    except Exception:
        pass
    # Stub httpx.get to avoid network calls
    try:
        import httpx
        monkeypatch.setattr(httpx, "get", lambda *a, **kw: types.SimpleNamespace(status_code=200))
    except Exception:
        pass
    return home, project, hermes_bin
def _run_doctor(fix=False):
    """Invoke ``doctor_mod.run_doctor`` and return everything it printed."""
    import contextlib
    import io

    sink = io.StringIO()
    with contextlib.redirect_stdout(sink):
        doctor_mod.run_doctor(Namespace(fix=fix))
    return sink.getvalue()
class TestDoctorCommandInstallation:
    """Tests for the ◆ Command Installation section."""

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_correct_symlink_shows_ok(self, monkeypatch, tmp_path):
        # A ~/.local/bin/hermes symlink pointing at the venv binary passes the check.
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        # Create the command link dir with correct symlink
        cmd_link_dir = tmp_path / ".local" / "bin"
        cmd_link_dir.mkdir(parents=True)
        cmd_link = cmd_link_dir / "hermes"
        cmd_link.symlink_to(hermes_bin)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=False)
        assert "Command Installation" in out
        assert "Venv entry point exists" in out
        assert "correct target" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_missing_symlink_shows_fail(self, monkeypatch, tmp_path):
        # No symlink at all -> failure plus a hint to run `hermes doctor --fix`.
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        # Don't create the symlink — it should be missing
        out = _run_doctor(fix=False)
        assert "Command Installation" in out
        assert "Venv entry point exists" in out
        assert "not found" in out
        assert "hermes doctor --fix" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_fix_creates_missing_symlink(self, monkeypatch, tmp_path):
        # --fix mode creates the missing symlink and reports it.
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=True)
        assert "Command Installation" in out
        assert "Created symlink" in out
        # Verify the symlink was actually created
        cmd_link = tmp_path / ".local" / "bin" / "hermes"
        assert cmd_link.is_symlink()
        assert cmd_link.resolve() == hermes_bin.resolve()

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_wrong_target_symlink_shows_warn(self, monkeypatch, tmp_path):
        # A symlink pointing somewhere else is reported as "wrong target".
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        # Create a symlink pointing to the wrong target
        cmd_link_dir = tmp_path / ".local" / "bin"
        cmd_link_dir.mkdir(parents=True)
        cmd_link = cmd_link_dir / "hermes"
        wrong_target = tmp_path / "wrong_hermes"
        wrong_target.write_text("#!/usr/bin/env python\n")
        cmd_link.symlink_to(wrong_target)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=False)
        assert "Command Installation" in out
        assert "wrong target" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_fix_repairs_wrong_symlink(self, monkeypatch, tmp_path):
        # --fix mode repoints a wrong-target symlink at the real venv binary.
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        # Create a symlink pointing to wrong target
        cmd_link_dir = tmp_path / ".local" / "bin"
        cmd_link_dir.mkdir(parents=True)
        cmd_link = cmd_link_dir / "hermes"
        wrong_target = tmp_path / "wrong_hermes"
        wrong_target.write_text("#!/usr/bin/env python\n")
        cmd_link.symlink_to(wrong_target)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=True)
        assert "Fixed symlink" in out
        # Verify the symlink now points to the correct target
        assert cmd_link.is_symlink()
        assert cmd_link.resolve() == hermes_bin.resolve()

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_missing_venv_entry_point_shows_warn(self, monkeypatch, tmp_path):
        # Environment built by hand (not via _setup_doctor_env) so NO venv
        # entry point exists anywhere under the project.
        home = tmp_path / ".hermes"
        home.mkdir(parents=True, exist_ok=True)
        (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8")
        project = tmp_path / "project"
        project.mkdir(exist_ok=True)
        # Do NOT create any venv entry point
        monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
        monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project)
        monkeypatch.setattr(doctor_mod, "_DHH", str(home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        fake_model_tools = types.SimpleNamespace(
            check_tool_availability=lambda *a, **kw: ([], []),
            TOOLSET_REQUIREMENTS={},
        )
        monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
        try:
            from hermes_cli import auth as _auth_mod
            monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
            monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
        except Exception:
            pass
        try:
            import httpx
            monkeypatch.setattr(httpx, "get", lambda *a, **kw: types.SimpleNamespace(status_code=200))
        except Exception:
            pass
        out = _run_doctor(fix=False)
        assert "Command Installation" in out
        assert "Venv entry point not found" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_dot_venv_dir_is_found(self, monkeypatch, tmp_path):
        """The check finds entry points in .venv/ as well as venv/."""
        home, project, _ = _setup_doctor_env(monkeypatch, tmp_path, venv_name=".venv")
        # Create the command link with correct symlink
        hermes_bin = project / ".venv" / "bin" / "hermes"
        cmd_link_dir = tmp_path / ".local" / "bin"
        cmd_link_dir.mkdir(parents=True)
        cmd_link = cmd_link_dir / "hermes"
        cmd_link.symlink_to(hermes_bin)
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=False)
        assert "Venv entry point exists" in out
        assert ".venv/bin/hermes" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_non_symlink_regular_file_shows_ok(self, monkeypatch, tmp_path):
        """If ~/.local/bin/hermes is a regular file (not symlink), accept it."""
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        cmd_link_dir = tmp_path / ".local" / "bin"
        cmd_link_dir.mkdir(parents=True)
        cmd_link = cmd_link_dir / "hermes"
        cmd_link.write_text("#!/bin/sh\nexec python -m hermes_cli.main \"$@\"\n")
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=False)
        assert "non-symlink" in out

    @pytest.mark.skipif(sys.platform == "win32", reason="Symlink check is Unix-only")
    def test_termux_uses_prefix_bin(self, monkeypatch, tmp_path):
        """On Termux, the command link dir is $PREFIX/bin."""
        prefix_dir = tmp_path / "termux_prefix"
        prefix_bin = prefix_dir / "bin"
        prefix_bin.mkdir(parents=True)
        home, project, hermes_bin = _setup_doctor_env(monkeypatch, tmp_path)
        # TERMUX_VERSION + PREFIX signal a Termux environment to doctor.
        monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
        monkeypatch.setenv("PREFIX", str(prefix_dir))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out = _run_doctor(fix=False)
        assert "Command Installation" in out
        assert "$PREFIX/bin" in out

    def test_windows_skips_check(self, monkeypatch, tmp_path):
        """On Windows, the Command Installation section is skipped."""
        home = tmp_path / ".hermes"
        home.mkdir(parents=True, exist_ok=True)
        (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8")
        project = tmp_path / "project"
        project.mkdir(exist_ok=True)
        monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
        monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project)
        monkeypatch.setattr(doctor_mod, "_DHH", str(home))
        # Pretend we are on Windows for the duration of the test.
        monkeypatch.setattr(sys, "platform", "win32")
        fake_model_tools = types.SimpleNamespace(
            check_tool_availability=lambda *a, **kw: ([], []),
            TOOLSET_REQUIREMENTS={},
        )
        monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
        try:
            from hermes_cli import auth as _auth_mod
            monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
            monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
        except Exception:
            pass
        try:
            import httpx
            monkeypatch.setattr(httpx, "get", lambda *a, **kw: types.SimpleNamespace(status_code=200))
        except Exception:
            pass
        out = _run_doctor(fix=False)
        assert "Command Installation" not in out

View file

@ -39,6 +39,76 @@ class TestSystemdLingerStatus:
assert gateway.get_systemd_linger_status() == (None, "not supported in Termux")
class TestContainerSystemdSupport:
    """Containers count as systemd-capable when either manager is operational."""

    def test_supports_systemd_services_in_container_with_user_manager(self, monkeypatch):
        monkeypatch.setattr(gateway, "is_linux", lambda: True)
        monkeypatch.setattr(gateway, "is_termux", lambda: False)
        monkeypatch.setattr(gateway, "is_wsl", lambda: False)
        monkeypatch.setattr(gateway, "is_container", lambda: True)
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/systemctl")
        # Only the *user* manager is operational (system=False -> True).
        monkeypatch.setattr(gateway, "_systemd_operational", lambda system=False: not system)
        assert gateway.supports_systemd_services() is True

    def test_supports_systemd_services_in_container_with_system_manager(self, monkeypatch):
        monkeypatch.setattr(gateway, "is_linux", lambda: True)
        monkeypatch.setattr(gateway, "is_termux", lambda: False)
        monkeypatch.setattr(gateway, "is_wsl", lambda: False)
        monkeypatch.setattr(gateway, "is_container", lambda: True)
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/systemctl")
        # Only the *system* manager is operational (system=True -> True).
        monkeypatch.setattr(gateway, "_systemd_operational", lambda system=False: system)
        assert gateway.supports_systemd_services() is True

    def test_supports_systemd_services_in_container_without_systemd(self, monkeypatch):
        monkeypatch.setattr(gateway, "is_linux", lambda: True)
        monkeypatch.setattr(gateway, "is_termux", lambda: False)
        monkeypatch.setattr(gateway, "is_wsl", lambda: False)
        monkeypatch.setattr(gateway, "is_container", lambda: True)
        monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/systemctl")
        # Neither manager is operational -> systemd services unsupported.
        monkeypatch.setattr(gateway, "_systemd_operational", lambda system=False: False)
        assert gateway.supports_systemd_services() is False
def test_gateway_install_in_container_with_operational_systemd_uses_systemd(monkeypatch):
    """`gateway install` routes to systemd_install when systemd is usable."""
    # Platform predicates: systemd available, not WSL/macOS/managed.
    for attr, value in (
        ("supports_systemd_services", True),
        ("is_wsl", False),
        ("is_macos", False),
        ("is_managed", False),
    ):
        monkeypatch.setattr(gateway, attr, lambda value=value: value)
    recorded = []
    monkeypatch.setattr(
        gateway,
        "systemd_install",
        lambda force=False, system=False, run_as_user=None: recorded.append((force, system, run_as_user)),
    )
    gateway.gateway_command(
        SimpleNamespace(
            gateway_command="install",
            force=False,
            system=False,
            run_as_user=None,
        )
    )
    assert recorded == [(False, False, None)]
def test_gateway_start_in_container_with_operational_systemd_uses_systemd(monkeypatch):
    """`gateway start` routes to systemd_start when systemd is usable."""
    monkeypatch.setattr(gateway, "supports_systemd_services", lambda: True)
    monkeypatch.setattr(gateway, "is_wsl", lambda: False)
    monkeypatch.setattr(gateway, "is_macos", lambda: False)
    recorded = []
    monkeypatch.setattr(gateway, "systemd_start", lambda system=False: recorded.append(system))
    gateway.gateway_command(SimpleNamespace(gateway_command="start", system=False, all=False))
    # systemd_start called exactly once, in user scope.
    assert recorded == [False]
def test_systemd_status_warns_when_linger_disabled(monkeypatch, tmp_path, capsys):
unit_path = tmp_path / "hermes-gateway.service"
unit_path.write_text("[Unit]\n")
@ -179,6 +249,21 @@ def test_install_linux_gateway_from_setup_system_choice_as_root_installs(monkeyp
assert calls == [(True, True, "alice")]
def test_find_gateway_pids_falls_back_to_pid_file_when_process_scan_fails(monkeypatch):
    """When `ps` fails, find_gateway_pids falls back to the recorded PID file."""
    monkeypatch.setattr(gateway, "_get_service_pids", lambda: set())
    monkeypatch.setattr(gateway, "is_windows", lambda: False)
    # PID-file fallback reports 321.
    monkeypatch.setattr("gateway.status.get_running_pid", lambda: 321)
    def fake_run(cmd, **kwargs):
        # Fail the process scan; reject any other subprocess use.
        if cmd[:4] == ["ps", "-A", "eww", "-o"]:
            return SimpleNamespace(returncode=1, stdout="", stderr="ps failed")
        raise AssertionError(f"Unexpected command: {cmd}")
    monkeypatch.setattr(gateway.subprocess, "run", fake_run)
    assert gateway.find_gateway_pids() == [321]
# ---------------------------------------------------------------------------
# _wait_for_gateway_exit
# ---------------------------------------------------------------------------

View file

@ -450,7 +450,6 @@ class TestGatewayServiceDetection:
assert gateway_cli._is_service_running() is False
class TestGatewaySystemServiceRouting:
def test_systemd_restart_self_requests_graceful_restart_and_waits(self, monkeypatch, capsys):
calls = []
@ -554,6 +553,38 @@ class TestGatewaySystemServiceRouting:
assert calls == [(False, False)]
def test_gateway_status_reports_manual_process_when_service_is_stopped(self, monkeypatch, capsys):
    """A stopped service with a live gateway PID is reported as a manual process."""
    # User unit installed, system unit absent.
    user_unit = SimpleNamespace(exists=lambda: True)
    system_unit = SimpleNamespace(exists=lambda: False)
    monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
    monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
    monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
    monkeypatch.setattr(
        gateway_cli,
        "get_systemd_unit_path",
        lambda system=False: system_unit if system else user_unit,
    )
    monkeypatch.setattr(gateway_cli, "systemd_status", lambda deep=False, system=False: print("service stopped"))
    # Snapshot: service installed but not running, yet PID 4321 is alive.
    monkeypatch.setattr(
        gateway_cli,
        "get_gateway_runtime_snapshot",
        lambda system=False: gateway_cli.GatewayRuntimeSnapshot(
            manager="systemd (user)",
            service_installed=True,
            service_running=False,
            gateway_pids=(4321,),
            service_scope="user",
        ),
    )
    gateway_cli.gateway_command(SimpleNamespace(gateway_command="status", deep=False, system=False))
    out = capsys.readouterr().out
    assert "service stopped" in out
    assert "Gateway process is running for this profile" in out
    assert "PID(s): 4321" in out
def test_gateway_status_on_termux_shows_manual_guidance(self, monkeypatch, capsys):
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: True)
@ -613,6 +644,7 @@ class TestDetectVenvDir:
# Not inside a virtualenv
monkeypatch.setattr("sys.prefix", "/usr")
monkeypatch.setattr("sys.base_prefix", "/usr")
monkeypatch.delenv("VIRTUAL_ENV", raising=False)
monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
dot_venv = tmp_path / ".venv"
@ -624,6 +656,7 @@ class TestDetectVenvDir:
def test_falls_back_to_venv_directory(self, tmp_path, monkeypatch):
monkeypatch.setattr("sys.prefix", "/usr")
monkeypatch.setattr("sys.base_prefix", "/usr")
monkeypatch.delenv("VIRTUAL_ENV", raising=False)
monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
venv = tmp_path / "venv"
@ -635,6 +668,7 @@ class TestDetectVenvDir:
def test_prefers_dot_venv_over_venv(self, tmp_path, monkeypatch):
monkeypatch.setattr("sys.prefix", "/usr")
monkeypatch.setattr("sys.base_prefix", "/usr")
monkeypatch.delenv("VIRTUAL_ENV", raising=False)
monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
(tmp_path / ".venv").mkdir()
@ -646,6 +680,7 @@ class TestDetectVenvDir:
def test_returns_none_when_no_virtualenv(self, tmp_path, monkeypatch):
monkeypatch.setattr("sys.prefix", "/usr")
monkeypatch.setattr("sys.base_prefix", "/usr")
monkeypatch.delenv("VIRTUAL_ENV", raising=False)
monkeypatch.setattr(gateway_cli, "PROJECT_ROOT", tmp_path)
result = gateway_cli._detect_venv_dir()
@ -1142,3 +1177,556 @@ class TestDockerAwareGateway:
out = capsys.readouterr().out
assert "docker" in out.lower()
assert "hermes gateway run" in out
class TestLegacyHermesUnitDetection:
    """Tests for _find_legacy_hermes_units / has_legacy_hermes_units.

    These guard against the scenario that tripped Luis in April 2026: an
    older install left a ``hermes.service`` unit behind when the service was
    renamed to ``hermes-gateway.service``. After PR #5646 (signal recovery
    via systemd), the two services began SIGTERM-flapping over the same
    Telegram bot token in a 30-second cycle.

    The detector must flag ``hermes.service`` ONLY when it actually runs our
    gateway, and must NEVER flag profile units
    (``hermes-gateway-<profile>.service``) or unrelated third-party services.
    """

    # Minimal ExecStart that looks like our gateway
    _OUR_UNIT_TEXT = (
        "[Unit]\nDescription=Hermes Gateway\n[Service]\n"
        "ExecStart=/usr/bin/python -m hermes_cli.main gateway run --replace\n"
    )

    @staticmethod
    def _setup_search_paths(tmp_path, monkeypatch):
        """Redirect the legacy search to user_dir + system_dir under tmp_path."""
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        return user_dir, system_dir

    def test_detects_legacy_hermes_service_in_user_scope(self, tmp_path, monkeypatch):
        # A user-scope hermes.service running our gateway is flagged (is_system=False).
        user_dir, _ = self._setup_search_paths(tmp_path, monkeypatch)
        legacy = user_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        results = gateway_cli._find_legacy_hermes_units()
        assert len(results) == 1
        name, path, is_system = results[0]
        assert name == "hermes.service"
        assert path == legacy
        assert is_system is False
        assert gateway_cli.has_legacy_hermes_units() is True

    def test_detects_legacy_hermes_service_in_system_scope(self, tmp_path, monkeypatch):
        # Same detection in system scope reports is_system=True.
        _, system_dir = self._setup_search_paths(tmp_path, monkeypatch)
        legacy = system_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        results = gateway_cli._find_legacy_hermes_units()
        assert len(results) == 1
        name, path, is_system = results[0]
        assert name == "hermes.service"
        assert path == legacy
        assert is_system is True

    def test_ignores_profile_unit_hermes_gateway_coder(self, tmp_path, monkeypatch):
        """CRITICAL: profile units must NOT be flagged as legacy.

        Teknium's concern — ``hermes-gateway-coder.service`` is our standard
        naming for the ``coder`` profile. The legacy detector is an explicit
        allowlist, not a glob, so profile units are safe.
        """
        user_dir, system_dir = self._setup_search_paths(tmp_path, monkeypatch)
        # Drop profile units in BOTH scopes with our ExecStart
        for base in (user_dir, system_dir):
            (base / "hermes-gateway-coder.service").write_text(
                self._OUR_UNIT_TEXT, encoding="utf-8"
            )
            (base / "hermes-gateway-orcha.service").write_text(
                self._OUR_UNIT_TEXT, encoding="utf-8"
            )
            (base / "hermes-gateway.service").write_text(
                self._OUR_UNIT_TEXT, encoding="utf-8"
            )
        results = gateway_cli._find_legacy_hermes_units()
        assert results == []
        assert gateway_cli.has_legacy_hermes_units() is False

    def test_ignores_unrelated_hermes_service(self, tmp_path, monkeypatch):
        """Third-party ``hermes.service`` that isn't ours stays untouched.

        If a user has some other package named ``hermes`` installed as a
        service, we must not flag it.
        """
        user_dir, _ = self._setup_search_paths(tmp_path, monkeypatch)
        (user_dir / "hermes.service").write_text(
            "[Unit]\nDescription=Some Other Hermes\n[Service]\n"
            "ExecStart=/opt/other-hermes/bin/daemon --foreground\n",
            encoding="utf-8",
        )
        results = gateway_cli._find_legacy_hermes_units()
        assert results == []
        assert gateway_cli.has_legacy_hermes_units() is False

    def test_returns_empty_when_no_legacy_files_exist(self, tmp_path, monkeypatch):
        self._setup_search_paths(tmp_path, monkeypatch)
        assert gateway_cli._find_legacy_hermes_units() == []
        assert gateway_cli.has_legacy_hermes_units() is False

    def test_detects_both_scopes_simultaneously(self, tmp_path, monkeypatch):
        """When a user has BOTH user-scope and system-scope legacy units,
        both are reported so the migration step can remove them together."""
        user_dir, system_dir = self._setup_search_paths(tmp_path, monkeypatch)
        (user_dir / "hermes.service").write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        (system_dir / "hermes.service").write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        results = gateway_cli._find_legacy_hermes_units()
        scopes = sorted(is_system for _, _, is_system in results)
        assert scopes == [False, True]

    def test_accepts_alternate_execstart_formats(self, tmp_path, monkeypatch):
        """Older installs may have used different python invocations.

        ExecStart variants we've seen in the wild:
        - python -m hermes_cli.main gateway run
        - python path/to/hermes_cli/main.py gateway run
        - hermes gateway run (direct binary)
        - python path/to/gateway/run.py
        """
        user_dir, _ = self._setup_search_paths(tmp_path, monkeypatch)
        variants = [
            "ExecStart=/venv/bin/python -m hermes_cli.main gateway run --replace",
            "ExecStart=/venv/bin/python /opt/hermes/hermes_cli/main.py gateway run",
            "ExecStart=/usr/local/bin/hermes gateway run --replace",
            "ExecStart=/venv/bin/python /opt/hermes/gateway/run.py",
        ]
        for i, execstart in enumerate(variants):
            # Each variant overwrites the same hermes.service file, so every
            # iteration tests detection fresh against exactly one unit.
            # (The original loop also built an unused `name` via a ternary whose
            # branches were identical f-strings with no placeholders — dead code,
            # removed.)
            (user_dir / "hermes.service").write_text(
                f"[Unit]\nDescription=Old Hermes\n[Service]\n{execstart}\n",
                encoding="utf-8",
            )
            results = gateway_cli._find_legacy_hermes_units()
            assert len(results) == 1, f"Variant {i} not detected: {execstart!r}"

    def test_print_legacy_unit_warning_is_noop_when_empty(self, tmp_path, monkeypatch, capsys):
        # Nothing legacy -> nothing printed.
        self._setup_search_paths(tmp_path, monkeypatch)
        gateway_cli.print_legacy_unit_warning()
        out = capsys.readouterr().out
        assert out == ""

    def test_print_legacy_unit_warning_shows_migration_hint(self, tmp_path, monkeypatch, capsys):
        # A detected legacy unit prints the unit name and the migrate command.
        user_dir, _ = self._setup_search_paths(tmp_path, monkeypatch)
        (user_dir / "hermes.service").write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        gateway_cli.print_legacy_unit_warning()
        out = capsys.readouterr().out
        assert "Legacy" in out
        assert "hermes.service" in out
        assert "hermes gateway migrate-legacy" in out

    def test_handles_unreadable_unit_file_gracefully(self, tmp_path, monkeypatch):
        """A permission error reading a unit file must not crash detection."""
        user_dir, _ = self._setup_search_paths(tmp_path, monkeypatch)
        unreadable = user_dir / "hermes.service"
        unreadable.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        # Simulate a read failure — monkeypatch Path.read_text to raise
        original_read_text = gateway_cli.Path.read_text
        def raising_read_text(self, *args, **kwargs):
            if self == unreadable:
                raise PermissionError("simulated")
            return original_read_text(self, *args, **kwargs)
        monkeypatch.setattr(gateway_cli.Path, "read_text", raising_read_text)
        # Should not raise
        results = gateway_cli._find_legacy_hermes_units()
        assert results == []
class TestRemoveLegacyHermesUnits:
    """Tests for remove_legacy_hermes_units (the migration action)."""

    # Unit text whose ExecStart matches the legacy "hermes gateway run" pattern
    # the detector treats as ours.
    _OUR_UNIT_TEXT = (
        "[Unit]\nDescription=Hermes Gateway\n[Service]\n"
        "ExecStart=/usr/bin/python -m hermes_cli.main gateway run --replace\n"
    )

    @staticmethod
    def _setup(tmp_path, monkeypatch, as_root=False):
        """Build sandbox user/system unit dirs, stub systemctl, fake euid.

        Returns (user_dir, system_dir, systemctl_calls), where
        systemctl_calls records every command handed to subprocess.run.
        """
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        # Redirect unit discovery away from the real systemd directories.
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        # Mock systemctl — return success for everything
        systemctl_calls: list[list[str]] = []

        def fake_run(cmd, **kwargs):
            systemctl_calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run)
        # euid 0 simulates root; 1000 simulates an unprivileged user.
        monkeypatch.setattr(gateway_cli.os, "geteuid", lambda: 0 if as_root else 1000)
        return user_dir, system_dir, systemctl_calls

    def test_returns_zero_when_no_legacy_units(self, tmp_path, monkeypatch, capsys):
        # Nothing to migrate: zero removed, nothing pending, friendly message.
        self._setup(tmp_path, monkeypatch)
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 0
        assert remaining == []
        assert "No legacy" in capsys.readouterr().out

    def test_dry_run_lists_without_removing(self, tmp_path, monkeypatch, capsys):
        # dry_run must report what WOULD be removed without touching files
        # or invoking systemctl.
        user_dir, _, calls = self._setup(tmp_path, monkeypatch)
        legacy = user_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(
            interactive=False, dry_run=True
        )
        assert removed == 0
        assert remaining == [legacy]
        assert legacy.exists()  # Not removed
        assert calls == []  # No systemctl invocations
        out = capsys.readouterr().out
        assert "dry-run" in out

    def test_removes_user_scope_legacy_unit(self, tmp_path, monkeypatch, capsys):
        # User-scope removal needs no privileges and must go through the
        # full systemctl lifecycle before unlinking the file.
        user_dir, _, calls = self._setup(tmp_path, monkeypatch)
        legacy = user_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 1
        assert remaining == []
        assert not legacy.exists()
        # Must have invoked stop → disable → daemon-reload on user scope
        cmds_joined = [" ".join(c) for c in calls]
        assert any("--user stop hermes.service" in c for c in cmds_joined)
        assert any("--user disable hermes.service" in c for c in cmds_joined)
        assert any("--user daemon-reload" in c for c in cmds_joined)

    def test_system_scope_without_root_defers_removal(self, tmp_path, monkeypatch, capsys):
        # System-scope units require root; without it the unit stays in
        # place and the user is told to re-run under sudo.
        _, system_dir, calls = self._setup(tmp_path, monkeypatch, as_root=False)
        legacy = system_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 0
        assert remaining == [legacy]
        assert legacy.exists()  # Not removed — requires sudo
        out = capsys.readouterr().out
        assert "sudo hermes gateway migrate-legacy" in out

    def test_system_scope_with_root_removes(self, tmp_path, monkeypatch, capsys):
        _, system_dir, calls = self._setup(tmp_path, monkeypatch, as_root=True)
        legacy = system_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 1
        assert remaining == []
        assert not legacy.exists()
        cmds_joined = [" ".join(c) for c in calls]
        # System-scope uses plain "systemctl" (no --user)
        assert any(
            c.startswith("systemctl stop hermes.service") for c in cmds_joined
        )
        assert any(
            c.startswith("systemctl disable hermes.service") for c in cmds_joined
        )

    def test_removes_both_scopes_with_root(self, tmp_path, monkeypatch, capsys):
        # Root can clean up user- and system-scope units in a single pass.
        user_dir, system_dir, _ = self._setup(tmp_path, monkeypatch, as_root=True)
        user_legacy = user_dir / "hermes.service"
        system_legacy = system_dir / "hermes.service"
        user_legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        system_legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 2
        assert remaining == []
        assert not user_legacy.exists()
        assert not system_legacy.exists()

    def test_does_not_touch_profile_units_during_migration(
        self, tmp_path, monkeypatch, capsys
    ):
        """Teknium's constraint: profile units (hermes-gateway-coder.service)
        must survive a migration call, even if we somehow include them in the
        search dir."""
        user_dir, _, _ = self._setup(tmp_path, monkeypatch, as_root=True)
        profile_unit = user_dir / "hermes-gateway-coder.service"
        profile_unit.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        default_unit = user_dir / "hermes-gateway.service"
        default_unit.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=False)
        assert removed == 0
        assert remaining == []
        # Both the profile unit and the current default unit must survive
        assert profile_unit.exists()
        assert default_unit.exists()

    def test_interactive_prompt_no_skips_removal(self, tmp_path, monkeypatch, capsys):
        """When interactive=True and user answers no, no removal happens."""
        user_dir, _, _ = self._setup(tmp_path, monkeypatch)
        legacy = user_dir / "hermes.service"
        legacy.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        # Simulate the user declining the confirmation prompt.
        monkeypatch.setattr(gateway_cli, "prompt_yes_no", lambda *a, **k: False)
        removed, remaining = gateway_cli.remove_legacy_hermes_units(interactive=True)
        assert removed == 0
        assert remaining == [legacy]
        assert legacy.exists()
class TestMigrateLegacyCommand:
    """Tests for the `hermes gateway migrate-legacy` subcommand dispatch."""

    def test_migrate_legacy_subparser_accepts_dry_run_and_yes(self):
        """Verify the `migrate-legacy` subparser is registered on `gateway`.

        `build_parser` is not reliably part of main's public surface, so
        rather than poking argparse internals we run
        `hermes gateway --help` in a subprocess and assert the subcommand
        appears in the help text.
        """
        import subprocess
        import sys

        import hermes_cli.main as cli_main

        # PROJECT_ROOT may live on main or on the gateway module.
        project_root = cli_main.PROJECT_ROOT if hasattr(cli_main, "PROJECT_ROOT") else None
        if project_root is None:
            import hermes_cli.gateway as gw
            project_root = gw.PROJECT_ROOT
        result = subprocess.run(
            [sys.executable, "-m", "hermes_cli.main", "gateway", "--help"],
            cwd=str(project_root),
            capture_output=True,
            text=True,
            timeout=15,
        )
        assert result.returncode == 0
        assert "migrate-legacy" in result.stdout

    def test_gateway_command_migrate_legacy_dispatches(
        self, tmp_path, monkeypatch, capsys
    ):
        """gateway_command(args) with subcmd='migrate-legacy' calls the helper."""
        called = {}

        def fake_remove(interactive=True, dry_run=False):
            called["interactive"] = interactive
            called["dry_run"] = dry_run
            return 0, []

        monkeypatch.setattr(gateway_cli, "remove_legacy_hermes_units", fake_remove)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        args = SimpleNamespace(
            gateway_command="migrate-legacy", dry_run=False, yes=True
        )
        gateway_cli.gateway_command(args)
        # --yes implies a non-interactive removal.
        assert called == {"interactive": False, "dry_run": False}

    def test_gateway_command_migrate_legacy_dry_run_passes_through(
        self, monkeypatch
    ):
        """--dry-run without --yes keeps the helper interactive."""
        called = {}

        def fake_remove(interactive=True, dry_run=False):
            called["interactive"] = interactive
            called["dry_run"] = dry_run
            return 0, []

        monkeypatch.setattr(gateway_cli, "remove_legacy_hermes_units", fake_remove)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        args = SimpleNamespace(
            gateway_command="migrate-legacy", dry_run=True, yes=False
        )
        gateway_cli.gateway_command(args)
        assert called == {"interactive": True, "dry_run": True}

    def test_migrate_legacy_on_unsupported_platform_prints_message(
        self, monkeypatch, capsys
    ):
        """Non-systemd platforms get a clear 'not applicable' message."""
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        args = SimpleNamespace(
            gateway_command="migrate-legacy", dry_run=False, yes=True
        )
        gateway_cli.gateway_command(args)
        out = capsys.readouterr().out
        assert "only applies to systemd" in out
class TestSystemdInstallOffersLegacyRemoval:
    """Verify that systemd_install prompts to remove legacy units first."""

    def test_install_offers_removal_when_legacy_detected(
        self, tmp_path, monkeypatch, capsys
    ):
        """When legacy units exist, install flow should call the removal
        helper before writing the new unit."""
        remove_called = {}

        def fake_remove(interactive=True, dry_run=False):
            remove_called["invoked"] = True
            remove_called["interactive"] = interactive
            return 1, []

        # has_legacy_hermes_units must return True
        monkeypatch.setattr(gateway_cli, "has_legacy_hermes_units", lambda: True)
        monkeypatch.setattr(gateway_cli, "remove_legacy_hermes_units", fake_remove)
        monkeypatch.setattr(gateway_cli, "print_legacy_unit_warning", lambda: None)
        # Answer "yes" to the legacy-removal prompt
        monkeypatch.setattr(gateway_cli, "prompt_yes_no", lambda *a, **k: True)
        # Mock the rest of the install flow
        unit_path = tmp_path / "hermes-gateway.service"
        monkeypatch.setattr(
            gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path
        )
        monkeypatch.setattr(
            gateway_cli,
            "generate_systemd_unit",
            lambda system=False, run_as_user=None: "unit text\n",
        )
        monkeypatch.setattr(
            gateway_cli.subprocess,
            "run",
            lambda cmd, **kw: SimpleNamespace(returncode=0, stdout="", stderr=""),
        )
        monkeypatch.setattr(gateway_cli, "_ensure_linger_enabled", lambda: None)
        gateway_cli.systemd_install()
        assert remove_called.get("invoked") is True
        assert remove_called.get("interactive") is False  # prompted elsewhere

    def test_install_declines_legacy_removal_when_user_says_no(
        self, tmp_path, monkeypatch
    ):
        """When legacy units exist and user declines, install still proceeds
        but doesn't touch them."""
        remove_called = {"invoked": False}

        def fake_remove(interactive=True, dry_run=False):
            remove_called["invoked"] = True
            return 0, []

        monkeypatch.setattr(gateway_cli, "has_legacy_hermes_units", lambda: True)
        monkeypatch.setattr(gateway_cli, "remove_legacy_hermes_units", fake_remove)
        monkeypatch.setattr(gateway_cli, "print_legacy_unit_warning", lambda: None)
        # User answers "no" to the legacy-removal prompt.
        monkeypatch.setattr(gateway_cli, "prompt_yes_no", lambda *a, **k: False)
        unit_path = tmp_path / "hermes-gateway.service"
        monkeypatch.setattr(
            gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path
        )
        monkeypatch.setattr(
            gateway_cli,
            "generate_systemd_unit",
            lambda system=False, run_as_user=None: "unit text\n",
        )
        monkeypatch.setattr(
            gateway_cli.subprocess,
            "run",
            lambda cmd, **kw: SimpleNamespace(returncode=0, stdout="", stderr=""),
        )
        monkeypatch.setattr(gateway_cli, "_ensure_linger_enabled", lambda: None)
        gateway_cli.systemd_install()
        # Helper must NOT have been called
        assert remove_called["invoked"] is False
        # New unit should still have been written
        assert unit_path.exists()
        assert unit_path.read_text() == "unit text\n"

    def test_install_skips_legacy_check_when_none_present(
        self, tmp_path, monkeypatch
    ):
        """No legacy → no prompt, no helper call."""
        prompt_called = {"count": 0}

        def counting_prompt(*a, **k):
            prompt_called["count"] += 1
            return True

        remove_called = {"invoked": False}

        def fake_remove(interactive=True, dry_run=False):
            remove_called["invoked"] = True
            return 0, []

        monkeypatch.setattr(gateway_cli, "has_legacy_hermes_units", lambda: False)
        monkeypatch.setattr(gateway_cli, "remove_legacy_hermes_units", fake_remove)
        monkeypatch.setattr(gateway_cli, "prompt_yes_no", counting_prompt)
        unit_path = tmp_path / "hermes-gateway.service"
        monkeypatch.setattr(
            gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path
        )
        monkeypatch.setattr(
            gateway_cli,
            "generate_systemd_unit",
            lambda system=False, run_as_user=None: "unit text\n",
        )
        monkeypatch.setattr(
            gateway_cli.subprocess,
            "run",
            lambda cmd, **kw: SimpleNamespace(returncode=0, stdout="", stderr=""),
        )
        monkeypatch.setattr(gateway_cli, "_ensure_linger_enabled", lambda: None)
        gateway_cli.systemd_install()
        # Neither the prompt nor the removal helper may fire.
        assert prompt_called["count"] == 0
        assert remove_called["invoked"] is False

View file

@ -178,10 +178,6 @@ class TestGeminiContextLength:
ctx = get_model_context_length("gemma-4-31b-it", provider="gemini")
assert ctx == 256000
def test_gemma_4_26b_context(self):
    """gemma-4-26b-it resolves to the 256k-token context window."""
    assert get_model_context_length("gemma-4-26b-it", provider="gemini") == 256000
def test_gemini_3_context(self):
    """gemini-3.1-pro-preview resolves to the 1M-token context window."""
    assert get_model_context_length("gemini-3.1-pro-preview", provider="gemini") == 1048576
@ -211,6 +207,58 @@ class TestGeminiAgentInit:
assert agent.api_mode == "chat_completions"
assert agent.provider == "gemini"
def test_gemini_uses_x_goog_api_key_not_bearer(self, monkeypatch):
    """Regression test for issue #7893.

    For provider=gemini the OpenAI client must be constructed with
    api_key='not-used' and the real key carried in the 'x-goog-api-key'
    default header; otherwise the SDK injects an Authorization: Bearer
    header, which Google's endpoint rejects with HTTP 400.
    """
    real_key = "AIzaSy_REAL_KEY"
    monkeypatch.setenv("GOOGLE_API_KEY", real_key)
    with patch("run_agent.OpenAI") as mock_openai:
        mock_openai.return_value = MagicMock()
        from run_agent import AIAgent
        AIAgent(
            model="gemini-2.5-flash",
            provider="gemini",
            api_key=real_key,
            base_url="https://generativelanguage.googleapis.com/v1beta/openai",
        )
    kwargs = mock_openai.call_args[1]
    # The real key must never reach api_key — that path emits Bearer.
    assert kwargs.get("api_key") == "not-used", (
        "api_key must be 'not-used' to suppress Authorization: Bearer for Gemini"
    )
    assert kwargs.get("default_headers", {}).get("x-goog-api-key") == real_key, (
        "x-goog-api-key header must carry the real Gemini API key"
    )
def test_gemini_resolve_provider_client_auth(self, monkeypatch):
    """Regression test for issue #7893 — resolve_provider_client path.

    The OpenAI client built by resolve_provider_client('gemini') must
    authenticate via the x-goog-api-key header, never Authorization: Bearer.
    """
    real_key = "AIzaSy_TEST_KEY"
    monkeypatch.setenv("GEMINI_API_KEY", real_key)
    with patch("agent.auxiliary_client.OpenAI") as mock_openai:
        mock_openai.return_value = MagicMock()
        mock_openai.return_value.api_key = "not-used"
        from agent.auxiliary_client import resolve_provider_client
        resolve_provider_client("gemini")
    kwargs = mock_openai.call_args[1]
    assert kwargs.get("api_key") == "not-used", (
        "api_key must be 'not-used' to prevent Bearer injection for Gemini"
    )
    assert kwargs.get("default_headers", {}).get("x-goog-api-key") == real_key, (
        "x-goog-api-key header must carry the real Gemini API key"
    )
# ── models.dev Integration ──

View file

@ -539,3 +539,64 @@ class TestDispatcher:
mcp_command(_make_args(mcp_action=None))
out = capsys.readouterr().out
assert "Commands:" in out or "No MCP servers" in out
# ---------------------------------------------------------------------------
# Tests: Task 7 consolidation — cmd_mcp_remove evicts manager cache,
# cmd_mcp_login forces re-auth
# ---------------------------------------------------------------------------
class TestMcpRemoveEvictsManager:
    """`mcp remove` must evict the server from the in-memory OAuth manager."""

    def test_remove_evicts_in_memory_provider(self, tmp_path, capsys, monkeypatch):
        """After cmd_mcp_remove, the MCPOAuthManager no longer caches the provider."""
        _seed_config(tmp_path, {
            "oauth-srv": {"url": "https://example.com/mcp", "auth": "oauth"},
        })
        # Auto-confirm the "remove?" prompt.
        monkeypatch.setattr("builtins.input", lambda _: "y")
        # Point both the config loader and the environment at the sandbox home.
        monkeypatch.setattr(
            "hermes_cli.mcp_config.get_hermes_home", lambda: tmp_path
        )
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        from tools.mcp_oauth_manager import get_manager, reset_manager_for_tests
        # Reset the process-wide singleton so earlier tests can't leak state.
        reset_manager_for_tests()
        mgr = get_manager()
        mgr.get_or_build_provider(
            "oauth-srv", "https://example.com/mcp", None,
        )
        assert "oauth-srv" in mgr._entries
        from hermes_cli.mcp_config import cmd_mcp_remove
        cmd_mcp_remove(_make_args(name="oauth-srv"))
        # Removal must drop the cached provider entry, not just the config.
        assert "oauth-srv" not in mgr._entries
class TestMcpLogin:
    """Validation failures for `hermes mcp login`."""

    def test_login_rejects_unknown_server(self, tmp_path, capsys):
        """Logging into a server that was never configured prints 'not found'."""
        _seed_config(tmp_path, {})
        from hermes_cli.mcp_config import cmd_mcp_login
        cmd_mcp_login(_make_args(name="ghost"))
        assert "not found" in capsys.readouterr().out

    def test_login_rejects_non_oauth_server(self, tmp_path, capsys):
        """Header-auth servers are not eligible for OAuth login."""
        _seed_config(tmp_path, {
            "srv": {"url": "https://example.com/mcp", "auth": "header"},
        })
        from hermes_cli.mcp_config import cmd_mcp_login
        cmd_mcp_login(_make_args(name="srv"))
        assert "not configured for OAuth" in capsys.readouterr().out

    def test_login_rejects_stdio_server(self, tmp_path, capsys):
        """Stdio (command-based) servers have no URL to authorize against."""
        _seed_config(tmp_path, {
            "srv": {"command": "npx", "args": ["some-server"]},
        })
        from hermes_cli.mcp_config import cmd_mcp_login
        cmd_mcp_login(_make_args(name="srv"))
        output = capsys.readouterr().out
        assert "no URL" in output or "not an OAuth" in output

View file

@ -0,0 +1,157 @@
"""Tests for the `hermes memory reset` CLI command.
Covers:
- Reset both stores (MEMORY.md + USER.md)
- Reset individual stores (--target memory / --target user)
- Skip confirmation with --yes
- Graceful handling when no memory files exist
- Profile-scoped reset (uses HERMES_HOME)
"""
import os
import pytest
from argparse import Namespace
from pathlib import Path
@pytest.fixture
def memory_env(tmp_path, monkeypatch):
    """Set up a fake HERMES_HOME populated with sample MEMORY.md and USER.md."""
    hermes_home = tmp_path / ".hermes"
    memories_dir = hermes_home / "memories"
    memories_dir.mkdir(parents=True)
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    # Seed both memory stores with §-delimited sample entries.
    samples = {
        "MEMORY.md": "§\nHermes repo is at ~/.hermes/hermes-agent\n§\nUser prefers dark themes",
        "USER.md": "§\nUser is Teknium\n§\nTimezone: US Pacific",
    }
    for filename, body in samples.items():
        (memories_dir / filename).write_text(body, encoding="utf-8")
    return hermes_home, memories_dir
def _run_memory_reset(target="all", yes=False, monkeypatch=None, confirm_input="no"):
    """Invoke the memory-reset logic mirrored from cmd_memory in main.py.

    Simulates what happens when `hermes memory reset` is run.

    Args:
        target: "all", "memory" (MEMORY.md only) or "user" (USER.md only).
        yes: skip the confirmation prompt when True.
        monkeypatch: unused; retained so existing call sites stay valid.
        confirm_input: simulated interactive answer used when ``yes`` is False.

    Returns:
        "nothing" when no targeted files exist, "cancelled" when the
        confirmation is declined, "deleted" after successful removal.
    """
    # NOTE: only get_hermes_home is needed here (the previous version also
    # imported display_hermes_home but never used it).
    from hermes_constants import get_hermes_home

    mem_dir = get_hermes_home() / "memories"
    files_to_reset = []
    if target in ("all", "memory"):
        files_to_reset.append(("MEMORY.md", "agent notes"))
    if target in ("all", "user"):
        files_to_reset.append(("USER.md", "user profile"))
    existing = [(f, desc) for f, desc in files_to_reset if (mem_dir / f).exists()]
    if not existing:
        return "nothing"
    # Without --yes, anything other than an explicit "yes" aborts.
    if not yes and confirm_input != "yes":
        return "cancelled"
    for filename, _desc in existing:
        (mem_dir / filename).unlink()
    return "deleted"
class TestMemoryReset:
    """Tests for `hermes memory reset` subcommand."""

    def test_reset_all_with_yes_flag(self, memory_env):
        """--yes skips confirmation and deletes both stores."""
        _home, mem_dir = memory_env
        assert (mem_dir / "MEMORY.md").exists()
        assert (mem_dir / "USER.md").exists()
        assert _run_memory_reset(target="all", yes=True) == "deleted"
        assert not (mem_dir / "MEMORY.md").exists()
        assert not (mem_dir / "USER.md").exists()

    def test_reset_memory_only(self, memory_env):
        """--target memory removes MEMORY.md and leaves USER.md alone."""
        _home, mem_dir = memory_env
        assert _run_memory_reset(target="memory", yes=True) == "deleted"
        assert not (mem_dir / "MEMORY.md").exists()
        assert (mem_dir / "USER.md").exists()

    def test_reset_user_only(self, memory_env):
        """--target user removes USER.md and leaves MEMORY.md alone."""
        _home, mem_dir = memory_env
        assert _run_memory_reset(target="user", yes=True) == "deleted"
        assert (mem_dir / "MEMORY.md").exists()
        assert not (mem_dir / "USER.md").exists()

    def test_reset_no_files_exist(self, tmp_path, monkeypatch):
        """An empty memories dir reports 'nothing' to delete."""
        hermes_home = tmp_path / ".hermes"
        (hermes_home / "memories").mkdir(parents=True)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        assert _run_memory_reset(target="all", yes=True) == "nothing"

    def test_reset_confirmation_denied(self, memory_env):
        """Declining the prompt cancels the reset and keeps both files."""
        _home, mem_dir = memory_env
        assert _run_memory_reset(target="all", yes=False, confirm_input="no") == "cancelled"
        assert (mem_dir / "MEMORY.md").exists()
        assert (mem_dir / "USER.md").exists()

    def test_reset_confirmation_accepted(self, memory_env):
        """Typing 'yes' at the prompt proceeds with deletion."""
        _home, mem_dir = memory_env
        assert _run_memory_reset(target="all", yes=False, confirm_input="yes") == "deleted"
        assert not (mem_dir / "MEMORY.md").exists()
        assert not (mem_dir / "USER.md").exists()

    def test_reset_profile_scoped(self, tmp_path, monkeypatch):
        """Reset operates on the active profile's HERMES_HOME."""
        profile_home = tmp_path / "profiles" / "myprofile"
        profile_memories = profile_home / "memories"
        profile_memories.mkdir(parents=True)
        (profile_memories / "MEMORY.md").write_text("profile memory", encoding="utf-8")
        (profile_memories / "USER.md").write_text("profile user", encoding="utf-8")
        monkeypatch.setenv("HERMES_HOME", str(profile_home))
        assert _run_memory_reset(target="all", yes=True) == "deleted"
        assert not (profile_memories / "MEMORY.md").exists()
        assert not (profile_memories / "USER.md").exists()

    def test_reset_partial_files(self, memory_env):
        """Reset still succeeds when only one memory file exists."""
        _home, mem_dir = memory_env
        (mem_dir / "USER.md").unlink()
        assert _run_memory_reset(target="all", yes=True) == "deleted"
        assert not (mem_dir / "MEMORY.md").exists()

    def test_reset_empty_memories_dir(self, tmp_path, monkeypatch):
        """A HERMES_HOME with no memories dir at all reports nothing."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir(parents=True)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        # memories/ is never created, so the helper finds no files to reset.
        assert _run_memory_reset(target="all", yes=True) == "nothing"

View file

@ -93,6 +93,59 @@ class TestCopilotDotPreservation:
assert result == expected
# ── Copilot model-name normalization (issue #6879 regression) ──────────
class TestCopilotModelNormalization:
    """Copilot requires bare dot-notation model IDs.

    Regression coverage for issue #6879 and the broken Copilot branch
    that previously left vendor-prefixed Anthropic IDs (e.g.
    ``anthropic/claude-sonnet-4.6``) and dash-notation Claude IDs (e.g.
    ``claude-sonnet-4-6``) unchanged, causing the Copilot API to reject
    the request with HTTP 400 "model_not_supported".
    """

    @pytest.mark.parametrize("model,expected", [
        # Vendor-prefixed Anthropic IDs — prefix must be stripped.
        ("anthropic/claude-opus-4.6", "claude-opus-4.6"),
        ("anthropic/claude-sonnet-4.6", "claude-sonnet-4.6"),
        ("anthropic/claude-sonnet-4.5", "claude-sonnet-4.5"),
        ("anthropic/claude-haiku-4.5", "claude-haiku-4.5"),
        # Vendor-prefixed OpenAI IDs — prefix must be stripped.
        ("openai/gpt-5.4", "gpt-5.4"),
        ("openai/gpt-4o", "gpt-4o"),
        ("openai/gpt-4o-mini", "gpt-4o-mini"),
        # Dash-notation Claude IDs — must be converted to dot-notation.
        ("claude-opus-4-6", "claude-opus-4.6"),
        ("claude-sonnet-4-6", "claude-sonnet-4.6"),
        ("claude-sonnet-4-5", "claude-sonnet-4.5"),
        ("claude-haiku-4-5", "claude-haiku-4.5"),
        # Combined: vendor-prefixed + dash-notation.
        ("anthropic/claude-opus-4-6", "claude-opus-4.6"),
        ("anthropic/claude-sonnet-4-6", "claude-sonnet-4.6"),
        # Already-canonical inputs pass through unchanged.
        ("claude-sonnet-4.6", "claude-sonnet-4.6"),
        ("gpt-5.4", "gpt-5.4"),
        ("gpt-5-mini", "gpt-5-mini"),
    ])
    def test_copilot_normalization(self, model, expected):
        # Each (input, canonical) pair must round-trip through the normalizer.
        assert normalize_model_for_provider(model, "copilot") == expected

    @pytest.mark.parametrize("model,expected", [
        ("anthropic/claude-sonnet-4.6", "claude-sonnet-4.6"),
        ("claude-sonnet-4-6", "claude-sonnet-4.6"),
        ("claude-opus-4-6", "claude-opus-4.6"),
        ("openai/gpt-5.4", "gpt-5.4"),
    ])
    def test_copilot_acp_normalization(self, model, expected):
        """Copilot ACP shares the same API expectations as HTTP Copilot."""
        assert normalize_model_for_provider(model, "copilot-acp") == expected

    def test_openai_codex_still_strips_openai_prefix(self):
        """Regression: openai-codex must still strip the openai/ prefix."""
        assert normalize_model_for_provider("openai/gpt-5.4", "openai-codex") == "gpt-5.4"
# ── Aggregator providers (regression) ──────────────────────────────────
class TestAggregatorProviders:

View file

@ -0,0 +1,62 @@
"""Tests for the prompt_toolkit /model picker scroll viewport.
Regression for: when a provider exposes many models (e.g. Ollama Cloud's
36+), the picker rendered every choice into a Window with no max height,
clipping the bottom border and any items past the terminal's last row.
The viewport helper now caps visible items and slides the offset to keep
the cursor on screen.
"""
from cli import HermesCLI
_compute = HermesCLI._compute_model_picker_viewport
class TestPickerViewport:
    """Viewport math for the /model picker: cap visible rows, keep cursor on screen."""

    def test_short_list_no_scroll(self):
        """Lists that fit entirely on screen never scroll."""
        start, count = _compute(selected=0, scroll_offset=0, n=5, term_rows=30)
        assert (start, count) == (0, 5)

    def test_long_list_caps_visible_to_chrome_budget(self):
        # 30 rows minus reserved_below=6 minus panel_chrome=6 → max_visible=18.
        start, count = _compute(selected=0, scroll_offset=0, n=36, term_rows=30)
        assert (start, count) == (0, 18)

    def test_cursor_past_window_scrolls_down(self):
        """Moving the cursor below the window slides the window down to it."""
        start, count = _compute(selected=22, scroll_offset=0, n=36, term_rows=30)
        assert count == 18
        assert start <= 22 < start + count

    def test_cursor_above_window_scrolls_up(self):
        """Moving the cursor above the window snaps the offset back up."""
        start, count = _compute(selected=3, scroll_offset=15, n=36, term_rows=30)
        assert start == 3
        assert start <= 3 < start + count

    def test_offset_clamped_to_bottom(self):
        """Selecting the last item keeps a full window, never past list end."""
        start, count = _compute(selected=35, scroll_offset=0, n=36, term_rows=30)
        assert start + count == 36
        assert start <= 35 < start + count

    def test_tiny_terminal_uses_minimum_visible(self):
        """Terminals smaller than the chrome budget fall back to 3 rows."""
        _, count = _compute(selected=0, scroll_offset=0, n=20, term_rows=10)
        assert count == 3

    def test_offset_recovers_after_stage_switch(self):
        """Re-entering the stage with selected=0 collapses a stale offset."""
        start, count = _compute(selected=0, scroll_offset=25, n=36, term_rows=30)
        assert start == 0
        assert start <= 0 < start + count

    def test_full_navigation_keeps_cursor_visible(self):
        """Sweeping the cursor down and back up never loses it off screen."""
        start = 0
        for cursor in list(range(36)) + list(range(35, -1, -1)):
            start, count = _compute(cursor, start, n=36, term_rows=30)
            assert start <= cursor < start + count, (
                f"cursor={cursor} out of view: offset={start} visible={count}"
            )

View file

@ -0,0 +1,101 @@
"""Regression tests for Copilot api_mode recomputation during /model switch.
When switching models within the Copilot provider (e.g. GPT-5 → Claude),
the stale api_mode from resolve_runtime_provider must be overridden with
a fresh value computed from the *new* model. Without the fix, Claude
requests went through the Responses API and failed with
``unsupported_api_for_model``.
"""
from unittest.mock import patch
from hermes_cli.model_switch import switch_model
# Canned validate_requested_model result: the requested model is accepted,
# recognized, and should be persisted, with no warning message.
_MOCK_VALIDATION = {
    "accepted": True,
    "persist": True,
    "recognized": True,
    "message": None,
}
def _run_copilot_switch(
    raw_input: str,
    current_provider: str = "copilot",
    current_model: str = "gpt-5.4",
    explicit_provider: str = "",
    runtime_api_mode: str = "codex_responses",
):
    """Run switch_model with Copilot mocks and return the result.

    ``runtime_api_mode`` is the (possibly stale) api_mode returned by the
    mocked resolve_runtime_provider — the regression under test is that
    this value used to leak into the switch result unmodified.
    """
    with (
        patch("hermes_cli.model_switch.resolve_alias", return_value=None),
        patch("hermes_cli.model_switch.list_provider_models", return_value=[]),
        patch(
            "hermes_cli.runtime_provider.resolve_runtime_provider",
            return_value={
                "api_key": "ghu_test_token",
                "base_url": "https://api.githubcopilot.com",
                "api_mode": runtime_api_mode,
            },
        ),
        patch(
            "hermes_cli.models.validate_requested_model",
            return_value=_MOCK_VALIDATION,
        ),
        # No cached metadata/capabilities and no provider auto-detection,
        # so switch_model must derive the api_mode from the model name alone.
        patch("hermes_cli.model_switch.get_model_info", return_value=None),
        patch("hermes_cli.model_switch.get_model_capabilities", return_value=None),
        patch("hermes_cli.models.detect_provider_for_model", return_value=None),
    ):
        return switch_model(
            raw_input=raw_input,
            current_provider=current_provider,
            current_model=current_model,
            explicit_provider=explicit_provider,
        )
def test_same_provider_copilot_switch_recomputes_api_mode():
    """GPT-5 → Claude on copilot: api_mode must flip to chat_completions."""
    outcome = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="copilot",
        current_model="gpt-5.4",
    )
    assert outcome.success, f"switch_model failed: {outcome.error_message}"
    assert (outcome.new_model, outcome.target_provider) == ("claude-opus-4.6", "copilot")
    assert outcome.api_mode == "chat_completions"
def test_explicit_copilot_switch_uses_selected_model_api_mode():
    """Cross-provider switch to copilot derives api_mode from the new model, not stale runtime."""
    outcome = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="openrouter",
        current_model="anthropic/claude-sonnet-4.6",
        explicit_provider="copilot",
    )
    assert outcome.success, f"switch_model failed: {outcome.error_message}"
    assert (outcome.new_model, outcome.target_provider) == (
        "claude-opus-4.6", "github-copilot"
    )
    assert outcome.api_mode == "chat_completions"
def test_copilot_gpt5_keeps_codex_responses():
    """GPT-5 → GPT-5 on copilot: api_mode must stay codex_responses."""
    outcome = _run_copilot_switch(
        raw_input="gpt-5.4-mini",
        current_provider="copilot",
        current_model="gpt-5.4",
        runtime_api_mode="codex_responses",
    )
    assert outcome.success, f"switch_model failed: {outcome.error_message}"
    assert (outcome.new_model, outcome.target_provider) == ("gpt-5.4-mini", "copilot")
    # gpt-5.4-mini is a GPT-5 variant (not the gpt-5-mini special case that
    # uses chat_completions), so the Responses API mode is retained.
    assert outcome.api_mode == "codex_responses"

View file

@ -0,0 +1,252 @@
"""Regression tests for OpenCode /v1 stripping during /model switch.
When switching to an Anthropic-routed OpenCode model mid-session (e.g.
``/model minimax-m2.7`` on opencode-go, or ``/model claude-sonnet-4-6``
on opencode-zen), the resolved base_url must have its trailing ``/v1``
stripped before being handed to the Anthropic SDK.
Without the strip, the SDK prepends its own ``/v1/messages`` path and
requests hit ``https://opencode.ai/zen/go/v1/v1/messages`` — a double
``/v1`` that returns OpenCode's website 404 page with an HTML body.
``hermes_cli.runtime_provider.resolve_runtime_provider`` already strips
``/v1`` at fresh agent init (PR #4918), but the ``/model`` mid-session
switch path in ``hermes_cli.model_switch.switch_model`` was missing the
same logic these tests guard against that regression.
"""
from unittest.mock import patch
import pytest
from hermes_cli.model_switch import switch_model
# Canned validate_requested_model result: the requested model is accepted,
# recognized, and should be persisted, with no warning message.
_MOCK_VALIDATION = {
    "accepted": True,
    "persist": True,
    "recognized": True,
    "message": None,
}
def _run_opencode_switch(
    raw_input: str,
    current_provider: str,
    current_model: str,
    current_base_url: str,
    explicit_provider: str = "",
    runtime_base_url: str = "",
):
    """Run switch_model with OpenCode mocks and return the result.

    runtime_base_url defaults to current_base_url; tests can override it
    to simulate the credential resolver returning a base_url different
    from the session's current one.
    """
    effective_runtime_base = runtime_base_url or current_base_url
    with (
        patch("hermes_cli.model_switch.resolve_alias", return_value=None),
        patch("hermes_cli.model_switch.list_provider_models", return_value=[]),
        patch(
            "hermes_cli.runtime_provider.resolve_runtime_provider",
            return_value={
                "api_key": "sk-opencode-fake",
                "base_url": effective_runtime_base,
                # The resolver always reports chat_completions; the
                # Anthropic routing (and /v1 strip) happens in switch_model.
                "api_mode": "chat_completions",
            },
        ),
        patch(
            "hermes_cli.models.validate_requested_model",
            return_value=_MOCK_VALIDATION,
        ),
        # No cached metadata/capabilities and no provider auto-detection.
        patch("hermes_cli.model_switch.get_model_info", return_value=None),
        patch("hermes_cli.model_switch.get_model_capabilities", return_value=None),
        patch("hermes_cli.models.detect_provider_for_model", return_value=None),
    ):
        return switch_model(
            raw_input=raw_input,
            current_provider=current_provider,
            current_model=current_model,
            current_base_url=current_base_url,
            current_api_key="sk-opencode-fake",
            explicit_provider=explicit_provider,
        )
class TestOpenCodeGoV1Strip:
    """OpenCode Go: ``/model minimax-*`` must strip /v1."""

    @staticmethod
    def _switch(model, *, from_model, base_url, runtime_base_url=""):
        # Thin wrapper pinning the provider to opencode-go.
        return _run_opencode_switch(
            raw_input=model,
            current_provider="opencode-go",
            current_model=from_model,
            current_base_url=base_url,
            runtime_base_url=runtime_base_url,
        )

    def test_switch_to_minimax_m27_strips_v1(self):
        """GLM-5 → MiniMax-M2.7: base_url loses trailing /v1."""
        result = self._switch(
            "minimax-m2.7",
            from_model="glm-5",
            base_url="https://opencode.ai/zen/go/v1",
        )
        assert result.success, f"switch_model failed: {result.error_message}"
        assert result.api_mode == "anthropic_messages"
        assert result.base_url == "https://opencode.ai/zen/go", (
            f"Expected /v1 stripped for anthropic_messages; got {result.base_url}"
        )

    def test_switch_to_minimax_m25_strips_v1(self):
        """Same behavior for M2.5."""
        result = self._switch(
            "minimax-m2.5",
            from_model="kimi-k2.5",
            base_url="https://opencode.ai/zen/go/v1",
        )
        assert result.success
        assert result.api_mode == "anthropic_messages"
        assert result.base_url == "https://opencode.ai/zen/go"

    def test_switch_to_glm_leaves_v1_intact(self):
        """OpenAI-compatible models (GLM, Kimi, MiMo) keep /v1."""
        result = self._switch(
            "glm-5.1",
            from_model="minimax-m2.7",
            # Session URL was stripped by the previous Anthropic model; the
            # credential resolver hands the /v1 form back.
            base_url="https://opencode.ai/zen/go",
            runtime_base_url="https://opencode.ai/zen/go/v1",
        )
        assert result.success
        assert result.api_mode == "chat_completions"
        assert result.base_url == "https://opencode.ai/zen/go/v1", (
            f"chat_completions must keep /v1; got {result.base_url}"
        )

    def test_switch_to_kimi_leaves_v1_intact(self):
        result = self._switch(
            "kimi-k2.5",
            from_model="glm-5",
            base_url="https://opencode.ai/zen/go/v1",
        )
        assert result.success
        assert result.api_mode == "chat_completions"
        assert result.base_url == "https://opencode.ai/zen/go/v1"

    def test_trailing_slash_also_stripped(self):
        """``/v1/`` with trailing slash is also stripped cleanly."""
        result = self._switch(
            "minimax-m2.7",
            from_model="glm-5",
            base_url="https://opencode.ai/zen/go/v1/",
        )
        assert result.success
        assert result.api_mode == "anthropic_messages"
        assert result.base_url == "https://opencode.ai/zen/go"
class TestOpenCodeZenV1Strip:
    """OpenCode Zen: ``/model claude-*`` must strip /v1."""

    @staticmethod
    def _switch(model, *, from_model, base_url, runtime_base_url=""):
        # Thin wrapper pinning the provider to opencode-zen.
        return _run_opencode_switch(
            raw_input=model,
            current_provider="opencode-zen",
            current_model=from_model,
            current_base_url=base_url,
            runtime_base_url=runtime_base_url,
        )

    def test_switch_to_claude_sonnet_strips_v1(self):
        """Gemini → Claude on opencode-zen: /v1 stripped."""
        result = self._switch(
            "claude-sonnet-4-6",
            from_model="gemini-3-flash",
            base_url="https://opencode.ai/zen/v1",
        )
        assert result.success
        assert result.api_mode == "anthropic_messages"
        assert result.base_url == "https://opencode.ai/zen"

    def test_switch_to_gemini_leaves_v1_intact(self):
        """Gemini on opencode-zen stays on chat_completions with /v1."""
        result = self._switch(
            "gemini-3-flash",
            from_model="claude-sonnet-4-6",
            # Stripped by the previous Claude model; resolver returns /v1.
            base_url="https://opencode.ai/zen",
            runtime_base_url="https://opencode.ai/zen/v1",
        )
        assert result.success
        assert result.api_mode == "chat_completions"
        assert result.base_url == "https://opencode.ai/zen/v1"

    def test_switch_to_gpt_uses_codex_responses_keeps_v1(self):
        """GPT on opencode-zen uses codex_responses api_mode — /v1 kept."""
        result = self._switch(
            "gpt-5.4",
            from_model="claude-sonnet-4-6",
            base_url="https://opencode.ai/zen",
            runtime_base_url="https://opencode.ai/zen/v1",
        )
        assert result.success
        assert result.api_mode == "codex_responses"
        assert result.base_url == "https://opencode.ai/zen/v1"
class TestAgentSwitchModelDefenseInDepth:
    """run_agent.AIAgent.switch_model() also strips /v1 as defense-in-depth."""

    def test_agent_switch_model_strips_v1_for_anthropic_messages(self):
        """Even if a caller hands in a /v1 URL, the agent strips it.

        Fix: removed the unused ``_fake_build_anthropic_client`` stub — only
        the raising variant below was ever wired into ``patch``.
        """
        from run_agent import AIAgent
        # Build a bare agent instance without running __init__; we only want
        # to exercise switch_model's base_url normalization logic.
        agent = AIAgent.__new__(AIAgent)
        agent.model = "glm-5"
        agent.provider = "opencode-go"
        agent.base_url = "https://opencode.ai/zen/go/v1"
        agent.api_key = "sk-opencode-fake"
        agent.api_mode = "chat_completions"
        agent._client_kwargs = {}
        # Capture what reaches the Anthropic client factory. The downstream
        # cache/plumbing touches private state that was never initialized
        # above, so we short-circuit right after the strip by raising from
        # the stubbed factory.
        captured = {}

        class _Sentinel(Exception):
            pass

        def _raise_after_capture(api_key, base_url):
            captured["api_key"] = api_key
            captured["base_url"] = base_url
            raise _Sentinel("strip verified")

        with patch(
            "agent.anthropic_adapter.build_anthropic_client",
            side_effect=_raise_after_capture,
        ), patch("agent.anthropic_adapter.resolve_anthropic_token", return_value=""), patch(
            "agent.anthropic_adapter._is_oauth_token", return_value=False
        ):
            with pytest.raises(_Sentinel):
                agent.switch_model(
                    new_model="minimax-m2.7",
                    new_provider="opencode-go",
                    api_key="sk-opencode-fake",
                    base_url="https://opencode.ai/zen/go/v1",
                    api_mode="anthropic_messages",
                )
        assert captured.get("base_url") == "https://opencode.ai/zen/go", (
            f"agent.switch_model did not strip /v1; passed {captured.get('base_url')} "
            "to build_anthropic_client"
        )

View file

@ -163,7 +163,7 @@ class TestNormalizeProvider:
class TestProviderLabel:
def test_known_labels_and_auto(self):
assert provider_label("anthropic") == "Anthropic"
assert provider_label("kimi") == "Kimi / Moonshot"
assert provider_label("kimi") == "Kimi / Kimi Coding Plan"
assert provider_label("copilot") == "GitHub Copilot"
assert provider_label("copilot-acp") == "GitHub Copilot ACP"
assert provider_label("auto") == "Auto"
@ -370,6 +370,8 @@ class TestCopilotNormalization:
assert opencode_model_api_mode("opencode-zen", "minimax-m2.5") == "chat_completions"
def test_opencode_go_api_modes_match_docs(self):
assert opencode_model_api_mode("opencode-go", "glm-5.1") == "chat_completions"
assert opencode_model_api_mode("opencode-go", "opencode-go/glm-5.1") == "chat_completions"
assert opencode_model_api_mode("opencode-go", "glm-5") == "chat_completions"
assert opencode_model_api_mode("opencode-go", "opencode-go/glm-5") == "chat_completions"
assert opencode_model_api_mode("opencode-go", "kimi-k2.5") == "chat_completions"
@ -401,7 +403,8 @@ class TestValidateFormatChecks:
def test_no_slash_model_rejected_if_not_in_api(self):
result = _validate("gpt-5.4", api_models=["openai/gpt-5.4"])
assert result["accepted"] is True
assert result["accepted"] is False
assert result["persist"] is False
assert "not found" in result["message"]
@ -427,10 +430,10 @@ class TestValidateApiFound:
# -- validate — API not found ------------------------------------------------
class TestValidateApiNotFound:
def test_model_not_in_api_accepted_with_warning(self):
def test_model_not_in_api_rejected_with_guidance(self):
result = _validate("anthropic/claude-nonexistent")
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
assert "not found" in result["message"]
def test_warning_includes_suggestions(self):
@ -447,37 +450,36 @@ class TestValidateApiNotFound:
assert result["recognized"] is True
def test_dissimilar_model_shows_suggestions_not_autocorrect(self):
"""Models too different for auto-correction still get suggestions."""
"""Models too different for auto-correction are rejected with suggestions."""
result = _validate("anthropic/claude-nonexistent")
assert result["accepted"] is True
assert result["accepted"] is False
assert result.get("corrected_model") is None
assert "not found" in result["message"]
# -- validate — API unreachable — accept and persist everything ----------------
# -- validate — API unreachable — reject with guidance ----------------
class TestValidateApiFallback:
def test_any_model_accepted_when_api_down(self):
def test_any_model_rejected_when_api_down(self):
result = _validate("anthropic/claude-opus-4.6", api_models=None)
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
def test_unknown_model_also_accepted_when_api_down(self):
"""No hardcoded catalog gatekeeping — accept, persist, and warn."""
def test_unknown_model_also_rejected_when_api_down(self):
result = _validate("anthropic/claude-next-gen", api_models=None)
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
assert "could not reach" in result["message"].lower()
def test_zai_model_accepted_when_api_down(self):
def test_zai_model_rejected_when_api_down(self):
result = _validate("glm-5", provider="zai", api_models=None)
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
def test_unknown_provider_accepted_when_api_down(self):
def test_unknown_provider_rejected_when_api_down(self):
result = _validate("some-model", provider="totally-unknown", api_models=None)
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
def test_custom_endpoint_warns_with_probed_url_and_v1_hint(self):
with patch(
@ -497,8 +499,8 @@ class TestValidateApiFallback:
base_url="http://localhost:8000",
)
assert result["accepted"] is True
assert result["persist"] is True
assert result["accepted"] is False
assert result["persist"] is False
assert "http://localhost:8000/v1/models" in result["message"]
assert "http://localhost:8000/v1" in result["message"]
@ -530,11 +532,11 @@ class TestValidateCodexAutoCorrection:
assert result["message"] is None
def test_very_different_name_falls_to_suggestions(self):
"""Names too different for auto-correction get the suggestion list."""
"""Names too different for auto-correction are rejected with a suggestion list."""
codex_models = ["gpt-5.4-mini", "gpt-5.4", "gpt-5.3-codex"]
with patch("hermes_cli.models.provider_model_ids", return_value=codex_models):
result = validate_requested_model("totally-wrong", "openai-codex")
assert result["accepted"] is True
assert result["accepted"] is False
assert result["recognized"] is False
assert result.get("corrected_model") is None
assert "not found" in result["message"]

View file

@ -0,0 +1,83 @@
"""Tests for non-ASCII credential detection and sanitization.
Covers the fix for issue #6843 — API keys containing Unicode lookalike
characters (e.g. ʋ U+028B instead of v) cause UnicodeEncodeError when
httpx tries to encode the Authorization header as ASCII.
"""
import os
import sys
import tempfile
import pytest
from hermes_cli.config import _check_non_ascii_credential
class TestCheckNonAsciiCredential:
    """Tests for _check_non_ascii_credential()."""

    def test_ascii_key_unchanged(self):
        clean = "sk-proj-" + "a" * 100
        assert _check_non_ascii_credential("TEST_API_KEY", clean) == clean

    def test_strips_unicode_v_lookalike(self, capsys):
        """The exact scenario from issue #6843: ʋ instead of v."""
        sanitized = _check_non_ascii_credential("OPENROUTER_API_KEY", "sk-proj-abcʋdef")  # \u028b
        assert sanitized == "sk-proj-abcdef"
        assert "ʋ" not in sanitized
        # A warning must land on stderr
        assert "non-ASCII" in capsys.readouterr().err

    def test_strips_multiple_non_ascii(self, capsys):
        sanitized = _check_non_ascii_credential("OPENAI_API_KEY", "sk-proj-aʋbécd")
        assert sanitized == "sk-proj-abcd"
        # the offending code point is named in the warning
        assert "U+028B" in capsys.readouterr().err

    def test_empty_key(self):
        assert _check_non_ascii_credential("TEST_KEY", "") == ""

    def test_all_ascii_no_warning(self, capsys):
        value = "all-ascii-value-123"
        assert _check_non_ascii_credential("KEY", value) == value
        assert capsys.readouterr().err == ""
class TestEnvLoaderSanitization:
    """Tests for _sanitize_loaded_credentials in env_loader."""

    @staticmethod
    def _sanitize():
        # Imported lazily, after the env has been monkeypatched.
        from hermes_cli.env_loader import _sanitize_loaded_credentials
        _sanitize_loaded_credentials()

    def test_strips_non_ascii_from_api_key(self, monkeypatch):
        monkeypatch.setenv("OPENROUTER_API_KEY", "sk-proj-abcʋdef")
        self._sanitize()
        assert os.environ["OPENROUTER_API_KEY"] == "sk-proj-abcdef"

    def test_strips_non_ascii_from_token(self, monkeypatch):
        monkeypatch.setenv("DISCORD_BOT_TOKEN", "tokénvalue")
        self._sanitize()
        assert os.environ["DISCORD_BOT_TOKEN"] == "toknvalue"

    def test_ignores_non_credential_vars(self, monkeypatch):
        monkeypatch.setenv("MY_UNICODE_VAR", "héllo wörld")
        self._sanitize()
        # No credential suffix — the value must be left alone.
        assert os.environ["MY_UNICODE_VAR"] == "héllo wörld"

    def test_ascii_credentials_untouched(self, monkeypatch):
        monkeypatch.setenv("OPENAI_API_KEY", "sk-proj-allascii123")
        self._sanitize()
        assert os.environ["OPENAI_API_KEY"] == "sk-proj-allascii123"

View file

@ -24,7 +24,7 @@ def test_get_nous_subscription_features_recognizes_direct_exa_backend(monkeypatc
def test_get_nous_subscription_features_prefers_managed_modal_in_auto_mode(monkeypatch):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("tools.tool_backend_helpers.managed_nous_tools_enabled", lambda: True)
monkeypatch.setattr(ns, "get_env_value", lambda name: "")
monkeypatch.setattr(ns, "get_nous_auth_status", lambda: {"logged_in": True})
monkeypatch.setattr(ns, "managed_nous_tools_enabled", lambda: True)

View file

@ -0,0 +1,410 @@
"""Tests for Ollama Cloud provider integration."""
import os
import pytest
from unittest.mock import patch, MagicMock
from hermes_cli.auth import PROVIDER_REGISTRY, resolve_provider, resolve_api_key_provider_credentials
from hermes_cli.models import _PROVIDER_MODELS, _PROVIDER_LABELS, _PROVIDER_ALIASES, normalize_provider
from hermes_cli.model_normalize import normalize_model_for_provider
from agent.model_metadata import _URL_TO_PROVIDER, _PROVIDER_PREFIXES
from agent.models_dev import PROVIDER_TO_MODELS_DEV, list_agentic_models
# ── Provider Registry ──
class TestOllamaCloudProviderRegistry:
    """Static registry entry for Ollama Cloud."""

    def test_ollama_cloud_in_registry(self):
        assert "ollama-cloud" in PROVIDER_REGISTRY

    def test_ollama_cloud_config(self):
        entry = PROVIDER_REGISTRY["ollama-cloud"]
        assert (entry.id, entry.name, entry.auth_type) == (
            "ollama-cloud", "Ollama Cloud", "api_key"
        )
        assert entry.inference_base_url == "https://ollama.com/v1"

    def test_ollama_cloud_env_vars(self):
        entry = PROVIDER_REGISTRY["ollama-cloud"]
        assert entry.api_key_env_vars == ("OLLAMA_API_KEY",)
        assert entry.base_url_env_var == "OLLAMA_BASE_URL"

    def test_ollama_cloud_base_url(self):
        assert "ollama.com" in PROVIDER_REGISTRY["ollama-cloud"].inference_base_url
# ── Provider Aliases ──
# Every provider credential variable the alias / auto-detection tests may
# observe; cleared before each test so a developer's ambient credentials
# can't leak into resolve_provider("auto") results.
PROVIDER_ENV_VARS = (
    "OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
    "GOOGLE_API_KEY", "GEMINI_API_KEY", "OLLAMA_API_KEY",
    "GLM_API_KEY", "ZAI_API_KEY", "KIMI_API_KEY",
    "MINIMAX_API_KEY", "DEEPSEEK_API_KEY",
)
@pytest.fixture(autouse=True)
def _clean_provider_env(monkeypatch):
    # autouse: every test in this module starts with a clean provider env.
    for var in PROVIDER_ENV_VARS:
        monkeypatch.delenv(var, raising=False)
class TestOllamaCloudAliases:
    """Only unambiguous aliases resolve to the cloud provider."""

    def test_explicit_ollama_cloud(self):
        assert resolve_provider("ollama-cloud") == "ollama-cloud"

    def test_alias_ollama_underscore(self):
        """ollama_cloud (underscore) is the unambiguous cloud alias."""
        assert resolve_provider("ollama_cloud") == "ollama-cloud"

    def test_bare_ollama_stays_local(self):
        """Bare 'ollama' alias routes to 'custom' (local) — not cloud."""
        assert resolve_provider("ollama") == "custom"

    def test_models_py_aliases(self):
        # models.py mirrors the same split: cloud alias maps across,
        # bare "ollama" stays local.
        assert _PROVIDER_ALIASES.get("ollama_cloud") == "ollama-cloud"
        assert _PROVIDER_ALIASES.get("ollama") == "custom"

    def test_normalize_provider(self):
        assert normalize_provider("ollama-cloud") == "ollama-cloud"
# ── Auto-detection ──
class TestOllamaCloudAutoDetection:
    """'auto' provider selection picks up OLLAMA_API_KEY."""

    def test_auto_detects_ollama_api_key(self, monkeypatch):
        monkeypatch.setenv("OLLAMA_API_KEY", "test-ollama-key")
        resolved = resolve_provider("auto")
        assert resolved == "ollama-cloud"
# ── Credential Resolution ──
class TestOllamaCloudCredentials:
    """Credential and runtime resolution for ollama-cloud."""

    def test_resolve_with_ollama_api_key(self, monkeypatch):
        monkeypatch.setenv("OLLAMA_API_KEY", "ollama-secret")
        creds = resolve_api_key_provider_credentials("ollama-cloud")
        expected = {
            "provider": "ollama-cloud",
            "api_key": "ollama-secret",
            "base_url": "https://ollama.com/v1",
        }
        for field, value in expected.items():
            assert creds[field] == value

    def test_resolve_with_custom_base_url(self, monkeypatch):
        monkeypatch.setenv("OLLAMA_API_KEY", "key")
        # OLLAMA_BASE_URL overrides the registry default.
        monkeypatch.setenv("OLLAMA_BASE_URL", "https://custom.ollama/v1")
        creds = resolve_api_key_provider_credentials("ollama-cloud")
        assert creds["base_url"] == "https://custom.ollama/v1"

    def test_runtime_ollama_cloud(self, monkeypatch):
        monkeypatch.setenv("OLLAMA_API_KEY", "ollama-key")
        from hermes_cli.runtime_provider import resolve_runtime_provider
        runtime = resolve_runtime_provider(requested="ollama-cloud")
        expected = {
            "provider": "ollama-cloud",
            "api_mode": "chat_completions",
            "api_key": "ollama-key",
            "base_url": "https://ollama.com/v1",
        }
        for field, value in expected.items():
            assert runtime[field] == value
# ── Model Catalog (dynamic — no static list) ──
class TestOllamaCloudModelCatalog:
    """Catalog is dynamic: no static list, label present, live fetch wired in."""
    def test_no_static_model_list(self):
        """Ollama Cloud models are fetched dynamically — no static list to maintain."""
        assert "ollama-cloud" not in _PROVIDER_MODELS
    def test_provider_label(self):
        assert "ollama-cloud" in _PROVIDER_LABELS
        assert _PROVIDER_LABELS["ollama-cloud"] == "Ollama Cloud"
    def test_provider_model_ids_returns_dynamic_models(self, tmp_path, monkeypatch):
        """provider_model_ids('ollama-cloud') should call fetch_ollama_cloud_models()."""
        from hermes_cli.models import provider_model_ids
        # Sandbox the on-disk cache under tmp_path and provide a key so the
        # live-API branch is taken (both network calls are mocked below).
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        # models.dev payload: two tool-calling models under ollama-cloud.
        mock_mdev = {
            "ollama-cloud": {
                "models": {
                    "qwen3.5:397b": {"tool_call": True},
                    "glm-5": {"tool_call": True},
                }
            }
        }
        with patch("hermes_cli.models.fetch_api_models", return_value=["qwen3.5:397b"]), \
                patch("agent.models_dev.fetch_models_dev", return_value=mock_mdev):
            result = provider_model_ids("ollama-cloud", force_refresh=True)
        # The mocked live model must surface through provider_model_ids.
        assert len(result) > 0
        assert "qwen3.5:397b" in result
# ── Model Picker (list_authenticated_providers) ──
class TestOllamaCloudModelPicker:
    """list_authenticated_providers() visibility rules for ollama-cloud."""
    def test_ollama_cloud_shows_model_count(self, tmp_path, monkeypatch):
        """Ollama Cloud should show non-zero model count in provider picker."""
        from hermes_cli.model_switch import list_authenticated_providers
        # Sandbox the cache dir and supply a key so the provider counts as
        # authenticated; both discovery paths are mocked below.
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        mock_mdev = {
            "ollama-cloud": {
                "models": {
                    "qwen3.5:397b": {"tool_call": True},
                    "glm-5": {"tool_call": True},
                }
            }
        }
        with patch("hermes_cli.models.fetch_api_models", return_value=["qwen3.5:397b"]), \
                patch("agent.models_dev.fetch_models_dev", return_value=mock_mdev):
            providers = list_authenticated_providers(current_provider="ollama-cloud")
        ollama = next((p for p in providers if p["slug"] == "ollama-cloud"), None)
        assert ollama is not None, "ollama-cloud should appear when OLLAMA_API_KEY is set"
        assert ollama["total_models"] > 0, "ollama-cloud should show non-zero model count"
    def test_ollama_cloud_not_shown_without_creds(self, monkeypatch):
        """Ollama Cloud should not appear without credentials."""
        from hermes_cli.model_switch import list_authenticated_providers
        # No OLLAMA_API_KEY in the environment — provider must be hidden.
        monkeypatch.delenv("OLLAMA_API_KEY", raising=False)
        providers = list_authenticated_providers(current_provider="openrouter")
        ollama = next((p for p in providers if p["slug"] == "ollama-cloud"), None)
        assert ollama is None, "ollama-cloud should not appear without OLLAMA_API_KEY"
# ── Merged Model Discovery ──
class TestOllamaCloudMergedDiscovery:
    """fetch_ollama_cloud_models: live-API + models.dev merge and cache behavior."""
    def test_merges_live_and_models_dev(self, tmp_path, monkeypatch):
        """Live API models appear first, models.dev additions fill gaps."""
        from hermes_cli.models import fetch_ollama_cloud_models
        # Sandbox the disk cache and enable the live-API path (mocked below).
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        mock_mdev = {
            "ollama-cloud": {
                "models": {
                    "glm-5": {"tool_call": True},
                    "kimi-k2.5": {"tool_call": True},
                    "nemotron-3-super": {"tool_call": True},
                }
            }
        }
        with patch("hermes_cli.models.fetch_api_models", return_value=["qwen3.5:397b", "glm-5"]), \
                patch("agent.models_dev.fetch_models_dev", return_value=mock_mdev):
            result = fetch_ollama_cloud_models(force_refresh=True)
        # Live models first, then models.dev additions (deduped)
        assert result[0] == "qwen3.5:397b"  # from live API
        assert result[1] == "glm-5"  # from live API (also in models.dev)
        assert "kimi-k2.5" in result  # from models.dev only
        assert "nemotron-3-super" in result  # from models.dev only
        assert result.count("glm-5") == 1  # no duplicates
    def test_falls_back_to_models_dev_without_api_key(self, tmp_path, monkeypatch):
        """Without API key, only models.dev results are returned."""
        from hermes_cli.models import fetch_ollama_cloud_models
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.delenv("OLLAMA_API_KEY", raising=False)
        mock_mdev = {
            "ollama-cloud": {
                "models": {
                    "glm-5": {"tool_call": True},
                }
            }
        }
        with patch("agent.models_dev.fetch_models_dev", return_value=mock_mdev):
            result = fetch_ollama_cloud_models(force_refresh=True)
        assert result == ["glm-5"]
    def test_uses_disk_cache(self, tmp_path, monkeypatch):
        """Second call returns cached results without hitting APIs."""
        from hermes_cli.models import fetch_ollama_cloud_models
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        with patch("hermes_cli.models.fetch_api_models", return_value=["model-a"]) as mock_api, \
                patch("agent.models_dev.fetch_models_dev", return_value={}):
            first = fetch_ollama_cloud_models(force_refresh=True)
            assert first == ["model-a"]
            assert mock_api.call_count == 1
            # Second call — should use disk cache, not call API
            second = fetch_ollama_cloud_models()
            assert second == ["model-a"]
            assert mock_api.call_count == 1  # no extra API call
    def test_force_refresh_bypasses_cache(self, tmp_path, monkeypatch):
        """force_refresh=True always hits the API even with fresh cache."""
        from hermes_cli.models import fetch_ollama_cloud_models
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        with patch("hermes_cli.models.fetch_api_models", return_value=["model-a"]) as mock_api, \
                patch("agent.models_dev.fetch_models_dev", return_value={}):
            fetch_ollama_cloud_models(force_refresh=True)
            fetch_ollama_cloud_models(force_refresh=True)
            # Two forced refreshes → two live API hits.
            assert mock_api.call_count == 2
    def test_stale_cache_used_on_total_failure(self, tmp_path, monkeypatch):
        """If both API and models.dev fail, stale cache is returned."""
        from hermes_cli.models import fetch_ollama_cloud_models, _save_ollama_cloud_cache
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        # Pre-populate a stale cache
        _save_ollama_cloud_cache(["stale-model"])
        # Make the cache appear stale by backdating it
        import json
        cache_path = tmp_path / "ollama_cloud_models_cache.json"
        with open(cache_path) as f:
            data = json.load(f)
        data["cached_at"] = 0  # epoch = very stale
        with open(cache_path, "w") as f:
            json.dump(data, f)
        # fetch_api_models returning None models the API being unreachable.
        with patch("hermes_cli.models.fetch_api_models", return_value=None), \
                patch("agent.models_dev.fetch_models_dev", return_value={}):
            result = fetch_ollama_cloud_models(force_refresh=True)
        assert result == ["stale-model"]
    def test_empty_on_total_failure_no_cache(self, tmp_path, monkeypatch):
        """Returns empty list when everything fails and no cache exists."""
        from hermes_cli.models import fetch_ollama_cloud_models
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        monkeypatch.delenv("OLLAMA_API_KEY", raising=False)
        with patch("agent.models_dev.fetch_models_dev", return_value={}):
            result = fetch_ollama_cloud_models(force_refresh=True)
        assert result == []
# ── Model Normalization ──
class TestOllamaCloudModelNormalization:
    """Ollama Cloud is a passthrough provider — model names are used verbatim."""

    @staticmethod
    def _is_passthrough(model_id):
        return normalize_model_for_provider(model_id, "ollama-cloud") == model_id

    def test_passthrough_bare_name(self):
        assert self._is_passthrough("qwen3.5:397b")

    def test_passthrough_with_tag(self):
        assert self._is_passthrough("cogito-2.1:671b")

    def test_passthrough_no_tag(self):
        assert self._is_passthrough("glm-5")
# ── URL-to-Provider Mapping ──
class TestOllamaCloudUrlMapping:
    """Reverse URL mapping and model-prefix registration."""

    def test_url_to_provider(self):
        mapped = _URL_TO_PROVIDER.get("ollama.com")
        assert mapped == "ollama-cloud"

    def test_provider_prefix_canonical(self):
        # canonical slug is registered as a model prefix
        assert "ollama-cloud" in _PROVIDER_PREFIXES

    def test_provider_prefix_alias(self):
        # the short alias is registered too
        assert "ollama" in _PROVIDER_PREFIXES
# ── models.dev Integration ──
class TestOllamaCloudModelsDev:
    """models.dev catalog wiring for ollama-cloud."""

    def test_ollama_cloud_mapped(self):
        assert PROVIDER_TO_MODELS_DEV.get("ollama-cloud") == "ollama-cloud"

    def test_list_agentic_models_with_mock_data(self):
        """list_agentic_models filters correctly from mock models.dev data."""
        catalog = {
            "qwen3.5:397b": {"tool_call": True},
            "glm-5": {"tool_call": True},
            "nemotron-3-nano:30b": {"tool_call": True},
            "some-embedding:latest": {"tool_call": False},
        }
        mock_data = {"ollama-cloud": {"models": catalog}}
        with patch("agent.models_dev.fetch_models_dev", return_value=mock_data):
            result = list_agentic_models("ollama-cloud")
        # Every tool-calling model survives the filter...
        for model_id in ("qwen3.5:397b", "glm-5", "nemotron-3-nano:30b"):
            assert model_id in result
        # ...while the non-agentic embedding entry is dropped.
        assert "some-embedding:latest" not in result
# ── Agent Init (no SyntaxError) ──
class TestOllamaCloudAgentInit:
    """Agent construction for ollama-cloud — importable, correct api_mode."""
    def test_agent_imports_without_error(self):
        """Verify run_agent.py has no SyntaxError."""
        import importlib
        import run_agent
        # reload re-executes the module even if another test imported it first
        importlib.reload(run_agent)
    def test_ollama_cloud_agent_uses_chat_completions(self, monkeypatch):
        """Ollama Cloud falls through to chat_completions — no special elif needed."""
        monkeypatch.setenv("OLLAMA_API_KEY", "test-key")
        # Stub the OpenAI client constructor so no real client is built.
        with patch("run_agent.OpenAI") as mock_openai:
            mock_openai.return_value = MagicMock()
            from run_agent import AIAgent
            agent = AIAgent(
                model="qwen3.5:397b",
                provider="ollama-cloud",
                api_key="test-key",
                base_url="https://ollama.com/v1",
            )
            assert agent.api_mode == "chat_completions"
            assert agent.provider == "ollama-cloud"
# ── providers.py New System ──
class TestOllamaCloudProvidersNew:
    """hermes_cli.providers (new provider system) knows ollama-cloud."""

    def test_overlay_exists(self):
        from hermes_cli.providers import HERMES_OVERLAYS
        assert "ollama-cloud" in HERMES_OVERLAYS
        entry = HERMES_OVERLAYS["ollama-cloud"]
        assert entry.transport == "openai_chat"
        assert entry.base_url_env_var == "OLLAMA_BASE_URL"

    def test_alias_resolves(self):
        from hermes_cli.providers import normalize_provider as np
        # bare "ollama" means the local install; the cloud needs the suffix
        assert np("ollama") == "custom"
        assert np("ollama-cloud") == "ollama-cloud"

    def test_label_override(self):
        from hermes_cli.providers import _LABEL_OVERRIDES
        assert _LABEL_OVERRIDES.get("ollama-cloud") == "Ollama Cloud"

    def test_get_label(self):
        from hermes_cli.providers import get_label
        assert get_label("ollama-cloud") == "Ollama Cloud"

    def test_get_provider(self):
        from hermes_cli.providers import get_provider
        pdef = get_provider("ollama-cloud")
        assert pdef is not None
        assert (pdef.id, pdef.transport) == ("ollama-cloud", "openai_chat")
# ── Auxiliary Model ──
class TestOllamaCloudAuxiliary:
    """Auxiliary (small/cheap) model assignment for ollama-cloud."""

    def test_aux_model_defined(self):
        from agent.auxiliary_client import _API_KEY_PROVIDER_AUX_MODELS
        aux_model = _API_KEY_PROVIDER_AUX_MODELS.get("ollama-cloud")
        assert aux_model == "nemotron-3-nano:30b"

View file

@ -15,7 +15,7 @@ def test_opencode_go_appears_when_api_key_set():
opencode_go = next((p for p in providers if p["slug"] == "opencode-go"), None)
assert opencode_go is not None, "opencode-go should appear when OPENCODE_GO_API_KEY is set"
assert opencode_go["models"] == ["glm-5", "kimi-k2.5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.7", "minimax-m2.5"]
assert opencode_go["models"] == ["kimi-k2.5", "glm-5.1", "glm-5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.7", "minimax-m2.5"]
# opencode-go can appear as "built-in" (from PROVIDER_TO_MODELS_DEV when
# models.dev is reachable) or "hermes" (from HERMES_OVERLAYS fallback when
# the API is unavailable, e.g. in CI).

View file

@ -173,60 +173,6 @@ class TestMemoryPluginCliDiscovery:
# ── Honcho register_cli ──────────────────────────────────────────────────
class TestHonchoRegisterCli:
def test_builds_subcommand_tree(self):
"""register_cli creates the expected subparser tree."""
from plugins.memory.honcho.cli import register_cli
parser = argparse.ArgumentParser()
register_cli(parser)
# Verify key subcommands exist by parsing them
args = parser.parse_args(["status"])
assert args.honcho_command == "status"
args = parser.parse_args(["peer", "--user", "alice"])
assert args.honcho_command == "peer"
assert args.user == "alice"
args = parser.parse_args(["mode", "tools"])
assert args.honcho_command == "mode"
assert args.mode == "tools"
args = parser.parse_args(["tokens", "--context", "500"])
assert args.honcho_command == "tokens"
assert args.context == 500
args = parser.parse_args(["--target-profile", "coder", "status"])
assert args.target_profile == "coder"
assert args.honcho_command == "status"
def test_setup_redirects_to_memory_setup(self):
"""hermes honcho setup redirects to memory setup."""
from plugins.memory.honcho.cli import register_cli
parser = argparse.ArgumentParser()
register_cli(parser)
args = parser.parse_args(["setup"])
assert args.honcho_command == "setup"
def test_mode_choices_are_recall_modes(self):
"""Mode subcommand uses recall mode choices (hybrid/context/tools)."""
from plugins.memory.honcho.cli import register_cli
parser = argparse.ArgumentParser()
register_cli(parser)
# Valid recall modes should parse
for mode in ("hybrid", "context", "tools"):
args = parser.parse_args(["mode", mode])
assert args.mode == mode
# Old memoryMode values should fail
with pytest.raises(SystemExit):
parser.parse_args(["mode", "honcho"])
# ── ProviderCollector no-op ──────────────────────────────────────────────

View file

@ -18,6 +18,8 @@ from hermes_cli.plugins import (
PluginManager,
PluginManifest,
get_plugin_manager,
get_plugin_command_handler,
get_plugin_commands,
get_pre_tool_call_block_message,
discover_plugins,
invoke_hook,
@ -605,7 +607,292 @@ class TestPreLlmCallTargetRouting:
assert "plain text C" in _plugin_user_context
# NOTE: TestPluginCommands removed register_command() was never implemented
# in PluginContext (hermes_cli/plugins.py). The tests referenced _plugin_commands,
# commands_registered, get_plugin_command_handler, and GATEWAY_KNOWN_COMMANDS
# integration — all of which are unimplemented features.
# ── TestPluginCommands ────────────────────────────────────────────────────
class TestPluginCommands:
    """Tests for plugin slash command registration via register_command()."""

    def _make_ctx(self, plugin_name="test-plugin"):
        """Return a fresh (PluginManager, PluginContext) pair for *plugin_name*."""
        manager = PluginManager()
        context = PluginContext(PluginManifest(name=plugin_name, source="user"), manager)
        return manager, context

    def test_register_command_basic(self):
        """register_command() stores handler, description, and plugin name."""
        manager, context = self._make_ctx()
        cmd_handler = lambda args: f"echo {args}"
        context.register_command("mycmd", cmd_handler, description="My custom command")
        assert "mycmd" in manager._plugin_commands
        record = manager._plugin_commands["mycmd"]
        assert record["handler"] is cmd_handler
        assert record["description"] == "My custom command"
        assert record["plugin"] == "test-plugin"

    def test_register_command_normalizes_name(self):
        """Names are lowercased, stripped, and leading slashes removed."""
        manager, context = self._make_ctx()
        context.register_command("/MyCmd ", lambda a: a, description="test")
        assert "mycmd" in manager._plugin_commands
        assert "/MyCmd " not in manager._plugin_commands

    def test_register_command_empty_name_rejected(self, caplog):
        """A name that is empty after normalization is rejected with a warning."""
        manager, context = self._make_ctx()
        with caplog.at_level(logging.WARNING, logger="hermes_cli.plugins"):
            context.register_command("", lambda a: a)
        assert len(manager._plugin_commands) == 0
        assert "empty name" in caplog.text

    def test_register_command_builtin_conflict_rejected(self, caplog):
        """Commands colliding with built-in command names are refused."""
        manager, context = self._make_ctx()
        with caplog.at_level(logging.WARNING, logger="hermes_cli.plugins"):
            context.register_command("help", lambda a: a)
        assert "help" not in manager._plugin_commands
        assert "conflicts" in caplog.text.lower()

    def test_register_command_default_description(self):
        """Missing description defaults to 'Plugin command'."""
        manager, context = self._make_ctx()
        context.register_command("status-cmd", lambda a: a)
        assert manager._plugin_commands["status-cmd"]["description"] == "Plugin command"

    def test_get_plugin_command_handler_found(self):
        """get_plugin_command_handler() returns the registered handler."""
        manager, context = self._make_ctx()
        cmd_handler = lambda args: f"result: {args}"
        context.register_command("mycmd", cmd_handler, description="test")
        with patch("hermes_cli.plugins._plugin_manager", manager):
            assert get_plugin_command_handler("mycmd") is cmd_handler

    def test_get_plugin_command_handler_not_found(self):
        """Unregistered command names resolve to None."""
        manager = PluginManager()
        with patch("hermes_cli.plugins._plugin_manager", manager):
            assert get_plugin_command_handler("nonexistent") is None

    def test_get_plugin_commands_returns_dict(self):
        """get_plugin_commands() exposes every registered command."""
        manager, context = self._make_ctx()
        context.register_command("cmd-a", lambda a: a, description="A")
        context.register_command("cmd-b", lambda a: a, description="B")
        with patch("hermes_cli.plugins._plugin_manager", manager):
            commands = get_plugin_commands()
        assert "cmd-a" in commands
        assert "cmd-b" in commands
        assert commands["cmd-a"]["description"] == "A"

    def test_commands_tracked_on_loaded_plugin(self, tmp_path, monkeypatch):
        """Commands registered during discover_and_load() land on LoadedPlugin."""
        plugin_root = tmp_path / "hermes_test" / "plugins"
        _make_plugin_dir(
            plugin_root, "cmd-plugin",
            register_body=(
                'ctx.register_command("mycmd", lambda a: "ok", description="Test")'
            ),
        )
        monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes_test"))
        manager = PluginManager()
        manager.discover_and_load()
        loaded = manager._plugins["cmd-plugin"]
        assert loaded.enabled
        assert "mycmd" in loaded.commands_registered

    def test_commands_in_list_plugins_output(self, tmp_path, monkeypatch):
        """list_plugins() reports the number of registered commands."""
        plugin_root = tmp_path / "hermes_test" / "plugins"
        _make_plugin_dir(
            plugin_root, "cmd-plugin",
            register_body=(
                'ctx.register_command("mycmd", lambda a: "ok", description="Test")'
            ),
        )
        monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes_test"))
        manager = PluginManager()
        manager.discover_and_load()
        info = manager.list_plugins()
        assert len(info) == 1
        assert info[0]["commands"] == 1

    def test_handler_receives_raw_args(self):
        """The stored handler is invoked with the raw argument string."""
        manager, context = self._make_ctx()
        seen = []
        context.register_command("echo", lambda args: seen.append(args) or "ok")
        manager._plugin_commands["echo"]["handler"]("hello world")
        assert seen == ["hello world"]

    def test_multiple_plugins_register_different_commands(self):
        """Each plugin registers its own commands under its own name."""
        manager = PluginManager()
        for plugin_name, cmd_name in (("plugin-a", "cmd-a"), ("plugin-b", "cmd-b")):
            context = PluginContext(
                PluginManifest(name=plugin_name, source="user"), manager
            )
            context.register_command(cmd_name, lambda a: a, description=f"From {plugin_name}")
        assert "cmd-a" in manager._plugin_commands
        assert "cmd-b" in manager._plugin_commands
        assert manager._plugin_commands["cmd-a"]["plugin"] == "plugin-a"
        assert manager._plugin_commands["cmd-b"]["plugin"] == "plugin-b"
# ── TestPluginDispatchTool ────────────────────────────────────────────────
class TestPluginDispatchTool:
    """Tests for PluginContext.dispatch_tool() — tool dispatch with agent context."""

    def _fresh(self):
        """Return a (PluginManager, PluginContext) pair for a throwaway plugin."""
        manager = PluginManager()
        context = PluginContext(PluginManifest(name="test-plugin", source="user"), manager)
        return manager, context

    @staticmethod
    def _registry(payload):
        """Return a MagicMock registry whose dispatch() yields *payload*."""
        fake = MagicMock()
        fake.dispatch.return_value = payload
        return fake

    def test_dispatch_tool_calls_registry(self):
        """dispatch_tool() delegates to registry.dispatch()."""
        _, context = self._fresh()
        fake_registry = self._registry('{"result": "ok"}')
        with patch("hermes_cli.plugins.PluginContext.dispatch_tool.__module__", "hermes_cli.plugins"), \
             patch.dict("sys.modules", {}), \
             patch("tools.registry.registry", fake_registry):
            outcome = context.dispatch_tool("web_search", {"query": "test"})
        assert outcome == '{"result": "ok"}'

    def test_dispatch_tool_injects_parent_agent_from_cli_ref(self):
        """When _cli_ref has an agent, it's passed as parent_agent."""
        manager, context = self._fresh()
        fake_agent = MagicMock()
        fake_cli = MagicMock()
        fake_cli.agent = fake_agent
        manager._cli_ref = fake_cli
        fake_registry = self._registry('{"ok": true}')
        with patch("tools.registry.registry", fake_registry):
            context.dispatch_tool("delegate_task", {"goal": "test"})
        fake_registry.dispatch.assert_called_once()
        dispatch_call = fake_registry.dispatch.call_args
        assert dispatch_call[1].get("parent_agent") is fake_agent

    def test_dispatch_tool_no_parent_agent_when_no_cli_ref(self):
        """When _cli_ref is None (gateway mode), no parent_agent is injected."""
        manager, context = self._fresh()
        manager._cli_ref = None
        fake_registry = self._registry('{"ok": true}')
        with patch("tools.registry.registry", fake_registry):
            context.dispatch_tool("delegate_task", {"goal": "test"})
        assert "parent_agent" not in fake_registry.dispatch.call_args[1]

    def test_dispatch_tool_no_parent_agent_when_agent_is_none(self):
        """A cli_ref whose agent is still None contributes no parent_agent."""
        manager, context = self._fresh()
        fake_cli = MagicMock()
        fake_cli.agent = None
        manager._cli_ref = fake_cli
        fake_registry = self._registry('{"ok": true}')
        with patch("tools.registry.registry", fake_registry):
            context.dispatch_tool("delegate_task", {"goal": "test"})
        assert "parent_agent" not in fake_registry.dispatch.call_args[1]

    def test_dispatch_tool_respects_explicit_parent_agent(self):
        """An explicit parent_agent kwarg wins over _cli_ref.agent."""
        manager, context = self._fresh()
        cli_agent = MagicMock(name="cli_agent")
        fake_cli = MagicMock()
        fake_cli.agent = cli_agent
        manager._cli_ref = fake_cli
        explicit_agent = MagicMock(name="explicit_agent")
        fake_registry = self._registry('{"ok": true}')
        with patch("tools.registry.registry", fake_registry):
            context.dispatch_tool("delegate_task", {"goal": "test"}, parent_agent=explicit_agent)
        assert fake_registry.dispatch.call_args[1]["parent_agent"] is explicit_agent

    def test_dispatch_tool_forwards_extra_kwargs(self):
        """Arbitrary keyword arguments pass straight through to dispatch()."""
        manager, context = self._fresh()
        manager._cli_ref = None
        fake_registry = self._registry('{"ok": true}')
        with patch("tools.registry.registry", fake_registry):
            context.dispatch_tool("some_tool", {"x": 1}, task_id="test-123")
        assert fake_registry.dispatch.call_args[1]["task_id"] == "test-123"

    def test_dispatch_tool_returns_json_string(self):
        """dispatch_tool() hands back the registry's raw JSON string."""
        manager, context = self._fresh()
        manager._cli_ref = None
        fake_registry = self._registry('{"error": "Unknown tool: fake"}')
        with patch("tools.registry.registry", fake_registry):
            outcome = context.dispatch_tool("fake", {})
        assert '"error"' in outcome

View file

@ -126,59 +126,6 @@ class TestRepoNameFromUrl:
# ── plugins_command dispatch ──────────────────────────────────────────────
class TestPluginsCommandDispatch:
    """Verify alias routing in plugins_command()."""

    @staticmethod
    def _make_args(action, **extras):
        """Build a fake argparse namespace for a plugins subcommand."""
        fake_args = MagicMock()
        fake_args.plugins_action = action
        for attr_name, attr_value in extras.items():
            setattr(fake_args, attr_name, attr_value)
        return fake_args

    @patch("hermes_cli.plugins_cmd.cmd_remove")
    def test_rm_alias(self, mock_remove):
        """`plugins rm` routes to cmd_remove."""
        plugins_command(self._make_args("rm", name="some-plugin"))
        mock_remove.assert_called_once_with("some-plugin")

    @patch("hermes_cli.plugins_cmd.cmd_remove")
    def test_uninstall_alias(self, mock_remove):
        """`plugins uninstall` routes to cmd_remove."""
        plugins_command(self._make_args("uninstall", name="some-plugin"))
        mock_remove.assert_called_once_with("some-plugin")

    @patch("hermes_cli.plugins_cmd.cmd_list")
    def test_ls_alias(self, mock_list):
        """`plugins ls` routes to cmd_list."""
        plugins_command(self._make_args("ls"))
        mock_list.assert_called_once()

    @patch("hermes_cli.plugins_cmd.cmd_toggle")
    def test_none_falls_through_to_toggle(self, mock_toggle):
        """A missing action falls through to the interactive toggle."""
        plugins_command(self._make_args(None))
        mock_toggle.assert_called_once()

    @patch("hermes_cli.plugins_cmd.cmd_install")
    def test_install_dispatches(self, mock_install):
        """`plugins install` routes to cmd_install with the force flag."""
        plugins_command(self._make_args("install", identifier="owner/repo", force=False))
        mock_install.assert_called_once_with("owner/repo", force=False)

    @patch("hermes_cli.plugins_cmd.cmd_update")
    def test_update_dispatches(self, mock_update):
        """`plugins update` routes to cmd_update."""
        plugins_command(self._make_args("update", name="foo"))
        mock_update.assert_called_once_with("foo")

    @patch("hermes_cli.plugins_cmd.cmd_remove")
    def test_remove_dispatches(self, mock_remove):
        """`plugins remove` routes to cmd_remove."""
        plugins_command(self._make_args("remove", name="bar"))
        mock_remove.assert_called_once_with("bar")
# ── _read_manifest ────────────────────────────────────────────────────────

View file

@ -799,35 +799,30 @@ class TestEdgeCases:
assert default.skill_count == 0
def test_gateway_running_check_with_pid_file(self, profile_env):
"""Verify _check_gateway_running reads pid file and probes os.kill."""
"""Verify _check_gateway_running uses the shared gateway PID validator."""
from hermes_cli.profiles import _check_gateway_running
tmp_path = profile_env
default_home = tmp_path / ".hermes"
# No pid file -> not running
assert _check_gateway_running(default_home) is False
# Write a PID file with a JSON payload
pid_file = default_home / "gateway.pid"
pid_file.write_text(json.dumps({"pid": 99999}))
# os.kill(99999, 0) should raise ProcessLookupError -> not running
assert _check_gateway_running(default_home) is False
# Mock os.kill to simulate a running process
with patch("os.kill", return_value=None):
with patch("gateway.status.get_running_pid", return_value=99999) as mock_get_running_pid:
assert _check_gateway_running(default_home) is True
mock_get_running_pid.assert_called_once_with(
default_home / "gateway.pid",
cleanup_stale=False,
)
def test_gateway_running_check_plain_pid(self, profile_env):
"""Pid file containing just a number (legacy format)."""
"""Shared PID validator returning None means the profile is not running."""
from hermes_cli.profiles import _check_gateway_running
tmp_path = profile_env
default_home = tmp_path / ".hermes"
pid_file = default_home / "gateway.pid"
pid_file.write_text("99999")
with patch("os.kill", return_value=None):
assert _check_gateway_running(default_home) is True
with patch("gateway.status.get_running_pid", return_value=None) as mock_get_running_pid:
assert _check_gateway_running(default_home) is False
mock_get_running_pid.assert_called_once_with(
default_home / "gateway.pid",
cleanup_stale=False,
)
def test_profile_name_boundary_single_char(self):
"""Single alphanumeric character is valid."""

View file

@ -363,7 +363,7 @@ def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, mon
def test_modal_setup_can_use_nous_subscription_without_modal_creds(tmp_path, monkeypatch, capsys):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("hermes_cli.setup.managed_nous_tools_enabled", lambda: True)
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
config = load_config()
@ -405,7 +405,7 @@ def test_modal_setup_can_use_nous_subscription_without_modal_creds(tmp_path, mon
def test_modal_setup_persists_direct_mode_when_user_chooses_their_own_account(tmp_path, monkeypatch):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("hermes_cli.setup.managed_nous_tools_enabled", lambda: True)
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
monkeypatch.delenv("MODAL_TOKEN_ID", raising=False)
monkeypatch.delenv("MODAL_TOKEN_SECRET", raising=False)

View file

@ -2,7 +2,7 @@ from hermes_cli import setup as setup_mod
def test_prompt_choice_uses_curses_helper(monkeypatch):
monkeypatch.setattr(setup_mod, "_curses_prompt_choice", lambda question, choices, default=0: 1)
monkeypatch.setattr(setup_mod, "_curses_prompt_choice", lambda question, choices, default=0, description=None: 1)
idx = setup_mod.prompt_choice("Pick one", ["a", "b", "c"], default=0)
@ -10,7 +10,7 @@ def test_prompt_choice_uses_curses_helper(monkeypatch):
def test_prompt_choice_falls_back_to_numbered_input(monkeypatch):
monkeypatch.setattr(setup_mod, "_curses_prompt_choice", lambda question, choices, default=0: -1)
monkeypatch.setattr(setup_mod, "_curses_prompt_choice", lambda question, choices, default=0, description=None: -1)
monkeypatch.setattr("builtins.input", lambda _prompt="": "2")
idx = setup_mod.prompt_choice("Pick one", ["a", "b", "c"], default=0)

View file

@ -152,6 +152,24 @@ class TestSkinManagement:
init_skin_from_config({})
assert get_active_skin_name() == "default"
def test_init_skin_from_null_display(self):
"""display: null should fall back to default, not crash."""
from hermes_cli.skin_engine import init_skin_from_config, get_active_skin_name
init_skin_from_config({"display": None})
assert get_active_skin_name() == "default"
def test_init_skin_from_non_dict_display(self):
"""display: <non-dict> should fall back to default."""
from hermes_cli.skin_engine import init_skin_from_config, get_active_skin_name
init_skin_from_config({"display": "invalid"})
assert get_active_skin_name() == "default"
init_skin_from_config({"display": 42})
assert get_active_skin_name() == "default"
init_skin_from_config({"display": []})
assert get_active_skin_name() == "default"
class TestUserSkins:
def test_load_user_skin_from_yaml(self, tmp_path, monkeypatch):

View file

@ -64,7 +64,7 @@ def test_show_status_displays_legacy_string_model_and_custom_endpoint(monkeypatc
def test_show_status_reports_managed_nous_features(monkeypatch, capsys, tmp_path):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("hermes_cli.status.managed_nous_tools_enabled", lambda: True)
from hermes_cli import status as status_mod
_patch_common_status_deps(monkeypatch, status_mod, tmp_path)
@ -98,13 +98,13 @@ def test_show_status_reports_managed_nous_features(monkeypatch, capsys, tmp_path
status_mod.show_status(SimpleNamespace(all=False, deep=False))
out = capsys.readouterr().out
assert "Nous Subscription Features" in out
assert "Nous Tool Gateway" in out
assert "Browser automation" in out
assert "active via Nous subscription" in out
def test_show_status_hides_nous_subscription_section_when_feature_flag_is_off(monkeypatch, capsys, tmp_path):
monkeypatch.delenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", raising=False)
monkeypatch.setattr("hermes_cli.status.managed_nous_tools_enabled", lambda: False)
from hermes_cli import status as status_mod
_patch_common_status_deps(monkeypatch, status_mod, tmp_path)
@ -121,4 +121,4 @@ def test_show_status_hides_nous_subscription_section_when_feature_flag_is_off(mo
status_mod.show_status(SimpleNamespace(all=False, deep=False))
out = capsys.readouterr().out
assert "Nous Subscription Features" not in out
assert "Nous Tool Gateway" not in out

View file

@ -0,0 +1,66 @@
"""Tests for the defensive subparser routing workaround (bpo-9338).
The main() function in hermes_cli/main.py sets subparsers.required=True
when argv contains a known subcommand name. This forces deterministic
routing on Python versions where argparse fails to match subcommand tokens
when the parent parser has nargs='?' optional arguments (--continue).
If the subcommand token is consumed as a flag value (e.g. `hermes -c model`
to resume a session named 'model'), the required=True parse raises
SystemExit and the code falls back to the default required=False behaviour.
"""
import argparse
import io
import sys
import pytest
def _build_parser():
"""Build a minimal replica of the hermes top-level parser."""
parser = argparse.ArgumentParser(prog="hermes")
parser.add_argument("--version", "-V", action="store_true")
parser.add_argument("--resume", "-r", metavar="SESSION", default=None)
parser.add_argument(
"--continue", "-c",
dest="continue_last",
nargs="?",
const=True,
default=None,
metavar="SESSION_NAME",
)
parser.add_argument("--worktree", "-w", action="store_true", default=False)
parser.add_argument("--skills", "-s", action="append", default=None)
parser.add_argument("--yolo", action="store_true", default=False)
parser.add_argument("--pass-session-id", action="store_true", default=False)
subparsers = parser.add_subparsers(dest="command", help="Command to run")
chat_p = subparsers.add_parser("chat")
chat_p.add_argument("-q", "--query", default=None)
subparsers.add_parser("model")
subparsers.add_parser("gateway")
subparsers.add_parser("setup")
return parser, subparsers
def _safe_parse(parser, subparsers, argv):
"""Replica of the defensive parsing logic from main()."""
known_cmds = set(subparsers.choices.keys()) if hasattr(subparsers, "choices") else set()
has_cmd_token = any(t in known_cmds for t in argv if not t.startswith("-"))
if has_cmd_token:
subparsers.required = True
saved_stderr = sys.stderr
try:
sys.stderr = io.StringIO()
args = parser.parse_args(argv)
sys.stderr = saved_stderr
return args
except SystemExit:
sys.stderr = saved_stderr
subparsers.required = False
return parser.parse_args(argv)
else:
subparsers.required = False
return parser.parse_args(argv)

View file

@ -8,6 +8,7 @@ from hermes_cli.tools_config import (
_platform_toolset_summary,
_save_platform_tools,
_toolset_has_keys,
CONFIGURABLE_TOOLSETS,
TOOL_CATEGORIES,
_visible_providers,
tools_command,
@ -22,6 +23,15 @@ def test_get_platform_tools_uses_default_when_platform_not_configured():
assert enabled
def test_configurable_toolsets_include_messaging():
    """The messaging toolset is offered among the configurable toolsets."""
    toolset_keys = [key for key, _, _ in CONFIGURABLE_TOOLSETS]
    assert "messaging" in toolset_keys
def test_get_platform_tools_default_telegram_includes_messaging():
    """Telegram's default toolset selection includes messaging."""
    assert "messaging" in _get_platform_tools({}, "telegram")
def test_get_platform_tools_preserves_explicit_empty_selection():
config = {"platform_toolsets": {"cli": []}}
@ -30,6 +40,19 @@ def test_get_platform_tools_preserves_explicit_empty_selection():
assert enabled == set()
def test_get_platform_tools_handles_null_platform_toolsets():
    """YAML ``platform_toolsets:`` with no value parses as None — the old
    ``config.get("platform_toolsets", {})`` pattern would then crash with
    ``NoneType has no attribute 'get'`` on the next line. Guard against that.
    """
    enabled = _get_platform_tools({"platform_toolsets": None}, "cli")
    # Defaults are used instead of raising on the None mapping.
    assert enabled
def test_platform_toolset_summary_uses_explicit_platform_list():
config = {}
@ -286,7 +309,7 @@ def test_save_platform_tools_still_preserves_mcp_with_platform_default_present()
def test_visible_providers_include_nous_subscription_when_logged_in(monkeypatch):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("hermes_cli.tools_config.managed_nous_tools_enabled", lambda: True)
config = {"model": {"provider": "nous"}}
monkeypatch.setattr(
@ -300,7 +323,7 @@ def test_visible_providers_include_nous_subscription_when_logged_in(monkeypatch)
def test_visible_providers_hide_nous_subscription_when_feature_flag_is_off(monkeypatch):
monkeypatch.delenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", raising=False)
monkeypatch.setattr("hermes_cli.tools_config.managed_nous_tools_enabled", lambda: False)
config = {"model": {"provider": "nous"}}
monkeypatch.setattr(
@ -328,7 +351,8 @@ def test_local_browser_provider_is_saved_explicitly(monkeypatch):
def test_first_install_nous_auto_configures_managed_defaults(monkeypatch):
monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
monkeypatch.setattr("hermes_cli.tools_config.managed_nous_tools_enabled", lambda: True)
monkeypatch.setattr("hermes_cli.nous_subscription.managed_nous_tools_enabled", lambda: True)
config = {
"model": {"provider": "nous"},
"platform_toolsets": {"cli": []},
@ -455,3 +479,90 @@ def test_numeric_mcp_server_name_does_not_crash_sorted():
# sorted() must not raise TypeError
sorted(enabled)
# ─── Imagegen Backend Picker Wiring ────────────────────────────────────────
class TestImagegenBackendRegistry:
    """IMAGEGEN_BACKENDS tags drive the model picker flow in tools_config."""

    def test_fal_backend_registered(self):
        """The fal backend is present in the registry."""
        from hermes_cli.tools_config import IMAGEGEN_BACKENDS

        assert "fal" in IMAGEGEN_BACKENDS

    def test_fal_catalog_loads_lazily(self):
        """catalog_fn should defer import to avoid import cycles."""
        from hermes_cli.tools_config import IMAGEGEN_BACKENDS

        catalog, default_model = IMAGEGEN_BACKENDS["fal"]["catalog_fn"]()
        assert default_model == "fal-ai/flux-2/klein/9b"
        for expected_model in ("fal-ai/flux-2/klein/9b", "fal-ai/flux-2-pro"):
            assert expected_model in catalog

    def test_image_gen_providers_tagged_with_fal_backend(self):
        """Both Nous Subscription and FAL.ai providers must carry the
        imagegen_backend tag so _configure_provider fires the picker."""
        from hermes_cli.tools_config import TOOL_CATEGORIES

        for provider in TOOL_CATEGORIES["image_gen"]["providers"]:
            assert provider.get("imagegen_backend") == "fal", (
                f"{provider['name']} missing imagegen_backend tag"
            )
class TestImagegenModelPicker:
    """_configure_imagegen_model writes selection to config and respects
    curses fallback semantics (returns default when stdin isn't a TTY)."""

    def test_picker_writes_chosen_model_to_config(self):
        """The chosen model id is persisted under image_gen.model."""
        from hermes_cli.tools_config import _configure_imagegen_model

        settings = {}
        # Force _prompt_choice to select index 1: the first non-default model
        # (index 0 is always the current/default entry).
        with patch("hermes_cli.tools_config._prompt_choice", return_value=1):
            _configure_imagegen_model("fal", settings)
        chosen = settings["image_gen"]["model"]
        assert chosen != "fal-ai/flux-2/klein/9b"
        assert chosen.startswith("fal-ai/")

    def test_picker_with_gpt_image_does_not_prompt_quality(self):
        """GPT-Image quality is pinned to medium in the tool's defaults —
        no follow-up prompt, no config write for quality_setting."""
        from hermes_cli.tools_config import (
            IMAGEGEN_BACKENDS,
            _configure_imagegen_model,
        )

        catalog, default_model = IMAGEGEN_BACKENDS["fal"]["catalog_fn"]()
        ordered = [default_model] + [
            model_id for model_id in catalog if model_id != default_model
        ]
        gpt_index = ordered.index("fal-ai/gpt-image-1.5")
        prompt_calls = []

        def fake_prompt(*args, **kwargs):
            prompt_calls.append(args)
            return gpt_index

        settings = {}
        with patch("hermes_cli.tools_config._prompt_choice", side_effect=fake_prompt):
            _configure_imagegen_model("fal", settings)
        assert len(prompt_calls) == 1, (
            f"Expected 1 picker call (model only), got {len(prompt_calls)}"
        )
        assert settings["image_gen"]["model"] == "fal-ai/gpt-image-1.5"
        assert "quality_setting" not in settings["image_gen"]

    def test_picker_no_op_for_unknown_backend(self):
        """Unknown backends leave the config untouched."""
        from hermes_cli.tools_config import _configure_imagegen_model

        settings = {}
        _configure_imagegen_model("nonexistent-backend", settings)
        assert settings == {}  # untouched

    def test_picker_repairs_corrupt_config_section(self):
        """When image_gen is a non-dict (user-edit YAML), the picker should
        replace it with a fresh dict rather than crash."""
        from hermes_cli.tools_config import _configure_imagegen_model

        settings = {"image_gen": "some-garbage-string"}
        with patch("hermes_cli.tools_config._prompt_choice", return_value=0):
            _configure_imagegen_model("fal", settings)
        assert isinstance(settings["image_gen"], dict)
        assert settings["image_gen"]["model"] == "fal-ai/flux-2/klein/9b"

View file

@ -0,0 +1,53 @@
"""_tui_need_npm_install: auto npm when lockfile ahead of node_modules."""
import os
from pathlib import Path
import pytest
@pytest.fixture
def main_mod():
    """Provide the hermes_cli.main module to tests."""
    import hermes_cli.main
    return hermes_cli.main
def _touch_ink(root: Path) -> None:
ink = root / "node_modules" / "@hermes" / "ink" / "package.json"
ink.parent.mkdir(parents=True, exist_ok=True)
ink.write_text("{}")
def test_need_install_when_ink_missing(tmp_path: Path, main_mod) -> None:
    """A lockfile with no installed ink package forces an npm install."""
    (tmp_path / "package-lock.json").write_text("{}")
    assert main_mod._tui_need_npm_install(tmp_path) is True
def test_need_install_when_lock_newer_than_marker(tmp_path: Path, main_mod) -> None:
    """A lockfile newer than the node_modules marker requires a reinstall."""
    _touch_ink(tmp_path)
    lockfile = tmp_path / "package-lock.json"
    marker = tmp_path / "node_modules" / ".package-lock.json"
    lockfile.write_text("{}")
    marker.write_text("{}")
    os.utime(lockfile, (200, 200))
    os.utime(marker, (100, 100))
    assert main_mod._tui_need_npm_install(tmp_path) is True
def test_no_install_when_lock_older_than_marker(tmp_path: Path, main_mod) -> None:
    """A marker newer than the lockfile means node_modules is up to date."""
    _touch_ink(tmp_path)
    lockfile = tmp_path / "package-lock.json"
    marker = tmp_path / "node_modules" / ".package-lock.json"
    lockfile.write_text("{}")
    marker.write_text("{}")
    os.utime(lockfile, (100, 100))
    os.utime(marker, (200, 200))
    assert main_mod._tui_need_npm_install(tmp_path) is False
def test_need_install_when_marker_missing(tmp_path: Path, main_mod) -> None:
    """Installed ink but no .package-lock.json marker still forces install."""
    _touch_ink(tmp_path)
    (tmp_path / "package-lock.json").write_text("{}")
    assert main_mod._tui_need_npm_install(tmp_path) is True
def test_no_install_without_lockfile_when_ink_present(tmp_path: Path, main_mod) -> None:
    """Without any lockfile, the installed ink package alone means no npm run."""
    _touch_ink(tmp_path)
    assert main_mod._tui_need_npm_install(tmp_path) is False

View file

@ -0,0 +1,121 @@
from argparse import Namespace
import sys
import types
import pytest
def _args(**overrides):
base = {
"continue_last": None,
"resume": None,
"tui": True,
}
base.update(overrides)
return Namespace(**base)
@pytest.fixture
def main_mod(monkeypatch):
    """hermes_cli.main with provider configuration stubbed as present."""
    import hermes_cli.main as module

    monkeypatch.setattr(module, "_has_any_provider_configured", lambda: True)
    return module
def test_cmd_chat_tui_continue_uses_latest_tui_session(monkeypatch, main_mod):
    """--continue in TUI mode resumes the newest TUI session when one exists."""
    sources_queried = []
    launch_kwargs = {}

    def fake_resolve_last(source="cli"):
        sources_queried.append(source)
        return "20260408_235959_a1b2c3" if source == "tui" else None

    def fake_launch(resume_session_id=None, tui_dev=False):
        launch_kwargs["resume"] = resume_session_id
        raise SystemExit(0)

    monkeypatch.setattr(main_mod, "_resolve_last_session", fake_resolve_last)
    monkeypatch.setattr(main_mod, "_resolve_session_by_name_or_id", lambda val: val)
    monkeypatch.setattr(main_mod, "_launch_tui", fake_launch)
    with pytest.raises(SystemExit):
        main_mod.cmd_chat(_args(continue_last=True))
    assert sources_queried == ["tui"]
    assert launch_kwargs["resume"] == "20260408_235959_a1b2c3"
def test_cmd_chat_tui_continue_falls_back_to_latest_cli_session(monkeypatch, main_mod):
    """With no TUI session, --continue falls back to the newest CLI session."""
    sources_queried = []
    launch_kwargs = {}

    def fake_resolve_last(source="cli"):
        sources_queried.append(source)
        return "20260408_235959_d4e5f6" if source == "cli" else None

    def fake_launch(resume_session_id=None, tui_dev=False):
        launch_kwargs["resume"] = resume_session_id
        raise SystemExit(0)

    monkeypatch.setattr(main_mod, "_resolve_last_session", fake_resolve_last)
    monkeypatch.setattr(main_mod, "_resolve_session_by_name_or_id", lambda val: val)
    monkeypatch.setattr(main_mod, "_launch_tui", fake_launch)
    with pytest.raises(SystemExit):
        main_mod.cmd_chat(_args(continue_last=True))
    assert sources_queried == ["tui", "cli"]
    assert launch_kwargs["resume"] == "20260408_235959_d4e5f6"
def test_cmd_chat_tui_resume_resolves_title_before_launch(monkeypatch, main_mod):
    """--resume resolves a session title to its id before launching the TUI."""
    launch_kwargs = {}

    def fake_launch(resume_session_id=None, tui_dev=False):
        launch_kwargs["resume"] = resume_session_id
        raise SystemExit(0)

    monkeypatch.setattr(
        main_mod, "_resolve_session_by_name_or_id", lambda val: "20260409_000000_aa11bb"
    )
    monkeypatch.setattr(main_mod, "_launch_tui", fake_launch)
    with pytest.raises(SystemExit):
        main_mod.cmd_chat(_args(resume="my t0p session"))
    assert launch_kwargs["resume"] == "20260409_000000_aa11bb"
def test_print_tui_exit_summary_includes_resume_and_token_totals(monkeypatch, capsys):
    """The exit summary prints resume hints plus aggregated token counts."""
    import hermes_cli.main as main_mod

    class _StubDB:
        def get_session(self, session_id):
            assert session_id == "20260409_000001_abc123"
            return {
                "message_count": 2,
                "input_tokens": 10,
                "output_tokens": 6,
                "cache_read_tokens": 2,
                "cache_write_tokens": 2,
                "reasoning_tokens": 1,
            }

        def get_session_title(self, _session_id):
            return "demo title"

        def close(self):
            return None

    monkeypatch.setitem(
        sys.modules, "hermes_state", types.SimpleNamespace(SessionDB=lambda: _StubDB())
    )
    main_mod._print_tui_exit_summary("20260409_000001_abc123")
    printed = capsys.readouterr().out
    assert "Resume this session with:" in printed
    assert "hermes --tui --resume 20260409_000001_abc123" in printed
    assert 'hermes --tui -c "demo title"' in printed
    assert "Tokens: 21 (in 10, out 6, cache 4, reasoning 1)" in printed

View file

@ -13,9 +13,29 @@ from unittest.mock import patch, MagicMock
import pytest
import hermes_cli.gateway as gateway_cli
import hermes_cli.main as cli_main
from hermes_cli.main import cmd_update
# ---------------------------------------------------------------------------
# Skip the real-time sleeps inside cmd_update's restart-verification path
# ---------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def _no_restart_verify_sleep(monkeypatch):
    """Neutralize time.sleep during cmd_update's restart-verification path.

    hermes_cli/main.py sleeps after `systemctl restart` to verify the service
    survived. These tests mock subprocess.run — nothing actually restarts —
    so the wait is pure dead time. main.py imports time both at module level
    and inside functions, so patching the global ``time.sleep`` covers every
    call site for the duration of each test.
    """
    import time as _real_time

    monkeypatch.setattr(_real_time, "sleep", lambda *_args, **_kwargs: None)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
@ -915,3 +935,183 @@ class TestGatewayModeWritesExitCodeEarly:
assert exit_code_existed_at_restart, "systemctl restart was never called"
assert exit_code_existed_at_restart[0] is True, \
".update_exit_code must exist BEFORE systemctl restart (cgroup kill race)"
class TestCmdUpdateLegacyGatewayWarning:
    """Tests for the legacy hermes.service warning printed by `hermes update`.
    Users who installed Hermes before the service rename often have a
    dormant ``hermes.service`` that starts flap-fighting the current
    ``hermes-gateway.service`` after PR #5646. Every ``hermes update``
    should remind them to run ``hermes gateway migrate-legacy`` until
    they do.
    """
    # Unit-file body that marks a service file as "ours" — the detection is
    # driven by the hermes_cli ExecStart line, not by the file name alone.
    _OUR_UNIT_TEXT = (
        "[Unit]\nDescription=Hermes Gateway\n[Service]\n"
        "ExecStart=/usr/bin/python -m hermes_cli.main gateway run --replace\n"
    )
    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_prints_legacy_warning_when_detected(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """Legacy units present → warning in update output with migrate command."""
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        # Plant a legacy unit in the user-scope directory only.
        legacy_path = user_dir / "hermes.service"
        legacy_path.write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        # Redirect the unit search away from the real systemd directories.
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        # Force a Linux-with-systemd platform so the legacy check runs at all.
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        mock_run.side_effect = _make_run_side_effect(commit_count="3")
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Legacy Hermes gateway unit(s) detected" in captured
        assert "hermes.service" in captured
        assert "hermes gateway migrate-legacy" in captured
        assert "(user scope)" in captured
    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_silent_when_no_legacy_units(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """No legacy units → no warning printed."""
        # Both search directories exist but are empty.
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        mock_run.side_effect = _make_run_side_effect(commit_count="3")
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Legacy Hermes gateway" not in captured
        assert "migrate-legacy" not in captured
    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_does_not_flag_profile_units(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """Profile units (hermes-gateway-coder.service) must not trigger the warning.
        This is the core safety invariant: the legacy allowlist is
        ``hermes.service`` only, no globs.
        """
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        # Drop a profile unit that an over-eager glob would match
        (user_dir / "hermes-gateway-coder.service").write_text(
            self._OUR_UNIT_TEXT, encoding="utf-8"
        )
        # The current canonical unit must also never be flagged as legacy.
        (user_dir / "hermes-gateway.service").write_text(
            self._OUR_UNIT_TEXT, encoding="utf-8"
        )
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        mock_run.side_effect = _make_run_side_effect(commit_count="3")
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Legacy Hermes gateway" not in captured
        assert "hermes-gateway-coder.service" not in captured  # not flagged
    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_skips_legacy_check_on_non_systemd_platforms(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """macOS / Windows / Termux — skip check entirely since the rename
        is systemd-specific."""
        user_dir = tmp_path / "user"
        user_dir.mkdir()
        # Put a file that WOULD match if the check ran
        (user_dir / "hermes.service").write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, tmp_path / "system")],
        )
        # Simulate macOS: no systemd support, so the check must short-circuit.
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: True)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
        mock_run.side_effect = _make_run_side_effect(
            commit_count="3", launchctl_loaded=False,
        )
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        # Must not print the warning on non-systemd platforms
        assert "Legacy Hermes gateway" not in captured
    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_update_lists_system_scope_unit_with_sudo_hint(
        self, mock_run, _mock_which, mock_args, capsys, tmp_path, monkeypatch,
    ):
        """System-scope legacy units need sudo — the warning must point that out."""
        user_dir = tmp_path / "user"
        system_dir = tmp_path / "system"
        user_dir.mkdir()
        system_dir.mkdir()
        # Legacy unit lives in the system-scope directory this time.
        (system_dir / "hermes.service").write_text(self._OUR_UNIT_TEXT, encoding="utf-8")
        monkeypatch.setattr(
            gateway_cli,
            "_legacy_unit_search_paths",
            lambda: [(False, user_dir), (True, system_dir)],
        )
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        mock_run.side_effect = _make_run_side_effect(commit_count="3")
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(mock_args)
        captured = capsys.readouterr().out
        assert "Legacy Hermes gateway" in captured
        assert "(system scope)" in captured
        assert "sudo" in captured

View file

@ -0,0 +1,325 @@
"""Tests for SIGHUP protection and stdout mirroring in ``hermes update``.
Covers ``_UpdateOutputStream``, ``_install_hangup_protection``, and
``_finalize_update_output`` in ``hermes_cli/main.py``. These exist so
that ``hermes update`` survives a terminal disconnect mid-install
(SSH drop, shell close) without leaving the venv half-installed.
"""
from __future__ import annotations
import io
import os
import signal
import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from hermes_cli.main import (
_UpdateOutputStream,
_finalize_update_output,
_install_hangup_protection,
)
# -----------------------------------------------------------------------------
# _UpdateOutputStream
# -----------------------------------------------------------------------------
class TestUpdateOutputStream:
    """Exercises the tee stream that mirrors update output to a log file."""

    def test_write_mirrors_to_both_original_and_log(self):
        terminal = io.StringIO()
        logfile = io.StringIO()
        tee = _UpdateOutputStream(terminal, logfile)
        tee.write("hello world\n")
        assert terminal.getvalue() == "hello world\n"
        assert logfile.getvalue() == "hello world\n"

    def test_write_continues_after_broken_original(self):
        """When the terminal disconnects, original.write raises BrokenPipeError.
        The wrapper must catch it, flip the broken flag, and keep writing to
        the log from then on.
        """
        logfile = io.StringIO()

        class _DeadTerminal:
            def write(self, data):
                raise BrokenPipeError("terminal gone")

            def flush(self):
                raise BrokenPipeError("terminal gone")

        tee = _UpdateOutputStream(_DeadTerminal(), logfile)
        tee.write("first line\n")   # trips the broken-pipe handling
        tee.write("second line\n")  # now routed straight to the log
        assert logfile.getvalue() == "first line\nsecond line\n"
        assert tee._original_broken is True

    def test_write_tolerates_oserror_and_valueerror(self):
        """OSError (EIO) and ValueError (closed file) should also be absorbed."""

        class _FailingTerminal:
            def __init__(self, exc):
                self._exc = exc

            def write(self, data):
                raise self._exc

            def flush(self):
                raise self._exc

        for error in (OSError("EIO"), ValueError("closed file")):
            tee = _UpdateOutputStream(_FailingTerminal(error), io.StringIO())
            tee.write("x\n")
            assert tee._original_broken is True

    def test_log_failure_does_not_abort_write(self):
        """Even if the log file write raises, the original write must still happen."""

        class _FullDiskLog:
            def write(self, data):
                raise OSError("disk full")

            def flush(self):
                raise OSError("disk full")

        terminal = io.StringIO()
        tee = _UpdateOutputStream(terminal, _FullDiskLog())
        tee.write("data\n")
        assert terminal.getvalue() == "data\n"

    def test_flush_tolerates_broken_original(self):
        class _FlushRaises:
            def write(self, data):
                return len(data)

            def flush(self):
                raise BrokenPipeError("gone")

        tee = _UpdateOutputStream(_FlushRaises(), io.StringIO())
        tee.flush()  # must not raise
        assert tee._original_broken is True

    def test_isatty_delegates_to_original(self):
        class _Tty:
            def isatty(self):
                return True

            def write(self, data):
                return len(data)

            def flush(self):
                return None

        tee = _UpdateOutputStream(_Tty(), io.StringIO())
        assert tee.isatty() is True

    def test_isatty_returns_false_after_broken(self):
        class _TtyThatDies:
            def isatty(self):
                return True

            def write(self, data):
                raise BrokenPipeError()

            def flush(self):
                return None

        tee = _UpdateOutputStream(_TtyThatDies(), io.StringIO())
        tee.write("x")  # marks broken
        assert tee.isatty() is False

    def test_getattr_delegates_unknown_attrs(self):
        class _HasEncoding:
            encoding = "utf-8"

            def write(self, data):
                return len(data)

            def flush(self):
                return None

        tee = _UpdateOutputStream(_HasEncoding(), io.StringIO())
        assert tee.encoding == "utf-8"
# -----------------------------------------------------------------------------
# _install_hangup_protection
# -----------------------------------------------------------------------------
class TestInstallHangupProtection:
    # These tests mutate process-global state (signal handlers, sys.stdout /
    # sys.stderr); each one restores it in a finally block so failures do not
    # leak into other tests.
    def test_gateway_mode_is_noop(self):
        """In gateway mode the process is already detached — don't touch stdio or signals."""
        prev_out, prev_err = sys.stdout, sys.stderr
        prev_sighup = signal.getsignal(signal.SIGHUP) if hasattr(signal, "SIGHUP") else None
        state = _install_hangup_protection(gateway_mode=True)
        try:
            assert sys.stdout is prev_out
            assert sys.stderr is prev_err
            assert state["log_file"] is None
            assert state["installed"] is False
            if hasattr(signal, "SIGHUP"):
                assert signal.getsignal(signal.SIGHUP) == prev_sighup
        finally:
            _finalize_update_output(state)
    @pytest.mark.skipif(
        not hasattr(signal, "SIGHUP"), reason="SIGHUP not available on this platform"
    )
    def test_installs_sighup_ignore(self, tmp_path, monkeypatch):
        """SIGHUP should be set to SIG_IGN so SSH disconnect doesn't kill the update."""
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        # Clear cached get_hermes_home if present
        import hermes_cli.config as _cfg
        if hasattr(_cfg, "_HERMES_HOME_CACHE"):
            _cfg._HERMES_HOME_CACHE = None  # type: ignore[attr-defined]
        original_handler = signal.getsignal(signal.SIGHUP)
        state = _install_hangup_protection(gateway_mode=False)
        try:
            assert signal.getsignal(signal.SIGHUP) == signal.SIG_IGN
        finally:
            _finalize_update_output(state)
            # Restore whatever was there before so we don't leak to other tests.
            signal.signal(signal.SIGHUP, original_handler)
    def test_wraps_stdout_and_stderr_with_mirror(self, tmp_path, monkeypatch):
        # Happy path: stdio is wrapped and writes land in logs/update.log.
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        # Nuke any cached home path
        import hermes_cli.config as _cfg
        if hasattr(_cfg, "_HERMES_HOME_CACHE"):
            _cfg._HERMES_HOME_CACHE = None  # type: ignore[attr-defined]
        prev_out, prev_err = sys.stdout, sys.stderr
        state = _install_hangup_protection(gateway_mode=False)
        try:
            # On Windows (no SIGHUP) we still wrap stdio and create the log.
            assert state["installed"] is True
            assert isinstance(sys.stdout, _UpdateOutputStream)
            assert isinstance(sys.stderr, _UpdateOutputStream)
            assert state["log_file"] is not None
            sys.stdout.write("checking mirror\n")
            sys.stdout.flush()
            log_path = tmp_path / "logs" / "update.log"
            assert log_path.exists()
            contents = log_path.read_text(encoding="utf-8")
            assert "checking mirror" in contents
            assert "hermes update started" in contents
        finally:
            _finalize_update_output(state)
        # Sanity-check restoration
        assert sys.stdout is prev_out
        assert sys.stderr is prev_err
    def test_logs_dir_created_if_missing(self, tmp_path, monkeypatch):
        # The logs/ directory is created on demand along with update.log.
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        import hermes_cli.config as _cfg
        if hasattr(_cfg, "_HERMES_HOME_CACHE"):
            _cfg._HERMES_HOME_CACHE = None  # type: ignore[attr-defined]
        # No logs/ dir yet.
        assert not (tmp_path / "logs").exists()
        state = _install_hangup_protection(gateway_mode=False)
        try:
            assert (tmp_path / "logs").is_dir()
            assert (tmp_path / "logs" / "update.log").exists()
        finally:
            _finalize_update_output(state)
    def test_non_fatal_if_log_setup_fails(self, monkeypatch):
        """If get_hermes_home() raises, stdio must be left untouched but SIGHUP still handled."""
        prev_out, prev_err = sys.stdout, sys.stderr
        def _boom():
            raise RuntimeError("no home for you")
        # Patch the import inside _install_hangup_protection.
        monkeypatch.setattr(
            "hermes_cli.config.get_hermes_home", _boom, raising=True
        )
        original_handler = (
            signal.getsignal(signal.SIGHUP) if hasattr(signal, "SIGHUP") else None
        )
        state = _install_hangup_protection(gateway_mode=False)
        try:
            assert sys.stdout is prev_out
            assert sys.stderr is prev_err
            assert state["installed"] is False
            # SIGHUP must still be installed even when log setup fails.
            if hasattr(signal, "SIGHUP"):
                assert signal.getsignal(signal.SIGHUP) == signal.SIG_IGN
        finally:
            _finalize_update_output(state)
            if hasattr(signal, "SIGHUP") and original_handler is not None:
                signal.signal(signal.SIGHUP, original_handler)
# -----------------------------------------------------------------------------
# _finalize_update_output
# -----------------------------------------------------------------------------
class TestFinalizeUpdateOutput:
    """Teardown path: stream restoration and log-handle cleanup."""

    def test_none_state_is_noop(self):
        # A missing state dict means there is nothing to undo; must not raise.
        _finalize_update_output(None)

    def test_restores_streams_and_closes_log(self, tmp_path, monkeypatch):
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        import hermes_cli.config as _cfg

        # Drop any cached home path so the env var takes effect.
        if hasattr(_cfg, "_HERMES_HOME_CACHE"):
            _cfg._HERMES_HOME_CACHE = None  # type: ignore[attr-defined]
        saved_stdout = sys.stdout
        state = _install_hangup_protection(gateway_mode=False)
        handle = state["log_file"]
        assert sys.stdout is not saved_stdout
        assert handle is not None
        _finalize_update_output(state)
        # stdout is back to the pre-install object and the log is closed.
        assert sys.stdout is saved_stdout
        assert handle.closed is True

    def test_skipped_install_leaves_stdio_alone(self):
        """When install failed (state['installed']=False) finalize should not
        touch sys.stdout / sys.stderr (they were never wrapped)."""
        marker = object()
        failed_state = {
            "prev_stdout": marker,
            "prev_stderr": marker,
            "log_file": None,
            "installed": False,
        }
        out_before, err_before = sys.stdout, sys.stderr
        _finalize_update_output(failed_state)
        assert sys.stdout is out_before
        assert sys.stderr is err_before

View file

@ -1122,6 +1122,7 @@ class TestStatusRemoteGateway:
assert data["gateway_running"] is True
assert data["gateway_pid"] == 999
assert data["gateway_state"] == "running"
assert data["gateway_health_url"] == "http://gw:8642"
def test_status_remote_probe_not_attempted_when_local_pid_found(self, monkeypatch):
"""When local PID check succeeds, the remote probe is never called."""
@ -1158,6 +1159,7 @@ class TestStatusRemoteGateway:
assert resp.status_code == 200
data = resp.json()
assert data["gateway_running"] is False
assert data["gateway_health_url"] is None
def test_status_remote_running_null_pid(self, monkeypatch):
"""Remote gateway running but PID not in response — pid should be None."""

View file

@ -1,17 +1,9 @@
"""Tests for Xiaomi MiMo provider support."""
import os
import sys
import types
import pytest
# Ensure dotenv doesn't interfere
if "dotenv" not in sys.modules:
fake_dotenv = types.ModuleType("dotenv")
fake_dotenv.load_dotenv = lambda *args, **kwargs: None
sys.modules["dotenv"] = fake_dotenv
from hermes_cli.auth import (
PROVIDER_REGISTRY,
resolve_provider,