Merge branch 'main' into rewbs/tool-use-charge-to-subscription

This commit is contained in:
Robin Fernandes 2026-03-31 08:48:54 +09:00
commit 6e4598ce1e
269 changed files with 33678 additions and 2273 deletions

View file

@ -105,3 +105,24 @@ class TestCmdUpdateBranchFallback:
commands = [" ".join(str(a) for a in c.args[0]) for c in mock_run.call_args_list]
pull_cmds = [c for c in commands if "pull" in c]
assert len(pull_cmds) == 0
def test_update_non_interactive_skips_migration_prompt(self, mock_args, capsys):
    """When stdin/stdout aren't TTYs, config migration prompt is skipped."""
    # shutil.which -> None and subprocess.run mocked: no real tooling runs.
    # builtins.input is mocked so an accidental prompt is detectable.
    with patch("shutil.which", return_value=None), patch(
        "subprocess.run"
    ) as mock_run, patch("builtins.input") as mock_input, patch(
        "hermes_cli.config.get_missing_env_vars", return_value=["MISSING_KEY"]
    ), patch("hermes_cli.config.get_missing_config_fields", return_value=[]), patch(
        # On-disk config version 1 vs current 2 -> a migration is pending.
        "hermes_cli.config.check_config_version", return_value=(1, 2)
    ), patch("hermes_cli.main.sys") as mock_sys:
        # Simulate a non-interactive session: neither stream is a TTY.
        mock_sys.stdin.isatty.return_value = False
        mock_sys.stdout.isatty.return_value = False
        mock_run.side_effect = _make_run_side_effect(
            branch="main", verify_ok=True, commit_count="1"
        )
        cmd_update(mock_args)
        # The migration prompt must never be shown in a non-TTY session...
        mock_input.assert_not_called()
        # ...and the skip reason is reported to the user.
        captured = capsys.readouterr()
        assert "Non-interactive session" in captured.out

View file

@ -1,6 +1,7 @@
"""Tests for gateway service management helpers."""
import os
from pathlib import Path
from types import SimpleNamespace
import hermes_cli.gateway as gateway_cli
@ -152,12 +153,13 @@ class TestLaunchdServiceRecovery:
def test_launchd_start_reloads_unloaded_job_and_retries(self, tmp_path, monkeypatch):
plist_path = tmp_path / "ai.hermes.gateway.plist"
plist_path.write_text(gateway_cli.generate_launchd_plist(), encoding="utf-8")
label = gateway_cli.get_launchd_label()
calls = []
def fake_run(cmd, check=False, **kwargs):
calls.append(cmd)
if cmd == ["launchctl", "start", "ai.hermes.gateway"] and calls.count(cmd) == 1:
if cmd == ["launchctl", "start", label] and calls.count(cmd) == 1:
raise gateway_cli.subprocess.CalledProcessError(3, cmd, stderr="Could not find service")
return SimpleNamespace(returncode=0, stdout="", stderr="")
@ -167,9 +169,9 @@ class TestLaunchdServiceRecovery:
gateway_cli.launchd_start()
assert calls == [
["launchctl", "start", "ai.hermes.gateway"],
["launchctl", "start", label],
["launchctl", "load", str(plist_path)],
["launchctl", "start", "ai.hermes.gateway"],
["launchctl", "start", label],
]
def test_launchd_status_reports_local_stale_plist_when_unloaded(self, tmp_path, monkeypatch, capsys):
@ -354,6 +356,20 @@ class TestGeneratedUnitUsesDetectedVenv:
assert "/venv/" not in unit or "/.venv/" in unit
class TestGeneratedUnitIncludesLocalBin:
    """~/.local/bin must be in PATH so uvx/pipx tools are discoverable."""

    def test_user_unit_includes_local_bin_in_path(self):
        unit_text = gateway_cli.generate_systemd_unit(system=False)
        expected_entry = f"{Path.home()}/.local/bin"
        assert expected_entry in unit_text

    def test_system_unit_includes_local_bin_in_path(self):
        unit_text = gateway_cli.generate_systemd_unit(system=True)
        # System unit uses the resolved home dir from _system_service_identity,
        # so only the path suffix is asserted here.
        assert "/.local/bin" in unit_text
class TestEnsureUserSystemdEnv:
"""Tests for _ensure_user_systemd_env() D-Bus session bus auto-detection."""

View file

@ -0,0 +1,44 @@
"""Tests for Nous subscription feature detection."""
from hermes_cli import nous_subscription as ns
def test_get_nous_subscription_features_recognizes_direct_exa_backend(monkeypatch):
    """A direct EXA_API_KEY with backend 'exa' is reported as a direct override."""
    fake_env = {"EXA_API_KEY": "exa-test"}
    stubs = {
        "get_env_value": lambda name: fake_env.get(name, ""),
        "get_nous_auth_status": lambda: {},
        "managed_nous_tools_enabled": lambda: False,
        "_toolset_enabled": lambda config, key: key == "web",
        "_has_agent_browser": lambda: False,
        "resolve_openai_audio_api_key": lambda: "",
        "has_direct_modal_credentials": lambda: False,
    }
    for attr_name, stub in stubs.items():
        monkeypatch.setattr(ns, attr_name, stub)
    features = ns.get_nous_subscription_features({"web": {"backend": "exa"}})
    web = features.web
    assert web.available is True
    assert web.active is True
    assert web.managed_by_nous is False
    assert web.direct_override is True
    assert web.current_provider == "exa"
def test_get_nous_subscription_features_prefers_managed_modal_in_auto_mode(monkeypatch):
    """In auto mode with a ready managed gateway, modal is managed-by-Nous."""
    monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
    stubs = {
        "get_env_value": lambda name: "",
        "get_nous_auth_status": lambda: {"logged_in": True},
        "managed_nous_tools_enabled": lambda: True,
        "_toolset_enabled": lambda config, key: key == "terminal",
        "_has_agent_browser": lambda: False,
        "resolve_openai_audio_api_key": lambda: "",
        "has_direct_modal_credentials": lambda: True,
        "is_managed_tool_gateway_ready": lambda vendor: vendor == "modal",
    }
    for attr_name, stub in stubs.items():
        monkeypatch.setattr(ns, attr_name, stub)
    features = ns.get_nous_subscription_features(
        {"terminal": {"backend": "modal", "modal_mode": "auto"}}
    )
    modal = features.modal
    assert modal.available is True
    assert modal.active is True
    assert modal.managed_by_nous is True
    assert modal.direct_override is False

View file

@ -0,0 +1,622 @@
"""Comprehensive tests for hermes_cli.profiles module.
Tests cover: validation, directory resolution, CRUD operations, active profile
management, export/import, renaming, alias collision checks, profile isolation,
and shell completion generation.
"""
import json
import os
import shutil
import tarfile
from pathlib import Path
from unittest.mock import patch, MagicMock

import pytest
from hermes_cli.profiles import (
validate_profile_name,
get_profile_dir,
create_profile,
delete_profile,
list_profiles,
set_active_profile,
get_active_profile,
get_active_profile_name,
resolve_profile_env,
check_alias_collision,
rename_profile,
export_profile,
import_profile,
generate_bash_completion,
generate_zsh_completion,
_get_profiles_root,
_get_default_hermes_home,
)
# ---------------------------------------------------------------------------
# Shared fixture: redirect Path.home() and HERMES_HOME for profile tests
# ---------------------------------------------------------------------------
@pytest.fixture()
def profile_env(tmp_path, monkeypatch):
    """Isolated sandbox for profile tests.

    Redirects ``Path.home()`` to *tmp_path* and points ``HERMES_HOME`` at
    ``tmp_path/.hermes`` (created here), so every profile helper resolves
    its paths inside the pytest tmp dir. Returns *tmp_path*.
    """
    hermes_home = tmp_path / ".hermes"
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    hermes_home.mkdir(exist_ok=True)
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    return tmp_path
# ===================================================================
# TestValidateProfileName
# ===================================================================
class TestValidateProfileName:
    """validate_profile_name(): accept/reject rules and length boundaries."""

    @pytest.mark.parametrize("name", ["coder", "work-bot", "a1", "my_agent"])
    def test_valid_names_accepted(self, name):
        validate_profile_name(name)  # must not raise

    @pytest.mark.parametrize("name", ["UPPER", "has space", ".hidden", "-leading"])
    def test_invalid_names_rejected(self, name):
        with pytest.raises(ValueError):
            validate_profile_name(name)

    def test_too_long_rejected(self):
        # 65 characters is one past the 64-char ceiling.
        with pytest.raises(ValueError):
            validate_profile_name("a" * 65)

    def test_max_length_accepted(self):
        # Exactly 64 characters (leading char + up to 63 more) is still legal.
        validate_profile_name("a" * 64)

    def test_default_accepted(self):
        # 'default' is a special-case pass-through.
        validate_profile_name("default")

    def test_empty_string_rejected(self):
        with pytest.raises(ValueError):
            validate_profile_name("")
# ===================================================================
# TestGetProfileDir
# ===================================================================
class TestGetProfileDir:
    """get_profile_dir(): default home vs named-profile subdirectory."""

    def test_default_returns_hermes_home(self, profile_env):
        assert get_profile_dir("default") == profile_env / ".hermes"

    def test_named_profile_returns_profiles_subdir(self, profile_env):
        expected = profile_env / ".hermes" / "profiles" / "coder"
        assert get_profile_dir("coder") == expected
# ===================================================================
# TestCreateProfile
# ===================================================================
class TestCreateProfile:
    """create_profile(): directory layout, cloning, and error cases."""

    # Every freshly-created profile must contain these subdirectories.
    EXPECTED_SUBDIRS = (
        "memories", "sessions", "skills", "skins",
        "logs", "plans", "workspace", "cron",
    )

    def test_creates_directory_with_subdirs(self, profile_env):
        created = create_profile("coder", no_alias=True)
        assert created.is_dir()
        for subdir in self.EXPECTED_SUBDIRS:
            assert (created / subdir).is_dir(), f"Missing subdir: {subdir}"

    def test_duplicate_raises_file_exists(self, profile_env):
        create_profile("coder", no_alias=True)
        with pytest.raises(FileExistsError):
            create_profile("coder", no_alias=True)

    def test_default_raises_value_error(self, profile_env):
        with pytest.raises(ValueError, match="default"):
            create_profile("default", no_alias=True)

    def test_invalid_name_raises_value_error(self, profile_env):
        with pytest.raises(ValueError):
            create_profile("INVALID!", no_alias=True)

    def test_clone_config_copies_files(self, profile_env):
        source_home = profile_env / ".hermes"
        seeds = {
            "config.yaml": "model: test",
            ".env": "KEY=val",
            "SOUL.md": "Be helpful.",
        }
        for fname, body in seeds.items():
            (source_home / fname).write_text(body)
        cloned = create_profile("coder", clone_config=True, no_alias=True)
        for fname, body in seeds.items():
            assert (cloned / fname).read_text() == body

    def test_clone_all_copies_entire_tree(self, profile_env):
        source_home = profile_env / ".hermes"
        (source_home / "memories").mkdir(exist_ok=True)
        (source_home / "memories" / "note.md").write_text("remember this")
        (source_home / "config.yaml").write_text("model: gpt-4")
        # Runtime artifacts that a full clone must strip out.
        (source_home / "gateway.pid").write_text("12345")
        (source_home / "gateway_state.json").write_text("{}")
        (source_home / "processes.json").write_text("[]")
        cloned = create_profile("coder", clone_all=True, no_alias=True)
        # Real content survives the copy...
        assert (cloned / "memories" / "note.md").read_text() == "remember this"
        assert (cloned / "config.yaml").read_text() == "model: gpt-4"
        # ...while runtime state does not.
        for runtime_file in ("gateway.pid", "gateway_state.json", "processes.json"):
            assert not (cloned / runtime_file).exists()

    def test_clone_config_missing_files_skipped(self, profile_env):
        """clone_config tolerates absent optional files in the source."""
        cloned = create_profile("coder", clone_config=True, no_alias=True)
        for fname in ("config.yaml", ".env", "SOUL.md"):
            assert not (cloned / fname).exists()
# ===================================================================
# TestDeleteProfile
# ===================================================================
class TestDeleteProfile:
    """delete_profile(): removal plus guard rails."""

    def test_removes_directory(self, profile_env):
        target = create_profile("coder", no_alias=True)
        assert target.is_dir()
        # Stub the gateway cleanup so no real systemd/launchd calls happen.
        with patch("hermes_cli.profiles._cleanup_gateway_service"):
            delete_profile("coder", yes=True)
        assert not target.is_dir()

    def test_default_raises_value_error(self, profile_env):
        with pytest.raises(ValueError, match="default"):
            delete_profile("default", yes=True)

    def test_nonexistent_raises_file_not_found(self, profile_env):
        with pytest.raises(FileNotFoundError):
            delete_profile("nonexistent", yes=True)
# ===================================================================
# TestListProfiles
# ===================================================================
class TestListProfiles:
    """list_profiles(): membership and ordering guarantees."""

    def test_returns_default_when_no_named_profiles(self, profile_env):
        assert "default" in [p.name for p in list_profiles()]

    def test_includes_named_profiles(self, profile_env):
        for name in ("alpha", "beta"):
            create_profile(name, no_alias=True)
        listed = {p.name for p in list_profiles()}
        assert {"alpha", "beta"} <= listed

    def test_sorted_alphabetically(self, profile_env):
        for name in ("zebra", "alpha", "middle"):
            create_profile(name, no_alias=True)
        named = [p.name for p in list_profiles() if not p.is_default]
        assert named == sorted(named)

    def test_default_is_first(self, profile_env):
        create_profile("alpha", no_alias=True)
        head = list_profiles()[0]
        assert head.name == "default"
        assert head.is_default is True
# ===================================================================
# TestActiveProfile
# ===================================================================
class TestActiveProfile:
    """set_active_profile() / get_active_profile(): round-trips and fallbacks."""

    def test_set_and_get_roundtrip(self, profile_env):
        create_profile("coder", no_alias=True)
        set_active_profile("coder")
        assert get_active_profile() == "coder"

    def test_no_file_returns_default(self, profile_env):
        assert get_active_profile() == "default"

    def test_empty_file_returns_default(self, profile_env):
        marker = profile_env / ".hermes" / "active_profile"
        marker.write_text("")
        assert get_active_profile() == "default"

    def test_set_to_default_removes_file(self, profile_env):
        marker = profile_env / ".hermes" / "active_profile"
        create_profile("coder", no_alias=True)
        set_active_profile("coder")
        assert marker.exists()
        # Activating 'default' clears the marker file entirely.
        set_active_profile("default")
        assert not marker.exists()

    def test_set_nonexistent_raises(self, profile_env):
        with pytest.raises(FileNotFoundError):
            set_active_profile("nonexistent")
# ===================================================================
# TestGetActiveProfileName
# ===================================================================
class TestGetActiveProfileName:
    """get_active_profile_name(): derives a display name from HERMES_HOME."""

    def test_default_hermes_home_returns_default(self, profile_env):
        # HERMES_HOME is tmp_path/.hermes, i.e. the default home.
        assert get_active_profile_name() == "default"

    def test_profile_path_returns_profile_name(self, profile_env, monkeypatch):
        create_profile("coder", no_alias=True)
        coder_home = profile_env / ".hermes" / "profiles" / "coder"
        monkeypatch.setenv("HERMES_HOME", str(coder_home))
        assert get_active_profile_name() == "coder"

    def test_custom_path_returns_custom(self, profile_env, monkeypatch):
        elsewhere = profile_env / "some" / "other" / "path"
        elsewhere.mkdir(parents=True)
        monkeypatch.setenv("HERMES_HOME", str(elsewhere))
        assert get_active_profile_name() == "custom"
# ===================================================================
# TestResolveProfileEnv
# ===================================================================
class TestResolveProfileEnv:
    """resolve_profile_env(): maps a profile name to its home path string."""

    def test_existing_profile_returns_path(self, profile_env):
        create_profile("coder", no_alias=True)
        expected = profile_env / ".hermes" / "profiles" / "coder"
        assert resolve_profile_env("coder") == str(expected)

    def test_default_returns_default_home(self, profile_env):
        assert resolve_profile_env("default") == str(profile_env / ".hermes")

    def test_nonexistent_raises_file_not_found(self, profile_env):
        with pytest.raises(FileNotFoundError):
            resolve_profile_env("nonexistent")

    def test_invalid_name_raises_value_error(self, profile_env):
        with pytest.raises(ValueError):
            resolve_profile_env("INVALID!")
# ===================================================================
# TestAliasCollision
# ===================================================================
class TestAliasCollision:
    """check_alias_collision(): reserved words, subcommands, clean names."""

    def test_normal_name_returns_none(self, profile_env):
        # Simulate `which` finding nothing on PATH.
        with patch("subprocess.run") as mock_run:
            mock_run.return_value = MagicMock(returncode=1, stdout="")
            assert check_alias_collision("mybot") is None

    def test_reserved_name_returns_message(self, profile_env):
        message = check_alias_collision("hermes")
        assert message is not None
        assert "reserved" in message.lower()

    def test_subcommand_returns_message(self, profile_env):
        message = check_alias_collision("chat")
        assert message is not None
        assert "subcommand" in message.lower()

    def test_default_is_reserved(self, profile_env):
        message = check_alias_collision("default")
        assert message is not None
        assert "reserved" in message.lower()
# ===================================================================
# TestRenameProfile
# ===================================================================
class TestRenameProfile:
    """rename_profile(): directory moves plus guard rails."""

    def test_renames_directory(self, profile_env):
        create_profile("oldname", no_alias=True)
        profiles_root = profile_env / ".hermes" / "profiles"
        old_dir = profiles_root / "oldname"
        assert old_dir.is_dir()
        # Stub the alias-collision probe so no subprocess is spawned.
        with patch("hermes_cli.profiles.check_alias_collision", return_value="skip"):
            new_dir = rename_profile("oldname", "newname")
        assert not old_dir.is_dir()
        assert new_dir.is_dir()
        assert new_dir == profiles_root / "newname"

    def test_default_raises_value_error(self, profile_env):
        with pytest.raises(ValueError, match="default"):
            rename_profile("default", "newname")

    def test_rename_to_default_raises_value_error(self, profile_env):
        create_profile("coder", no_alias=True)
        with pytest.raises(ValueError, match="default"):
            rename_profile("coder", "default")

    def test_nonexistent_raises_file_not_found(self, profile_env):
        with pytest.raises(FileNotFoundError):
            rename_profile("nonexistent", "newname")

    def test_target_exists_raises_file_exists(self, profile_env):
        for name in ("alpha", "beta"):
            create_profile(name, no_alias=True)
        with pytest.raises(FileExistsError):
            rename_profile("alpha", "beta")
# ===================================================================
# TestExportImport
# ===================================================================
class TestExportImport:
    """export_profile() / import_profile(): archive round-trips and errors.

    Each test builds a real profile on disk, exports it to a ``.tar.gz``,
    and exercises the matching import/error path. The previously
    function-local ``import shutil`` now lives in the module import block.
    """

    @staticmethod
    def _export(tmp_path, name):
        """Export profile *name* to tmp_path/export/<name>.tar.gz; return the path."""
        archive_path = tmp_path / "export" / f"{name}.tar.gz"
        archive_path.parent.mkdir(parents=True, exist_ok=True)
        export_profile(name, str(archive_path))
        return archive_path

    def test_export_creates_tar_gz(self, profile_env, tmp_path):
        create_profile("coder", no_alias=True)
        # Marker file lets later tests verify archive content survives.
        (get_profile_dir("coder") / "marker.txt").write_text("hello")
        output = tmp_path / "export" / "coder.tar.gz"
        output.parent.mkdir(parents=True, exist_ok=True)
        result = export_profile("coder", str(output))
        assert Path(result).exists()
        assert tarfile.is_tarfile(str(result))

    def test_import_restores_from_archive(self, profile_env, tmp_path):
        create_profile("coder", no_alias=True)
        profile_dir = get_profile_dir("coder")
        (profile_dir / "marker.txt").write_text("hello")
        archive_path = self._export(tmp_path, "coder")
        # Remove the live profile, then restore it from the archive.
        shutil.rmtree(profile_dir)
        assert not profile_dir.is_dir()
        imported = import_profile(str(archive_path), name="coder")
        assert imported.is_dir()
        assert (imported / "marker.txt").read_text() == "hello"

    def test_import_to_existing_name_raises(self, profile_env, tmp_path):
        create_profile("coder", no_alias=True)
        archive_path = self._export(tmp_path, "coder")
        # Importing over an existing profile name must fail.
        with pytest.raises(FileExistsError):
            import_profile(str(archive_path), name="coder")

    def test_export_nonexistent_raises(self, profile_env, tmp_path):
        with pytest.raises(FileNotFoundError):
            export_profile("nonexistent", str(tmp_path / "out.tar.gz"))
# ===================================================================
# TestProfileIsolation
# ===================================================================
class TestProfileIsolation:
    """Two profiles must never share any on-disk path."""

    def test_separate_config_paths(self, profile_env):
        for name in ("alpha", "beta"):
            create_profile(name, no_alias=True)
        alpha, beta = get_profile_dir("alpha"), get_profile_dir("beta")
        assert alpha / "config.yaml" != beta / "config.yaml"
        # Neither profile dir may be nested inside the other.
        assert str(alpha) not in str(beta)

    def test_separate_state_db_paths(self, profile_env):
        alpha, beta = get_profile_dir("alpha"), get_profile_dir("beta")
        assert alpha / "state.db" != beta / "state.db"

    def test_separate_skills_paths(self, profile_env):
        for name in ("alpha", "beta"):
            create_profile(name, no_alias=True)
        alpha, beta = get_profile_dir("alpha"), get_profile_dir("beta")
        assert alpha / "skills" != beta / "skills"
        # Both skills dirs exist as independent directories.
        assert (alpha / "skills").is_dir()
        assert (beta / "skills").is_dir()
# ===================================================================
# TestCompletion
# ===================================================================
class TestCompletion:
    """Shell completion generators emit non-empty, recognizable scripts."""

    def test_bash_completion_contains_complete(self):
        bash_script = generate_bash_completion()
        assert bash_script
        assert "complete" in bash_script

    def test_zsh_completion_contains_compdef(self):
        zsh_script = generate_zsh_completion()
        assert zsh_script
        assert "compdef" in zsh_script

    def test_bash_completion_has_hermes_profiles_function(self):
        assert "_hermes_profiles" in generate_bash_completion()

    def test_zsh_completion_has_hermes_function(self):
        assert "_hermes" in generate_zsh_completion()
# ===================================================================
# TestGetProfilesRoot / TestGetDefaultHermesHome (internal helpers)
# ===================================================================
class TestInternalHelpers:
    """_get_profiles_root() and _get_default_hermes_home() path resolution."""

    def test_profiles_root_under_home(self, profile_env):
        assert _get_profiles_root() == profile_env / ".hermes" / "profiles"

    def test_default_hermes_home(self, profile_env):
        assert _get_default_hermes_home() == profile_env / ".hermes"
# ===================================================================
# Edge cases and additional coverage
# ===================================================================
class TestEdgeCases:
    """Edge cases: returned paths, default-profile info, pid probing, names."""

    def test_create_profile_returns_correct_path(self, profile_env):
        result = create_profile("mybot", no_alias=True)
        assert result == profile_env / ".hermes" / "profiles" / "mybot"

    def test_list_profiles_default_info_fields(self, profile_env):
        profiles = list_profiles()
        # next() instead of building a throwaway filtered list just for [0].
        default = next(p for p in profiles if p.name == "default")
        assert default.is_default is True
        assert default.gateway_running is False
        assert default.skill_count == 0

    def test_gateway_running_check_with_pid_file(self, profile_env):
        """_check_gateway_running reads the pid file and probes os.kill."""
        from hermes_cli.profiles import _check_gateway_running
        default_home = profile_env / ".hermes"
        # No pid file at all -> not running.
        assert _check_gateway_running(default_home) is False
        # JSON-payload pid file pointing at a (very likely) dead pid.
        pid_file = default_home / "gateway.pid"
        pid_file.write_text(json.dumps({"pid": 99999}))
        # os.kill(99999, 0) should raise ProcessLookupError -> not running.
        assert _check_gateway_running(default_home) is False
        # A no-op os.kill simulates a live process.
        with patch("os.kill", return_value=None):
            assert _check_gateway_running(default_home) is True

    def test_gateway_running_check_plain_pid(self, profile_env):
        """Legacy pid files contain a bare integer instead of JSON."""
        from hermes_cli.profiles import _check_gateway_running
        default_home = profile_env / ".hermes"
        (default_home / "gateway.pid").write_text("99999")
        with patch("os.kill", return_value=None):
            assert _check_gateway_running(default_home) is True

    def test_profile_name_boundary_single_char(self):
        """A single alphanumeric character is a valid name."""
        validate_profile_name("a")
        validate_profile_name("1")

    def test_profile_name_boundary_all_hyphens(self):
        """A leading hyphen is rejected."""
        with pytest.raises(ValueError):
            validate_profile_name("-abc")

    def test_profile_name_underscore_start(self):
        """A leading underscore is rejected (must start with [a-z0-9])."""
        with pytest.raises(ValueError):
            validate_profile_name("_abc")

    def test_clone_from_named_profile(self, profile_env):
        """clone_config can source from a named (non-default) profile."""
        source_dir = create_profile("source", no_alias=True)
        (source_dir / "config.yaml").write_text("model: cloned")
        (source_dir / ".env").write_text("SECRET=yes")
        target_dir = create_profile(
            "target", clone_from="source", clone_config=True, no_alias=True,
        )
        assert (target_dir / "config.yaml").read_text() == "model: cloned"
        assert (target_dir / ".env").read_text() == "SECRET=yes"

    def test_delete_clears_active_profile(self, profile_env):
        """Deleting the active profile resets the active marker to default."""
        create_profile("coder", no_alias=True)
        set_active_profile("coder")
        assert get_active_profile() == "coder"
        with patch("hermes_cli.profiles._cleanup_gateway_service"):
            delete_profile("coder", yes=True)
        assert get_active_profile() == "default"

View file

@ -94,7 +94,7 @@ class TestOfferOpenclawMigration:
fake_mod.Migrator.assert_called_once()
call_kwargs = fake_mod.Migrator.call_args[1]
assert call_kwargs["execute"] is True
assert call_kwargs["overwrite"] is False
assert call_kwargs["overwrite"] is True
assert call_kwargs["migrate_secrets"] is True
assert call_kwargs["preset_name"] == "full"
fake_migrator.migrate.assert_called_once()
@ -285,3 +285,182 @@ class TestSetupWizardOpenclawIntegration:
setup_mod.run_setup_wizard(args)
mock_migration.assert_not_called()
# ---------------------------------------------------------------------------
# _get_section_config_summary / _skip_configured_section — unit tests
# ---------------------------------------------------------------------------
class TestGetSectionConfigSummary:
    """_get_section_config_summary(): per-section one-line summaries."""

    @staticmethod
    def _env(values):
        """Build a get_env_value stand-in backed by the *values* mapping."""
        return lambda key: values.get(key, "")

    def test_model_returns_none_without_api_key(self):
        with patch.object(setup_mod, "get_env_value", side_effect=self._env({})):
            assert setup_mod._get_section_config_summary({}, "model") is None

    def test_model_returns_summary_with_api_key(self):
        env = self._env({"OPENROUTER_API_KEY": "sk-xxx"})
        with patch.object(setup_mod, "get_env_value", side_effect=env):
            summary = setup_mod._get_section_config_summary(
                {"model": "openai/gpt-4"}, "model"
            )
        assert summary == "openai/gpt-4"

    def test_model_returns_dict_default_key(self):
        env = self._env({"OPENAI_API_KEY": "sk-xxx"})
        with patch.object(setup_mod, "get_env_value", side_effect=env):
            summary = setup_mod._get_section_config_summary(
                {"model": {"default": "claude-opus-4", "provider": "anthropic"}},
                "model",
            )
        assert summary == "claude-opus-4"

    def test_terminal_always_returns(self):
        with patch.object(setup_mod, "get_env_value", side_effect=self._env({})):
            summary = setup_mod._get_section_config_summary(
                {"terminal": {"backend": "docker"}}, "terminal"
            )
        assert summary == "backend: docker"

    def test_agent_always_returns(self):
        with patch.object(setup_mod, "get_env_value", side_effect=self._env({})):
            summary = setup_mod._get_section_config_summary(
                {"agent": {"max_turns": 120}}, "agent"
            )
        assert summary == "max turns: 120"

    def test_gateway_returns_none_without_tokens(self):
        with patch.object(setup_mod, "get_env_value", side_effect=self._env({})):
            assert setup_mod._get_section_config_summary({}, "gateway") is None

    def test_gateway_lists_platforms(self):
        env = self._env(
            {"TELEGRAM_BOT_TOKEN": "tok123", "DISCORD_BOT_TOKEN": "disc456"}
        )
        with patch.object(setup_mod, "get_env_value", side_effect=env):
            summary = setup_mod._get_section_config_summary({}, "gateway")
        assert "Telegram" in summary
        assert "Discord" in summary

    def test_tools_returns_none_without_keys(self):
        with patch.object(setup_mod, "get_env_value", side_effect=self._env({})):
            assert setup_mod._get_section_config_summary({}, "tools") is None

    def test_tools_lists_configured(self):
        env = self._env({"BROWSERBASE_API_KEY": "key"})
        with patch.object(setup_mod, "get_env_value", side_effect=env):
            summary = setup_mod._get_section_config_summary({}, "tools")
        assert "Browser" in summary
class TestSkipConfiguredSection:
    """_skip_configured_section(): skip-vs-reconfigure decision."""

    @staticmethod
    def _env_with_key(key):
        """get_env_value stand-in exposing a single populated key."""
        return lambda name: "sk-xxx" if name == key else ""

    def test_returns_false_when_not_configured(self):
        with patch.object(setup_mod, "get_env_value", return_value=""):
            assert setup_mod._skip_configured_section({}, "model", "Model") is False

    def test_returns_true_when_user_skips(self):
        with (
            patch.object(
                setup_mod, "get_env_value",
                side_effect=self._env_with_key("OPENROUTER_API_KEY"),
            ),
            patch.object(setup_mod, "prompt_yes_no", return_value=False),
        ):
            skipped = setup_mod._skip_configured_section(
                {"model": "openai/gpt-4"}, "model", "Model"
            )
        assert skipped is True

    def test_returns_false_when_user_wants_reconfig(self):
        with (
            patch.object(
                setup_mod, "get_env_value",
                side_effect=self._env_with_key("OPENROUTER_API_KEY"),
            ),
            patch.object(setup_mod, "prompt_yes_no", return_value=True),
        ):
            skipped = setup_mod._skip_configured_section(
                {"model": "openai/gpt-4"}, "model", "Model"
            )
        assert skipped is False
class TestSetupWizardSkipsConfiguredSections:
    """After migration, already-configured sections should offer skip."""

    def test_sections_skipped_when_migration_imported_settings(self, tmp_path):
        """When migration ran and API key exists, model section should be skippable.

        Simulates the real flow: get_env_value returns "" during the is_existing
        check (before migration), then returns a key after migration imported it.
        """
        args = _first_time_args()
        # Track whether migration has "run" — after it does, API key is available
        migration_done = {"value": False}

        def env_side(key):
            # Only after fake_migration flips the flag does the model key exist.
            if migration_done["value"] and key == "OPENROUTER_API_KEY":
                return "sk-xxx"
            return ""

        def fake_migration(hermes_home):
            migration_done["value"] = True
            return True

        # Config as reloaded after a successful migration.
        reloaded_config = {"model": "openai/gpt-4"}
        with (
            patch.object(setup_mod, "ensure_hermes_home"),
            # First load: pre-migration (empty); second: post-migration config.
            patch.object(
                setup_mod, "load_config",
                side_effect=[{}, reloaded_config],
            ),
            patch.object(setup_mod, "get_hermes_home", return_value=tmp_path),
            patch.object(setup_mod, "get_env_value", side_effect=env_side),
            patch.object(setup_mod, "is_interactive_stdin", return_value=True),
            patch("hermes_cli.auth.get_active_provider", return_value=None),
            patch("builtins.input", return_value=""),
            # Migration succeeds and flips the env_side flag
            patch.object(
                setup_mod, "_offer_openclaw_migration",
                side_effect=fake_migration,
            ),
            # User says No to all reconfig prompts
            patch.object(setup_mod, "prompt_yes_no", return_value=False),
            patch.object(setup_mod, "setup_model_provider") as mock_model,
            patch.object(setup_mod, "setup_terminal_backend") as mock_terminal,
            patch.object(setup_mod, "setup_agent_settings") as mock_agent,
            patch.object(setup_mod, "setup_gateway") as mock_gateway,
            patch.object(setup_mod, "setup_tools") as mock_tools,
            patch.object(setup_mod, "save_config"),
            patch.object(setup_mod, "_print_setup_summary"),
        ):
            setup_mod.run_setup_wizard(args)
        # Model has API key → skip offered, user said No → section NOT called
        mock_model.assert_not_called()
        # Terminal/agent always have a summary → skip offered, user said No
        mock_terminal.assert_not_called()
        mock_agent.assert_not_called()
        # Gateway has no tokens (env_side returns "" for gateway keys) → section runs
        mock_gateway.assert_called_once()
        # Tools have no keys → section runs
        mock_tools.assert_called_once()

View file

@ -1,10 +1,13 @@
"""
Tests for skip_confirm behavior in /skills install and /skills uninstall.
Tests for skip_confirm and invalidate_cache behavior in /skills install
and /skills uninstall slash commands.
Verifies that --yes / -y bypasses the interactive confirmation prompt
that hangs inside prompt_toolkit's TUI.
Slash commands always skip confirmation (input() hangs in TUI).
Cache invalidation is deferred by default; --now opts into immediate
invalidation (at the cost of breaking prompt cache mid-session).
Based on PR #1595 by 333Alden333 (salvaged).
Updated for PR #3586 (cache-aware install/uninstall).
"""
from unittest.mock import patch, MagicMock
@ -32,23 +35,43 @@ class TestHandleSkillsSlashInstallFlags:
_, kwargs = mock_install.call_args
assert kwargs.get("skip_confirm") is True
def test_force_flag_sets_force_not_skip(self):
def test_force_flag_sets_force(self):
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_install") as mock_install:
handle_skills_slash("/skills install test/skill --force")
mock_install.assert_called_once()
_, kwargs = mock_install.call_args
assert kwargs.get("force") is True
assert kwargs.get("skip_confirm") is False
# Slash commands always skip confirmation (input() hangs in TUI)
assert kwargs.get("skip_confirm") is True
def test_no_flags(self):
def test_no_flags_still_skips_confirm(self):
"""Slash commands always skip confirmation — input() hangs in TUI."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_install") as mock_install:
handle_skills_slash("/skills install test/skill")
mock_install.assert_called_once()
_, kwargs = mock_install.call_args
assert kwargs.get("force") is False
assert kwargs.get("skip_confirm") is False
assert kwargs.get("skip_confirm") is True
def test_default_defers_cache_invalidation(self):
"""Without --now, cache invalidation is deferred to next session."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_install") as mock_install:
handle_skills_slash("/skills install test/skill")
mock_install.assert_called_once()
_, kwargs = mock_install.call_args
assert kwargs.get("invalidate_cache") is False
def test_now_flag_invalidates_cache(self):
"""--now opts into immediate cache invalidation."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_install") as mock_install:
handle_skills_slash("/skills install test/skill --now")
mock_install.assert_called_once()
_, kwargs = mock_install.call_args
assert kwargs.get("invalidate_cache") is True
class TestHandleSkillsSlashUninstallFlags:
@ -70,13 +93,32 @@ class TestHandleSkillsSlashUninstallFlags:
_, kwargs = mock_uninstall.call_args
assert kwargs.get("skip_confirm") is True
def test_no_flags(self):
def test_no_flags_still_skips_confirm(self):
"""Slash commands always skip confirmation — input() hangs in TUI."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_uninstall") as mock_uninstall:
handle_skills_slash("/skills uninstall test-skill")
mock_uninstall.assert_called_once()
_, kwargs = mock_uninstall.call_args
assert kwargs.get("skip_confirm", False) is False
assert kwargs.get("skip_confirm") is True
def test_default_defers_cache_invalidation(self):
"""Without --now, cache invalidation is deferred to next session."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_uninstall") as mock_uninstall:
handle_skills_slash("/skills uninstall test-skill")
mock_uninstall.assert_called_once()
_, kwargs = mock_uninstall.call_args
assert kwargs.get("invalidate_cache") is False
def test_now_flag_invalidates_cache(self):
"""--now opts into immediate cache invalidation."""
from hermes_cli.skills_hub import handle_skills_slash
with patch("hermes_cli.skills_hub.do_uninstall") as mock_uninstall:
handle_skills_slash("/skills uninstall test-skill --now")
mock_uninstall.assert_called_once()
_, kwargs = mock_uninstall.call_args
assert kwargs.get("invalidate_cache") is True
class TestDoInstallSkipConfirm:

View file

@ -0,0 +1,283 @@
"""Tests for tool token estimation and curses_ui status_fn support."""
from unittest.mock import patch
import pytest
# tiktoken is not in core/[all] deps — skip estimation tests when unavailable
_has_tiktoken = True
try:
import tiktoken # noqa: F401
except ImportError:
_has_tiktoken = False
_needs_tiktoken = pytest.mark.skipif(not _has_tiktoken, reason="tiktoken not installed")
# ─── Token Estimation Tests ──────────────────────────────────────────────────
@_needs_tiktoken
def test_estimate_tool_tokens_returns_positive_counts():
    """_estimate_tool_tokens should return a non-empty dict with positive values."""
    import hermes_cli.tools_config as tc

    # Reset the module-level cache so the estimate is computed fresh rather
    # than served from a previous test's run.  (Importing _tool_token_cache
    # by value would snapshot the old object, so go through the module.)
    tc._tool_token_cache = None
    tokens = tc._estimate_tool_tokens()
    assert isinstance(tokens, dict)
    assert len(tokens) > 0
    for name, count in tokens.items():
        assert isinstance(name, str)
        assert isinstance(count, int)
        assert count > 0, f"Tool {name} has non-positive token count: {count}"
@_needs_tiktoken
def test_estimate_tool_tokens_is_cached():
    """Second call should return the same cached dict object."""
    import hermes_cli.tools_config as tc

    tc._tool_token_cache = None
    initial = tc._estimate_tool_tokens()
    repeat = tc._estimate_tool_tokens()
    # Identity (not mere equality) proves the second call hit the cache.
    assert initial is repeat
def test_estimate_tool_tokens_returns_empty_when_tiktoken_unavailable(monkeypatch):
    """Graceful degradation when tiktoken cannot be imported."""
    import builtins

    import hermes_cli.tools_config as tc

    tc._tool_token_cache = None
    original_import = builtins.__import__

    def failing_import(name, *args, **kwargs):
        # Block only tiktoken; every other import proceeds normally.
        if name == "tiktoken":
            raise ImportError("mocked")
        return original_import(name, *args, **kwargs)

    monkeypatch.setattr(builtins, "__import__", failing_import)
    assert tc._estimate_tool_tokens() == {}
    # Reset cache for other tests
    tc._tool_token_cache = None
@_needs_tiktoken
def test_estimate_tool_tokens_covers_known_tools():
    """Should include schemas for well-known tools like terminal, web_search."""
    import hermes_cli.tools_config as tc

    tc._tool_token_cache = None
    estimates = tc._estimate_tool_tokens()
    # These tools should always be discoverable
    for tool in ("terminal", "web_search", "read_file"):
        assert tool in estimates, f"Expected {tool!r} in token estimates"
# ─── Status Function Tests ───────────────────────────────────────────────────
def test_prompt_toolset_checklist_passes_status_fn(monkeypatch):
    """_prompt_toolset_checklist should pass a status_fn to curses_checklist."""
    import hermes_cli.tools_config as tc

    captured_kwargs = {}

    def fake_checklist(title, items, selected, *, cancel_returns=None, status_fn=None):
        # Record what the prompt forwarded, then accept the pre-selection.
        captured_kwargs["status_fn"] = status_fn
        captured_kwargs["title"] = title
        return selected  # Return pre-selected unchanged

    monkeypatch.setattr("hermes_cli.curses_ui.curses_checklist", fake_checklist)
    tc._prompt_toolset_checklist("CLI", {"web", "terminal"})
    assert "status_fn" in captured_kwargs
    # If tiktoken is available, status_fn should be set
    tokens = tc._estimate_tool_tokens()
    if tokens:
        assert captured_kwargs["status_fn"] is not None
def test_status_fn_returns_formatted_token_count(monkeypatch):
    """The status_fn should return a human-readable token count string."""
    import hermes_cli.tools_config as tc
    from hermes_cli.tools_config import CONFIGURABLE_TOOLSETS

    captured = {}

    def fake_checklist(title, items, selected, *, cancel_returns=None, status_fn=None):
        captured["status_fn"] = status_fn
        return selected

    monkeypatch.setattr("hermes_cli.curses_ui.curses_checklist", fake_checklist)
    tc._prompt_toolset_checklist("CLI", {"web", "terminal"})
    status_fn = captured.get("status_fn")
    if status_fn is None:
        pytest.skip("tiktoken unavailable; status_fn not created")
    # Find the indices for web and terminal
    idx_map = {ts_key: i for i, (ts_key, _, _) in enumerate(CONFIGURABLE_TOOLSETS)}
    # Call status_fn with web + terminal selected.
    # status_fn receives the set of selected checklist row indices.
    result = status_fn({idx_map["web"], idx_map["terminal"]})
    assert "tokens" in result
    assert "Est. tool context" in result
def test_status_fn_deduplicates_overlapping_tools(monkeypatch):
    """When toolsets overlap (browser includes web_search), tokens should not double-count."""
    import hermes_cli.tools_config as tc
    from hermes_cli.tools_config import CONFIGURABLE_TOOLSETS

    captured = {}

    def fake_checklist(title, items, selected, *, cancel_returns=None, status_fn=None):
        captured["status_fn"] = status_fn
        return selected

    monkeypatch.setattr("hermes_cli.curses_ui.curses_checklist", fake_checklist)
    tc._prompt_toolset_checklist("CLI", {"web"})
    status_fn = captured.get("status_fn")
    if status_fn is None:
        pytest.skip("tiktoken unavailable; status_fn not created")
    idx_map = {ts_key: i for i, (ts_key, _, _) in enumerate(CONFIGURABLE_TOOLSETS)}
    # web alone
    web_only = status_fn({idx_map["web"]})
    # browser includes web_search, so browser + web should not double-count web_search
    browser_only = status_fn({idx_map["browser"]})
    both = status_fn({idx_map["web"], idx_map["browser"]})
    # Extract numeric token counts from strings like "~8.3k tokens" or "~350 tokens"
    import re

    def parse_tokens(s):
        m = re.search(r"~([\d.]+)k?\s+tokens", s)
        if not m:
            return 0
        val = float(m.group(1))
        # The optional "k" suffix lies inside the matched span; scale when present.
        if "k" in s[m.start():m.end()]:
            val *= 1000
        return val

    web_tok = parse_tokens(web_only)
    browser_tok = parse_tokens(browser_only)
    both_tok = parse_tokens(both)
    # Both together should be LESS than naive sum (due to web_search dedup)
    naive_sum = web_tok + browser_tok
    assert both_tok < naive_sum, (
        f"Expected deduplication: web({web_tok}) + browser({browser_tok}) = {naive_sum} "
        f"but combined = {both_tok}"
    )
def test_status_fn_empty_selection():
    """Status function with no tools selected should return ~0 tokens."""
    import hermes_cli.tools_config as tc

    tc._tool_token_cache = None
    tokens = tc._estimate_tool_tokens()
    if not tokens:
        pytest.skip("tiktoken unavailable")
    from hermes_cli.tools_config import CONFIGURABLE_TOOLSETS
    from toolsets import resolve_toolset

    ts_keys = [ts_key for ts_key, _, _ in CONFIGURABLE_TOOLSETS]

    # NOTE(review): this re-implements the expected status-line formatting
    # locally — confirm it stays in sync with _prompt_toolset_checklist.
    def status_fn(chosen: set) -> str:
        all_tools: set = set()
        for idx in chosen:
            all_tools.update(resolve_toolset(ts_keys[idx]))
        total = sum(tokens.get(name, 0) for name in all_tools)
        if total >= 1000:
            return f"Est. tool context: ~{total / 1000:.1f}k tokens"
        return f"Est. tool context: ~{total} tokens"

    result = status_fn(set())
    assert "~0 tokens" in result
# ─── Curses UI Status Bar Tests ──────────────────────────────────────────────
def test_curses_checklist_numbered_fallback_shows_status(monkeypatch, capsys):
    """The numbered fallback should print the status_fn output."""
    from hermes_cli.curses_ui import _numbered_fallback

    # Empty input (plain Enter) confirms the current selection immediately.
    monkeypatch.setattr("builtins.input", lambda _prompt="": "")

    def report(chosen):
        return f"Selected {len(chosen)} items"

    chosen = _numbered_fallback(
        "Test title",
        ["Item A", "Item B", "Item C"],
        {0, 2},
        {0, 2},
        status_fn=report,
    )
    printed = capsys.readouterr().out
    assert "Selected 2 items" in printed
    assert chosen == {0, 2}
def test_curses_checklist_numbered_fallback_without_status(monkeypatch, capsys):
    """The numbered fallback should work fine without status_fn."""
    from hermes_cli.curses_ui import _numbered_fallback

    # Empty input confirms the current selection immediately.
    monkeypatch.setattr("builtins.input", lambda _prompt="": "")
    selection = _numbered_fallback(
        "Test title",
        ["Item A", "Item B"],
        {0},
        {0},
    )
    printed = capsys.readouterr().out
    assert "Est. tool context" not in printed
    assert selection == {0}
# ─── Registry get_schema Tests ───────────────────────────────────────────────
def test_registry_get_schema_returns_schema():
    """registry.get_schema() should return a tool's schema dict."""
    from tools.registry import registry

    # Importing model_tools triggers tool discovery/registration.
    import model_tools  # noqa: F401

    schema = registry.get_schema("terminal")
    assert schema is not None
    assert "name" in schema
    assert schema["name"] == "terminal"
    assert "parameters" in schema
def test_registry_get_schema_returns_none_for_unknown():
    """registry.get_schema() should return None for unknown tools."""
    from tools.registry import registry

    missing = registry.get_schema("nonexistent_tool_xyz")
    assert missing is None

View file

@ -332,3 +332,52 @@ def test_first_install_nous_auto_configures_managed_defaults(monkeypatch):
assert config["tts"]["provider"] == "openai"
assert config["browser"]["cloud_provider"] == "browserbase"
assert configured == []
# ── Platform / toolset consistency ────────────────────────────────────────────
class TestPlatformToolsetConsistency:
    """Every platform in tools_config.PLATFORMS must have a matching toolset."""

    def test_all_platforms_have_toolset_definitions(self):
        """Each platform's default_toolset must exist in TOOLSETS."""
        from hermes_cli.tools_config import PLATFORMS
        from toolsets import TOOLSETS

        for platform, info in PLATFORMS.items():
            toolset = info["default_toolset"]
            assert toolset in TOOLSETS, (
                f"Platform {platform!r} references toolset {toolset!r} "
                f"which is not defined in toolsets.py"
            )

    def test_gateway_toolset_includes_all_messaging_platforms(self):
        """hermes-gateway includes list should cover all messaging platforms."""
        from hermes_cli.tools_config import PLATFORMS
        from toolsets import TOOLSETS

        gateway_includes = set(TOOLSETS["hermes-gateway"]["includes"])
        # These platforms are not messaging surfaces, so skip them.
        non_messaging = {"cli", "api_server"}
        for platform, info in PLATFORMS.items():
            if platform in non_messaging:
                continue
            toolset = info["default_toolset"]
            assert toolset in gateway_includes, (
                f"Platform {platform!r} toolset {toolset!r} missing from "
                f"hermes-gateway includes"
            )

    def test_skills_config_covers_tools_config_platforms(self):
        """skills_config.PLATFORMS should have entries for all gateway platforms."""
        from hermes_cli.skills_config import PLATFORMS as SKILLS_PLATFORMS
        from hermes_cli.tools_config import PLATFORMS as TOOLS_PLATFORMS

        non_messaging = {"api_server"}
        for platform in TOOLS_PLATFORMS:
            if platform in non_messaging:
                continue
            assert platform in SKILLS_PLATFORMS, (
                f"Platform {platform!r} in tools_config but missing from "
                f"skills_config PLATFORMS"
            )

View file

@ -267,7 +267,8 @@ def test_restore_stashed_changes_user_declines_reset(monkeypatch, tmp_path, caps
def test_restore_stashed_changes_auto_resets_non_interactive(monkeypatch, tmp_path, capsys):
"""Non-interactive mode auto-resets without prompting."""
"""Non-interactive mode auto-resets without prompting and returns False
instead of sys.exit(1) so the update can continue (gateway /update path)."""
calls = []
def fake_run(cmd, **kwargs):
@ -282,9 +283,9 @@ def test_restore_stashed_changes_auto_resets_non_interactive(monkeypatch, tmp_pa
monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
with pytest.raises(SystemExit, match="1"):
hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
result = hermes_main._restore_stashed_changes(["git"], tmp_path, "abc123", prompt_user=False)
assert result is False
out = capsys.readouterr().out
assert "Working tree reset to clean state" in out
reset_calls = [c for c, _ in calls if c[1:3] == ["reset", "--hard"]]
@ -384,3 +385,236 @@ def test_cmd_update_succeeds_with_extras(monkeypatch, tmp_path):
install_cmds = [c for c in recorded if "pip" in c and "install" in c]
assert len(install_cmds) == 1
assert ".[all]" in install_cmds[0]
# ---------------------------------------------------------------------------
# ff-only fallback to reset --hard on diverged history
# ---------------------------------------------------------------------------
def _make_update_side_effect(
current_branch="main",
commit_count="3",
ff_only_fails=False,
reset_fails=False,
fetch_fails=False,
fetch_stderr="",
):
"""Build a subprocess.run side_effect for cmd_update tests."""
recorded = []
def side_effect(cmd, **kwargs):
recorded.append(cmd)
joined = " ".join(str(c) for c in cmd)
if "fetch" in joined and "origin" in joined:
if fetch_fails:
return SimpleNamespace(stdout="", stderr=fetch_stderr, returncode=128)
return SimpleNamespace(stdout="", stderr="", returncode=0)
if "rev-parse" in joined and "--abbrev-ref" in joined:
return SimpleNamespace(stdout=f"{current_branch}\n", stderr="", returncode=0)
if "checkout" in joined and "main" in joined:
return SimpleNamespace(stdout="", stderr="", returncode=0)
if "rev-list" in joined:
return SimpleNamespace(stdout=f"{commit_count}\n", stderr="", returncode=0)
if "--ff-only" in joined:
if ff_only_fails:
return SimpleNamespace(
stdout="",
stderr="fatal: Not possible to fast-forward, aborting.\n",
returncode=128,
)
return SimpleNamespace(stdout="Updating abc..def\n", stderr="", returncode=0)
if "reset" in joined and "--hard" in joined:
if reset_fails:
return SimpleNamespace(stdout="", stderr="error: unable to write\n", returncode=1)
return SimpleNamespace(stdout="HEAD is now at abc123\n", stderr="", returncode=0)
return SimpleNamespace(returncode=0, stdout="", stderr="")
return side_effect, recorded
def test_cmd_update_falls_back_to_reset_when_ff_only_fails(monkeypatch, tmp_path, capsys):
    """When --ff-only fails (diverged history), update resets to origin/{branch}."""
    _setup_update_mocks(monkeypatch, tmp_path)
    # shutil.which reports uv as available.
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    side_effect, recorded = _make_update_side_effect(ff_only_fails=True)
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    hermes_main.cmd_update(SimpleNamespace())
    # Exactly one hard reset, targeting the remote tracking branch.
    reset_calls = [c for c in recorded if "reset" in c and "--hard" in c]
    assert len(reset_calls) == 1
    assert reset_calls[0] == ["git", "reset", "--hard", "origin/main"]
    out = capsys.readouterr().out
    assert "Fast-forward not possible" in out
def test_cmd_update_no_reset_when_ff_only_succeeds(monkeypatch, tmp_path):
    """When --ff-only succeeds, no reset is attempted."""
    _setup_update_mocks(monkeypatch, tmp_path)
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    fake_run, seen = _make_update_side_effect()
    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    hermes_main.cmd_update(SimpleNamespace())
    # A clean fast-forward must never trigger a destructive reset.
    assert not any("reset" in c and "--hard" in c for c in seen)
# ---------------------------------------------------------------------------
# Non-main branch → auto-checkout main
# ---------------------------------------------------------------------------
def test_cmd_update_switches_to_main_from_feature_branch(monkeypatch, tmp_path, capsys):
    """When on a feature branch, update checks out main before pulling."""
    _setup_update_mocks(monkeypatch, tmp_path)
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    side_effect, recorded = _make_update_side_effect(current_branch="fix/something")
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    hermes_main.cmd_update(SimpleNamespace())
    # Exactly one `git checkout main` call was made.
    checkout_calls = [c for c in recorded if "checkout" in c and "main" in c]
    assert len(checkout_calls) == 1
    out = capsys.readouterr().out
    # The message names the branch we left and that we switched to main.
    assert "fix/something" in out
    assert "switching to main" in out
def test_cmd_update_switches_to_main_from_detached_head(monkeypatch, tmp_path, capsys):
    """When in detached HEAD state, update checks out main before pulling."""
    _setup_update_mocks(monkeypatch, tmp_path)
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    # `git rev-parse --abbrev-ref HEAD` prints "HEAD" when detached.
    side_effect, recorded = _make_update_side_effect(current_branch="HEAD")
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    hermes_main.cmd_update(SimpleNamespace())
    checkout_calls = [c for c in recorded if "checkout" in c and "main" in c]
    assert len(checkout_calls) == 1
    out = capsys.readouterr().out
    assert "detached HEAD" in out
def test_cmd_update_restores_stash_and_branch_when_already_up_to_date(monkeypatch, tmp_path, capsys):
    """When on a feature branch with no updates, stash is restored and branch switched back."""
    _setup_update_mocks(monkeypatch, tmp_path)
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    # Enable stash so it returns a ref
    monkeypatch.setattr(
        hermes_main, "_stash_local_changes_if_needed",
        lambda *a, **kw: "abc123deadbeef",
    )
    restore_calls = []
    monkeypatch.setattr(
        hermes_main, "_restore_stashed_changes",
        lambda *a, **kw: restore_calls.append(1) or True,
    )
    # commit_count="0" → rev-list reports no new commits to pull.
    side_effect, recorded = _make_update_side_effect(
        current_branch="fix/something", commit_count="0",
    )
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    hermes_main.cmd_update(SimpleNamespace())
    # Stash should have been restored
    assert len(restore_calls) == 1
    # Should have checked out back to the original branch
    checkout_back = [c for c in recorded if "checkout" in c and "fix/something" in c]
    assert len(checkout_back) == 1
    out = capsys.readouterr().out
    assert "Already up to date" in out
def test_cmd_update_no_checkout_when_already_on_main(monkeypatch, tmp_path):
    """When already on main, no checkout is needed."""
    _setup_update_mocks(monkeypatch, tmp_path)
    monkeypatch.setattr("shutil.which", lambda name: "/usr/bin/uv" if name == "uv" else None)
    fake_run, seen = _make_update_side_effect()
    monkeypatch.setattr(hermes_main.subprocess, "run", fake_run)
    hermes_main.cmd_update(SimpleNamespace())
    # No git checkout of any kind should have run.
    assert not any("checkout" in c for c in seen)
# ---------------------------------------------------------------------------
# Fetch failure — friendly error messages
# ---------------------------------------------------------------------------
def test_cmd_update_network_error_shows_friendly_message(monkeypatch, tmp_path, capsys):
    """Network failures during fetch show a user-friendly message."""
    _setup_update_mocks(monkeypatch, tmp_path)
    side_effect, _ = _make_update_side_effect(
        fetch_fails=True,
        fetch_stderr="fatal: unable to access 'https://...': Could not resolve host: github.com",
    )
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    # A failed fetch aborts the update with exit code 1.
    with pytest.raises(SystemExit, match="1"):
        hermes_main.cmd_update(SimpleNamespace())
    out = capsys.readouterr().out
    assert "Network error" in out
def test_cmd_update_auth_error_shows_friendly_message(monkeypatch, tmp_path, capsys):
    """Auth failures during fetch show a user-friendly message."""
    _setup_update_mocks(monkeypatch, tmp_path)
    side_effect, _ = _make_update_side_effect(
        fetch_fails=True,
        fetch_stderr="fatal: Authentication failed for 'https://...'",
    )
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    # A failed fetch aborts the update with exit code 1.
    with pytest.raises(SystemExit, match="1"):
        hermes_main.cmd_update(SimpleNamespace())
    out = capsys.readouterr().out
    assert "Authentication failed" in out
# ---------------------------------------------------------------------------
# reset --hard failure — don't attempt stash restore
# ---------------------------------------------------------------------------
def test_cmd_update_skips_stash_restore_when_reset_fails(monkeypatch, tmp_path, capsys):
    """When reset --hard fails, stash restore is skipped with a helpful message."""
    _setup_update_mocks(monkeypatch, tmp_path)
    # Re-enable stash so it actually returns a ref
    monkeypatch.setattr(
        hermes_main, "_stash_local_changes_if_needed",
        lambda *a, **kw: "abc123deadbeef",
    )
    restore_calls = []
    monkeypatch.setattr(
        hermes_main, "_restore_stashed_changes",
        lambda *a, **kw: restore_calls.append(1) or True,
    )
    # Diverged history forces the reset path, and the reset itself fails.
    side_effect, _ = _make_update_side_effect(ff_only_fails=True, reset_fails=True)
    monkeypatch.setattr(hermes_main.subprocess, "run", side_effect)
    with pytest.raises(SystemExit, match="1"):
        hermes_main.cmd_update(SimpleNamespace())
    # Stash restore should NOT have been called
    assert len(restore_calls) == 0
    out = capsys.readouterr().out
    assert "preserved in stash" in out

View file

@ -101,6 +101,69 @@ class TestLaunchdPlistReplace:
assert replace_idx == run_idx + 1
class TestLaunchdPlistPath:
    """generate_launchd_plist() must embed the env block launchd needs."""

    @staticmethod
    def _path_value(plist):
        """Return the <string> value on the line after <key>PATH</key>.

        Raises AssertionError when no PATH key exists in the plist.
        """
        lines = plist.splitlines()
        for i, line in enumerate(lines):
            if "<key>PATH</key>" in line.strip():
                value = lines[i + 1].strip()
                return value.replace("<string>", "").replace("</string>", "")
        raise AssertionError("PATH key not found in plist")

    @staticmethod
    def _venv_bin():
        """venv bin dir: the detected venv, else PROJECT_ROOT/venv/bin."""
        detected = gateway_cli._detect_venv_dir()
        if detected:
            return str(detected / "bin")
        return str(gateway_cli.PROJECT_ROOT / "venv" / "bin")

    def test_plist_contains_environment_variables(self):
        plist = gateway_cli.generate_launchd_plist()
        assert "<key>EnvironmentVariables</key>" in plist
        assert "<key>PATH</key>" in plist
        assert "<key>VIRTUAL_ENV</key>" in plist
        assert "<key>HERMES_HOME</key>" in plist

    def test_plist_path_includes_venv_bin(self):
        plist = gateway_cli.generate_launchd_plist()
        assert self._venv_bin() in plist

    def test_plist_path_starts_with_venv_bin(self):
        # The venv bin dir must be the FIRST PATH entry so its python wins.
        plist = gateway_cli.generate_launchd_plist()
        assert self._path_value(plist).startswith(self._venv_bin() + ":")

    def test_plist_path_includes_node_modules_bin(self):
        plist = gateway_cli.generate_launchd_plist()
        node_bin = str(gateway_cli.PROJECT_ROOT / "node_modules" / ".bin")
        assert node_bin in self._path_value(plist).split(":")

    def test_plist_path_includes_current_env_path(self, monkeypatch):
        monkeypatch.setenv("PATH", "/custom/bin:/usr/bin:/bin")
        plist = gateway_cli.generate_launchd_plist()
        assert "/custom/bin" in plist

    def test_plist_path_deduplicates_venv_bin_when_already_in_path(self, monkeypatch):
        venv_bin = self._venv_bin()
        monkeypatch.setenv("PATH", f"{venv_bin}:/usr/bin:/bin")
        plist = gateway_cli.generate_launchd_plist()
        assert self._path_value(plist).split(":").count(venv_bin) == 1
# ---------------------------------------------------------------------------
# cmd_update — macOS launchd detection
# ---------------------------------------------------------------------------
@ -177,6 +240,33 @@ class TestLaunchdPlistRefresh:
assert any("unload" in s for s in cmd_strs)
assert any("start" in s for s in cmd_strs)
    def test_launchd_start_recreates_missing_plist_and_loads_service(self, tmp_path, monkeypatch):
        """launchd_start self-heals when the plist file is missing entirely."""
        plist_path = tmp_path / "ai.hermes.gateway.plist"
        assert not plist_path.exists()
        monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path)
        calls = []

        def fake_run(cmd, check=False, **kwargs):
            # Record every launchctl invocation; all of them succeed.
            calls.append(cmd)
            return SimpleNamespace(returncode=0, stdout="", stderr="")

        monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run)
        gateway_cli.launchd_start()
        # Should have created the plist
        assert plist_path.exists()
        assert "--replace" in plist_path.read_text()
        cmd_strs = [" ".join(c) for c in calls]
        # Should load the new plist, then start
        assert any("load" in s for s in cmd_strs)
        assert any("start" in s for s in cmd_strs)
        # Should NOT call unload (nothing to unload)
        assert not any("unload" in s for s in cmd_strs)
class TestCmdUpdateLaunchdRestart:
"""cmd_update correctly detects and handles launchd on macOS."""

View file

@ -0,0 +1,189 @@
"""Tests for hermes_cli/webhook.py — webhook subscription CLI."""
import json
import os
import pytest
from argparse import Namespace
from pathlib import Path
from hermes_cli.webhook import (
webhook_command,
_load_subscriptions,
_save_subscriptions,
_subscriptions_path,
_is_webhook_enabled,
)
@pytest.fixture(autouse=True)
def _isolate(tmp_path, monkeypatch):
    """Point HERMES_HOME at a temp dir and force webhooks enabled per test."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # Default: webhooks enabled (most tests need this)
    monkeypatch.setattr(
        "hermes_cli.webhook._is_webhook_enabled", lambda: True
    )
def _make_args(**kwargs):
defaults = {
"webhook_action": None,
"name": "",
"prompt": "",
"events": "",
"description": "",
"skills": "",
"deliver": "log",
"deliver_chat_id": "",
"secret": "",
"payload": "",
}
defaults.update(kwargs)
return Namespace(**defaults)
class TestSubscribe:
    """`webhook subscribe` creates, configures, and updates subscriptions."""

    def test_basic_create(self, capsys):
        webhook_command(_make_args(webhook_action="subscribe", name="test-hook"))
        out = capsys.readouterr().out
        assert "Created" in out
        assert "/webhooks/test-hook" in out
        subs = _load_subscriptions()
        assert "test-hook" in subs

    def test_with_options(self, capsys):
        webhook_command(_make_args(
            webhook_action="subscribe",
            name="gh-issues",
            events="issues,pull_request",
            prompt="Issue: {issue.title}",
            deliver="telegram",
            deliver_chat_id="12345",
            description="Watch GitHub",
        ))
        subs = _load_subscriptions()
        route = subs["gh-issues"]
        # Comma-separated CLI events are stored as a list.
        assert route["events"] == ["issues", "pull_request"]
        assert route["prompt"] == "Issue: {issue.title}"
        assert route["deliver"] == "telegram"
        assert route["deliver_extra"] == {"chat_id": "12345"}

    def test_custom_secret(self):
        webhook_command(_make_args(
            webhook_action="subscribe", name="s", secret="my-secret"
        ))
        assert _load_subscriptions()["s"]["secret"] == "my-secret"

    def test_auto_secret(self):
        # No secret supplied → one is generated; only its length is stable.
        webhook_command(_make_args(webhook_action="subscribe", name="s"))
        secret = _load_subscriptions()["s"]["secret"]
        assert len(secret) > 20

    def test_update(self, capsys):
        # Subscribing under an existing name overwrites it and reports "Updated".
        webhook_command(_make_args(webhook_action="subscribe", name="x", prompt="v1"))
        webhook_command(_make_args(webhook_action="subscribe", name="x", prompt="v2"))
        out = capsys.readouterr().out
        assert "Updated" in out
        assert _load_subscriptions()["x"]["prompt"] == "v2"

    def test_invalid_name(self, capsys):
        webhook_command(_make_args(webhook_action="subscribe", name="bad name!"))
        out = capsys.readouterr().out
        assert "Error" in out or "Invalid" in out
        # Nothing was persisted for the rejected name.
        assert _load_subscriptions() == {}
class TestList:
    """`webhook list` output for empty and populated stores."""

    def test_empty(self, capsys):
        webhook_command(_make_args(webhook_action="list"))
        assert "No dynamic" in capsys.readouterr().out

    def test_with_entries(self, capsys):
        for hook in ("a", "b"):
            webhook_command(_make_args(webhook_action="subscribe", name=hook))
        capsys.readouterr()  # drop the subscribe output
        webhook_command(_make_args(webhook_action="list"))
        listing = capsys.readouterr().out
        assert "2 webhook" in listing
        assert "a" in listing
        assert "b" in listing
class TestRemove:
    """`webhook remove` deletes exactly the named subscription."""

    def test_remove_existing(self, capsys):
        webhook_command(_make_args(webhook_action="subscribe", name="temp"))
        webhook_command(_make_args(webhook_action="remove", name="temp"))
        assert "Removed" in capsys.readouterr().out
        assert _load_subscriptions() == {}

    def test_remove_nonexistent(self, capsys):
        webhook_command(_make_args(webhook_action="remove", name="nope"))
        assert "No subscription" in capsys.readouterr().out

    def test_selective_remove(self):
        for hook in ("keep", "drop"):
            webhook_command(_make_args(webhook_action="subscribe", name=hook))
        webhook_command(_make_args(webhook_action="remove", name="drop"))
        remaining = _load_subscriptions()
        assert "keep" in remaining
        assert "drop" not in remaining
class TestPersistence:
    """Subscriptions round-trip through the JSON file on disk."""

    def test_file_written(self):
        webhook_command(_make_args(webhook_action="subscribe", name="persist"))
        store = _subscriptions_path()
        assert store.exists()
        assert "persist" in json.loads(store.read_text())

    def test_corrupted_file(self):
        # Unparseable JSON is treated as an empty subscription store.
        store = _subscriptions_path()
        store.parent.mkdir(parents=True, exist_ok=True)
        store.write_text("broken{{{")
        assert _load_subscriptions() == {}
class TestWebhookEnabledGate:
    """webhook_command is gated on the webhook feature being enabled."""

    def test_blocks_when_disabled(self, capsys, monkeypatch):
        monkeypatch.setattr("hermes_cli.webhook._is_webhook_enabled", lambda: False)
        webhook_command(_make_args(webhook_action="subscribe", name="blocked"))
        out = capsys.readouterr().out
        assert "not enabled" in out.lower()
        assert "hermes gateway setup" in out
        # Nothing was persisted while disabled.
        assert _load_subscriptions() == {}

    def test_blocks_list_when_disabled(self, capsys, monkeypatch):
        monkeypatch.setattr("hermes_cli.webhook._is_webhook_enabled", lambda: False)
        webhook_command(_make_args(webhook_action="list"))
        out = capsys.readouterr().out
        assert "not enabled" in out.lower()

    def test_allows_when_enabled(self, capsys):
        # _is_webhook_enabled already patched to True by autouse fixture
        webhook_command(_make_args(webhook_action="subscribe", name="allowed"))
        out = capsys.readouterr().out
        assert "Created" in out
        assert "allowed" in _load_subscriptions()

    def test_real_check_disabled(self, monkeypatch):
        # Mirrors the enabled-check's logic against an empty config dict.
        monkeypatch.setattr(
            "hermes_cli.webhook._get_webhook_config",
            lambda: {},
        )
        monkeypatch.setattr(
            "hermes_cli.webhook._is_webhook_enabled",
            lambda: bool({}.get("enabled")),
        )
        import hermes_cli.webhook as wh_mod
        assert wh_mod._is_webhook_enabled() is False

    def test_real_check_enabled(self, monkeypatch):
        monkeypatch.setattr(
            "hermes_cli.webhook._is_webhook_enabled",
            lambda: True,
        )
        import hermes_cli.webhook as wh_mod
        assert wh_mod._is_webhook_enabled() is True