mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
feat(honcho): context injection overhaul, 5-tool surface, cost safety, session isolation (#10619)
Salvaged from PR #9884 by erosika. Cherry-picked plugin changes onto current main with minimal core modifications. Plugin changes (plugins/memory/honcho/): - New honcho_reasoning tool (5th tool, splits LLM calls from honcho_context) - Two-layer context injection: base context (summary + representation + card) on contextCadence, dialectic supplement on dialecticCadence - Multi-pass dialectic depth (1-3 passes) with early bail-out on strong signal - Cold/warm prompt selection based on session state - dialecticCadence defaults to 3 (was 1) — ~66% fewer Honcho LLM calls - Session summary injection for conversational continuity - Bidirectional peer targeting on all 5 tools - Correctness fixes: peer param fallback, None guard on set_peer_card, schema validation, signal_sufficient anchored regex, mid->medium level fix Core changes (~20 lines across 3 files): - agent/memory_manager.py: Enhanced sanitize_context() to strip full <memory-context> blocks and system notes (prevents leak from saveMessages) - run_agent.py: gateway_session_key param for stable per-chat Honcho sessions, on_turn_start() call before prefetch_all() for cadence tracking, sanitize_context() on user messages to strip leaked memory blocks - gateway/run.py: skip_memory=True on 2 temp agents (prevents orphan sessions), gateway_session_key threading to main agent Tests: 509 passed (3 skipped — honcho SDK not installed locally) Docs: Updated honcho.md, memory-providers.md, tools-reference.md, SKILL.md Co-authored-by: erosika <erosika@users.noreply.github.com>
This commit is contained in:
parent
00ff9a26cd
commit
cc6e8941db
17 changed files with 2632 additions and 396 deletions
56
tests/honcho_plugin/test_cli.py
Normal file
56
tests/honcho_plugin/test_cli.py
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
"""Tests for plugins/memory/honcho/cli.py."""
|
||||
|
||||
from types import SimpleNamespace
|
||||
|
||||
|
||||
class TestCmdStatus:
|
||||
def test_reports_connection_failure_when_session_setup_fails(self, monkeypatch, capsys, tmp_path):
|
||||
import plugins.memory.honcho.cli as honcho_cli
|
||||
|
||||
cfg_path = tmp_path / "honcho.json"
|
||||
cfg_path.write_text("{}")
|
||||
|
||||
class FakeConfig:
|
||||
enabled = True
|
||||
api_key = "root-key"
|
||||
workspace_id = "hermes"
|
||||
host = "hermes"
|
||||
base_url = None
|
||||
ai_peer = "hermes"
|
||||
peer_name = "eri"
|
||||
recall_mode = "hybrid"
|
||||
user_observe_me = True
|
||||
user_observe_others = False
|
||||
ai_observe_me = False
|
||||
ai_observe_others = True
|
||||
write_frequency = "async"
|
||||
session_strategy = "per-session"
|
||||
context_tokens = 800
|
||||
|
||||
def resolve_session_name(self):
|
||||
return "hermes"
|
||||
|
||||
monkeypatch.setattr(honcho_cli, "_read_config", lambda: {"apiKey": "***"})
|
||||
monkeypatch.setattr(honcho_cli, "_config_path", lambda: cfg_path)
|
||||
monkeypatch.setattr(honcho_cli, "_local_config_path", lambda: cfg_path)
|
||||
monkeypatch.setattr(honcho_cli, "_active_profile_name", lambda: "default")
|
||||
monkeypatch.setattr(
|
||||
"plugins.memory.honcho.client.HonchoClientConfig.from_global_config",
|
||||
lambda host=None: FakeConfig(),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"plugins.memory.honcho.client.get_honcho_client",
|
||||
lambda cfg: object(),
|
||||
)
|
||||
|
||||
def _boom(hcfg, client):
|
||||
raise RuntimeError("Invalid API key")
|
||||
|
||||
monkeypatch.setattr(honcho_cli, "_show_peer_cards", _boom)
|
||||
monkeypatch.setitem(__import__("sys").modules, "honcho", SimpleNamespace())
|
||||
|
||||
honcho_cli.cmd_status(SimpleNamespace(all=False))
|
||||
|
||||
out = capsys.readouterr().out
|
||||
assert "FAILED (Invalid API key)" in out
|
||||
assert "Connection... OK" not in out
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
"""Tests for plugins/memory/honcho/client.py — Honcho client configuration."""
|
||||
|
||||
import importlib.util
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
|
@ -25,6 +26,7 @@ class TestHonchoClientConfigDefaults:
|
|||
assert config.workspace_id == "hermes"
|
||||
assert config.api_key is None
|
||||
assert config.environment == "production"
|
||||
assert config.timeout is None
|
||||
assert config.enabled is False
|
||||
assert config.save_messages is True
|
||||
assert config.session_strategy == "per-directory"
|
||||
|
|
@ -76,6 +78,11 @@ class TestFromEnv:
|
|||
assert config.base_url == "http://localhost:8000"
|
||||
assert config.enabled is True
|
||||
|
||||
def test_reads_timeout_from_env(self):
|
||||
with patch.dict(os.environ, {"HONCHO_TIMEOUT": "90"}, clear=True):
|
||||
config = HonchoClientConfig.from_env()
|
||||
assert config.timeout == 90.0
|
||||
|
||||
|
||||
class TestFromGlobalConfig:
|
||||
def test_missing_config_falls_back_to_env(self, tmp_path):
|
||||
|
|
@ -87,10 +94,10 @@ class TestFromGlobalConfig:
|
|||
assert config.enabled is False
|
||||
assert config.api_key is None
|
||||
|
||||
def test_reads_full_config(self, tmp_path):
|
||||
def test_reads_full_config(self, tmp_path, monkeypatch):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "my-honcho-key",
|
||||
"apiKey": "***",
|
||||
"workspace": "my-workspace",
|
||||
"environment": "staging",
|
||||
"peerName": "alice",
|
||||
|
|
@ -108,9 +115,11 @@ class TestFromGlobalConfig:
|
|||
}
|
||||
}
|
||||
}))
|
||||
# Isolate from real ~/.hermes/honcho.json
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path / "isolated"))
|
||||
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.api_key == "my-honcho-key"
|
||||
assert config.api_key == "***"
|
||||
# Host block workspace overrides root workspace
|
||||
assert config.workspace_id == "override-ws"
|
||||
assert config.ai_peer == "override-ai"
|
||||
|
|
@ -154,10 +163,31 @@ class TestFromGlobalConfig:
|
|||
def test_session_strategy_default_from_global_config(self, tmp_path):
|
||||
"""from_global_config with no sessionStrategy should match dataclass default."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "key"}))
|
||||
config_file.write_text(json.dumps({"apiKey": "***"}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.session_strategy == "per-directory"
|
||||
|
||||
def test_context_tokens_default_is_none(self, tmp_path):
|
||||
"""Default context_tokens should be None (uncapped) unless explicitly set."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***"}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.context_tokens is None
|
||||
|
||||
def test_context_tokens_explicit_sets_cap(self, tmp_path):
|
||||
"""Explicit contextTokens in config sets the cap."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***", "contextTokens": 1200}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.context_tokens == 1200
|
||||
|
||||
def test_context_tokens_explicit_overrides_default(self, tmp_path):
|
||||
"""Explicit contextTokens in config should override the default."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***", "contextTokens": 2000}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.context_tokens == 2000
|
||||
|
||||
def test_context_tokens_host_block_wins(self, tmp_path):
|
||||
"""Host block contextTokens should override root."""
|
||||
config_file = tmp_path / "config.json"
|
||||
|
|
@ -232,6 +262,20 @@ class TestFromGlobalConfig:
|
|||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.base_url == "http://root:9000"
|
||||
|
||||
def test_timeout_from_config_root(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"timeout": 75}))
|
||||
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.timeout == 75.0
|
||||
|
||||
def test_request_timeout_alias_from_config_root(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"requestTimeout": "82.5"}))
|
||||
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.timeout == 82.5
|
||||
|
||||
|
||||
class TestResolveSessionName:
|
||||
def test_manual_override(self):
|
||||
|
|
@ -333,13 +377,14 @@ class TestResolveConfigPath:
|
|||
hermes_home.mkdir()
|
||||
local_cfg = hermes_home / "honcho.json"
|
||||
local_cfg.write_text(json.dumps({
|
||||
"apiKey": "local-key",
|
||||
"apiKey": "***",
|
||||
"workspace": "local-ws",
|
||||
}))
|
||||
|
||||
with patch.dict(os.environ, {"HERMES_HOME": str(hermes_home)}):
|
||||
with patch.dict(os.environ, {"HERMES_HOME": str(hermes_home)}), \
|
||||
patch.object(Path, "home", return_value=tmp_path):
|
||||
config = HonchoClientConfig.from_global_config()
|
||||
assert config.api_key == "local-key"
|
||||
assert config.api_key == "***"
|
||||
assert config.workspace_id == "local-ws"
|
||||
|
||||
|
||||
|
|
@ -500,46 +545,115 @@ class TestObservationModeMigration:
|
|||
assert cfg.ai_observe_others is True
|
||||
|
||||
|
||||
class TestInitOnSessionStart:
|
||||
"""Tests for the initOnSessionStart config field."""
|
||||
class TestGetHonchoClient:
|
||||
def teardown_method(self):
|
||||
reset_honcho_client()
|
||||
|
||||
def test_default_is_false(self):
|
||||
@pytest.mark.skipif(
|
||||
not importlib.util.find_spec("honcho"),
|
||||
reason="honcho SDK not installed"
|
||||
)
|
||||
def test_passes_timeout_from_config(self):
|
||||
fake_honcho = MagicMock(name="Honcho")
|
||||
cfg = HonchoClientConfig(
|
||||
api_key="test-key",
|
||||
timeout=91.0,
|
||||
workspace_id="hermes",
|
||||
environment="production",
|
||||
)
|
||||
|
||||
with patch("honcho.Honcho", return_value=fake_honcho) as mock_honcho:
|
||||
client = get_honcho_client(cfg)
|
||||
|
||||
assert client is fake_honcho
|
||||
mock_honcho.assert_called_once()
|
||||
assert mock_honcho.call_args.kwargs["timeout"] == 91.0
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not importlib.util.find_spec("honcho"),
|
||||
reason="honcho SDK not installed"
|
||||
)
|
||||
def test_hermes_config_timeout_override_used_when_config_timeout_missing(self):
|
||||
fake_honcho = MagicMock(name="Honcho")
|
||||
cfg = HonchoClientConfig(
|
||||
api_key="test-key",
|
||||
workspace_id="hermes",
|
||||
environment="production",
|
||||
)
|
||||
|
||||
with patch("honcho.Honcho", return_value=fake_honcho) as mock_honcho, \
|
||||
patch("hermes_cli.config.load_config", return_value={"honcho": {"timeout": 88}}):
|
||||
client = get_honcho_client(cfg)
|
||||
|
||||
assert client is fake_honcho
|
||||
mock_honcho.assert_called_once()
|
||||
assert mock_honcho.call_args.kwargs["timeout"] == 88.0
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not importlib.util.find_spec("honcho"),
|
||||
reason="honcho SDK not installed"
|
||||
)
|
||||
def test_hermes_request_timeout_alias_used(self):
|
||||
fake_honcho = MagicMock(name="Honcho")
|
||||
cfg = HonchoClientConfig(
|
||||
api_key="test-key",
|
||||
workspace_id="hermes",
|
||||
environment="production",
|
||||
)
|
||||
|
||||
with patch("honcho.Honcho", return_value=fake_honcho) as mock_honcho, \
|
||||
patch("hermes_cli.config.load_config", return_value={"honcho": {"request_timeout": "77.5"}}):
|
||||
client = get_honcho_client(cfg)
|
||||
|
||||
assert client is fake_honcho
|
||||
mock_honcho.assert_called_once()
|
||||
assert mock_honcho.call_args.kwargs["timeout"] == 77.5
|
||||
|
||||
|
||||
class TestResolveSessionNameGatewayKey:
|
||||
"""Regression tests for gateway_session_key priority in resolve_session_name.
|
||||
|
||||
Ensures gateway platforms get stable per-chat Honcho sessions even when
|
||||
sessionStrategy=per-session would otherwise create ephemeral sessions.
|
||||
Regression: plugin refactor 924bc67e dropped gateway key plumbing.
|
||||
"""
|
||||
|
||||
def test_gateway_key_overrides_per_session_strategy(self):
|
||||
"""gateway_session_key must win over per-session session_id."""
|
||||
config = HonchoClientConfig(session_strategy="per-session")
|
||||
result = config.resolve_session_name(
|
||||
session_id="20260412_171002_69bb38",
|
||||
gateway_session_key="agent:main:telegram:dm:8439114563",
|
||||
)
|
||||
assert result == "agent-main-telegram-dm-8439114563"
|
||||
|
||||
def test_session_title_still_wins_over_gateway_key(self):
|
||||
"""Explicit /title remap takes priority over gateway_session_key."""
|
||||
config = HonchoClientConfig(session_strategy="per-session")
|
||||
result = config.resolve_session_name(
|
||||
session_title="my-custom-title",
|
||||
session_id="20260412_171002_69bb38",
|
||||
gateway_session_key="agent:main:telegram:dm:8439114563",
|
||||
)
|
||||
assert result == "my-custom-title"
|
||||
|
||||
def test_per_session_fallback_without_gateway_key(self):
|
||||
"""Without gateway_session_key, per-session returns session_id (CLI path)."""
|
||||
config = HonchoClientConfig(session_strategy="per-session")
|
||||
result = config.resolve_session_name(
|
||||
session_id="20260412_171002_69bb38",
|
||||
gateway_session_key=None,
|
||||
)
|
||||
assert result == "20260412_171002_69bb38"
|
||||
|
||||
def test_gateway_key_sanitizes_special_chars(self):
|
||||
"""Colons and other non-alphanumeric chars are replaced with hyphens."""
|
||||
config = HonchoClientConfig()
|
||||
assert config.init_on_session_start is False
|
||||
|
||||
def test_root_level_true(self, tmp_path):
|
||||
cfg_file = tmp_path / "config.json"
|
||||
cfg_file.write_text(json.dumps({
|
||||
"apiKey": "k",
|
||||
"initOnSessionStart": True,
|
||||
}))
|
||||
cfg = HonchoClientConfig.from_global_config(config_path=cfg_file)
|
||||
assert cfg.init_on_session_start is True
|
||||
|
||||
def test_host_block_overrides_root(self, tmp_path):
|
||||
cfg_file = tmp_path / "config.json"
|
||||
cfg_file.write_text(json.dumps({
|
||||
"apiKey": "k",
|
||||
"initOnSessionStart": True,
|
||||
"hosts": {"hermes": {"initOnSessionStart": False}},
|
||||
}))
|
||||
cfg = HonchoClientConfig.from_global_config(config_path=cfg_file)
|
||||
assert cfg.init_on_session_start is False
|
||||
|
||||
def test_host_block_true_overrides_root_absent(self, tmp_path):
|
||||
cfg_file = tmp_path / "config.json"
|
||||
cfg_file.write_text(json.dumps({
|
||||
"apiKey": "k",
|
||||
"hosts": {"hermes": {"initOnSessionStart": True}},
|
||||
}))
|
||||
cfg = HonchoClientConfig.from_global_config(config_path=cfg_file)
|
||||
assert cfg.init_on_session_start is True
|
||||
|
||||
def test_absent_everywhere_defaults_false(self, tmp_path):
|
||||
cfg_file = tmp_path / "config.json"
|
||||
cfg_file.write_text(json.dumps({"apiKey": "k"}))
|
||||
cfg = HonchoClientConfig.from_global_config(config_path=cfg_file)
|
||||
assert cfg.init_on_session_start is False
|
||||
result = config.resolve_session_name(
|
||||
gateway_session_key="agent:main:telegram:dm:8439114563",
|
||||
)
|
||||
assert result == "agent-main-telegram-dm-8439114563"
|
||||
assert ":" not in result
|
||||
|
||||
|
||||
class TestResetHonchoClient:
|
||||
|
|
@ -549,3 +663,91 @@ class TestResetHonchoClient:
|
|||
assert mod._honcho_client is not None
|
||||
reset_honcho_client()
|
||||
assert mod._honcho_client is None
|
||||
|
||||
|
||||
class TestDialecticDepthParsing:
|
||||
"""Tests for _parse_dialectic_depth and _parse_dialectic_depth_levels."""
|
||||
|
||||
def test_default_depth_is_1(self, tmp_path):
|
||||
"""Default dialecticDepth should be 1."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***"}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth == 1
|
||||
|
||||
def test_depth_from_root(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***", "dialecticDepth": 2}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth == 2
|
||||
|
||||
def test_depth_host_block_wins(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "***",
|
||||
"dialecticDepth": 1,
|
||||
"hosts": {"hermes": {"dialecticDepth": 3}},
|
||||
}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth == 3
|
||||
|
||||
def test_depth_clamped_high(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***", "dialecticDepth": 10}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth == 3
|
||||
|
||||
def test_depth_clamped_low(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***", "dialecticDepth": -1}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth == 1
|
||||
|
||||
def test_depth_levels_default_none(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({"apiKey": "***"}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth_levels is None
|
||||
|
||||
def test_depth_levels_from_config(self, tmp_path):
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "***",
|
||||
"dialecticDepth": 2,
|
||||
"dialecticDepthLevels": ["minimal", "high"],
|
||||
}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth_levels == ["minimal", "high"]
|
||||
|
||||
def test_depth_levels_padded_if_short(self, tmp_path):
|
||||
"""Array shorter than depth gets padded with 'low'."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "***",
|
||||
"dialecticDepth": 3,
|
||||
"dialecticDepthLevels": ["high"],
|
||||
}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth_levels == ["high", "low", "low"]
|
||||
|
||||
def test_depth_levels_truncated_if_long(self, tmp_path):
|
||||
"""Array longer than depth gets truncated."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "***",
|
||||
"dialecticDepth": 1,
|
||||
"dialecticDepthLevels": ["high", "max", "medium"],
|
||||
}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth_levels == ["high"]
|
||||
|
||||
def test_depth_levels_invalid_values_default_to_low(self, tmp_path):
|
||||
"""Invalid reasoning levels in the array fall back to 'low'."""
|
||||
config_file = tmp_path / "config.json"
|
||||
config_file.write_text(json.dumps({
|
||||
"apiKey": "***",
|
||||
"dialecticDepth": 2,
|
||||
"dialecticDepthLevels": ["invalid", "high"],
|
||||
}))
|
||||
config = HonchoClientConfig.from_global_config(config_path=config_file)
|
||||
assert config.dialectic_depth_levels == ["low", "high"]
|
||||
|
|
|
|||
|
|
@ -205,27 +205,62 @@ class TestPeerLookupHelpers:
|
|||
|
||||
def test_get_peer_card_uses_direct_peer_lookup(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
user_peer = MagicMock()
|
||||
user_peer.get_card.return_value = ["Name: Robert"]
|
||||
mgr._get_or_create_peer = MagicMock(return_value=user_peer)
|
||||
assistant_peer = MagicMock()
|
||||
assistant_peer.get_card.return_value = ["Name: Robert"]
|
||||
mgr._get_or_create_peer = MagicMock(return_value=assistant_peer)
|
||||
|
||||
assert mgr.get_peer_card(session.key) == ["Name: Robert"]
|
||||
user_peer.get_card.assert_called_once_with()
|
||||
assistant_peer.get_card.assert_called_once_with(target=session.user_peer_id)
|
||||
|
||||
def test_search_context_uses_peer_context_response(self):
|
||||
def test_search_context_uses_assistant_perspective_with_target(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
user_peer = MagicMock()
|
||||
user_peer.context.return_value = SimpleNamespace(
|
||||
assistant_peer = MagicMock()
|
||||
assistant_peer.context.return_value = SimpleNamespace(
|
||||
representation="Robert runs neuralancer",
|
||||
peer_card=["Location: Melbourne"],
|
||||
)
|
||||
mgr._get_or_create_peer = MagicMock(return_value=user_peer)
|
||||
mgr._get_or_create_peer = MagicMock(return_value=assistant_peer)
|
||||
|
||||
result = mgr.search_context(session.key, "neuralancer")
|
||||
|
||||
assert "Robert runs neuralancer" in result
|
||||
assert "- Location: Melbourne" in result
|
||||
user_peer.context.assert_called_once_with(search_query="neuralancer")
|
||||
assistant_peer.context.assert_called_once_with(
|
||||
target=session.user_peer_id,
|
||||
search_query="neuralancer",
|
||||
)
|
||||
|
||||
def test_search_context_unified_mode_uses_user_self_context(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
mgr._ai_observe_others = False
|
||||
user_peer = MagicMock()
|
||||
user_peer.context.return_value = SimpleNamespace(
|
||||
representation="Unified self context",
|
||||
peer_card=["Name: Robert"],
|
||||
)
|
||||
mgr._get_or_create_peer = MagicMock(return_value=user_peer)
|
||||
|
||||
result = mgr.search_context(session.key, "self")
|
||||
|
||||
assert "Unified self context" in result
|
||||
user_peer.context.assert_called_once_with(search_query="self")
|
||||
|
||||
def test_search_context_accepts_explicit_ai_peer_id(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
ai_peer = MagicMock()
|
||||
ai_peer.context.return_value = SimpleNamespace(
|
||||
representation="Assistant self context",
|
||||
peer_card=["Role: Assistant"],
|
||||
)
|
||||
mgr._get_or_create_peer = MagicMock(return_value=ai_peer)
|
||||
|
||||
result = mgr.search_context(session.key, "assistant", peer=session.assistant_peer_id)
|
||||
|
||||
assert "Assistant self context" in result
|
||||
ai_peer.context.assert_called_once_with(
|
||||
target=session.assistant_peer_id,
|
||||
search_query="assistant",
|
||||
)
|
||||
|
||||
def test_get_prefetch_context_fetches_user_and_ai_from_peer_api(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
|
|
@ -235,9 +270,15 @@ class TestPeerLookupHelpers:
|
|||
peer_card=["Name: Robert"],
|
||||
)
|
||||
ai_peer = MagicMock()
|
||||
ai_peer.context.return_value = SimpleNamespace(
|
||||
representation="AI representation",
|
||||
peer_card=["Owner: Robert"],
|
||||
ai_peer.context.side_effect = lambda **kwargs: SimpleNamespace(
|
||||
representation=(
|
||||
"AI representation" if kwargs.get("target") == session.assistant_peer_id
|
||||
else "Mixed representation"
|
||||
),
|
||||
peer_card=(
|
||||
["Role: Assistant"] if kwargs.get("target") == session.assistant_peer_id
|
||||
else ["Name: Robert"]
|
||||
),
|
||||
)
|
||||
mgr._get_or_create_peer = MagicMock(side_effect=[user_peer, ai_peer])
|
||||
|
||||
|
|
@ -247,17 +288,23 @@ class TestPeerLookupHelpers:
|
|||
"representation": "User representation",
|
||||
"card": "Name: Robert",
|
||||
"ai_representation": "AI representation",
|
||||
"ai_card": "Owner: Robert",
|
||||
"ai_card": "Role: Assistant",
|
||||
}
|
||||
user_peer.context.assert_called_once_with()
|
||||
ai_peer.context.assert_called_once_with()
|
||||
user_peer.context.assert_called_once_with(target=session.user_peer_id)
|
||||
ai_peer.context.assert_called_once_with(target=session.assistant_peer_id)
|
||||
|
||||
def test_get_ai_representation_uses_peer_api(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
ai_peer = MagicMock()
|
||||
ai_peer.context.return_value = SimpleNamespace(
|
||||
representation="AI representation",
|
||||
peer_card=["Owner: Robert"],
|
||||
ai_peer.context.side_effect = lambda **kwargs: SimpleNamespace(
|
||||
representation=(
|
||||
"AI representation" if kwargs.get("target") == session.assistant_peer_id
|
||||
else "Mixed representation"
|
||||
),
|
||||
peer_card=(
|
||||
["Role: Assistant"] if kwargs.get("target") == session.assistant_peer_id
|
||||
else ["Name: Robert"]
|
||||
),
|
||||
)
|
||||
mgr._get_or_create_peer = MagicMock(return_value=ai_peer)
|
||||
|
||||
|
|
@ -265,9 +312,167 @@ class TestPeerLookupHelpers:
|
|||
|
||||
assert result == {
|
||||
"representation": "AI representation",
|
||||
"card": "Owner: Robert",
|
||||
"card": "Role: Assistant",
|
||||
}
|
||||
ai_peer.context.assert_called_once_with()
|
||||
ai_peer.context.assert_called_once_with(target=session.assistant_peer_id)
|
||||
|
||||
def test_create_conclusion_defaults_to_user_target(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
assistant_peer = MagicMock()
|
||||
scope = MagicMock()
|
||||
assistant_peer.conclusions_of.return_value = scope
|
||||
mgr._get_or_create_peer = MagicMock(return_value=assistant_peer)
|
||||
|
||||
ok = mgr.create_conclusion(session.key, "User prefers dark mode")
|
||||
|
||||
assert ok is True
|
||||
assistant_peer.conclusions_of.assert_called_once_with(session.user_peer_id)
|
||||
scope.create.assert_called_once_with([{
|
||||
"content": "User prefers dark mode",
|
||||
"session_id": session.honcho_session_id,
|
||||
}])
|
||||
|
||||
def test_create_conclusion_can_target_ai_peer(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
assistant_peer = MagicMock()
|
||||
scope = MagicMock()
|
||||
assistant_peer.conclusions_of.return_value = scope
|
||||
mgr._get_or_create_peer = MagicMock(return_value=assistant_peer)
|
||||
|
||||
ok = mgr.create_conclusion(session.key, "Assistant prefers terse summaries", peer="ai")
|
||||
|
||||
assert ok is True
|
||||
assistant_peer.conclusions_of.assert_called_once_with(session.assistant_peer_id)
|
||||
scope.create.assert_called_once_with([{
|
||||
"content": "Assistant prefers terse summaries",
|
||||
"session_id": session.honcho_session_id,
|
||||
}])
|
||||
|
||||
def test_create_conclusion_accepts_explicit_user_peer_id(self):
|
||||
mgr, session = self._make_cached_manager()
|
||||
assistant_peer = MagicMock()
|
||||
scope = MagicMock()
|
||||
assistant_peer.conclusions_of.return_value = scope
|
||||
mgr._get_or_create_peer = MagicMock(return_value=assistant_peer)
|
||||
|
||||
ok = mgr.create_conclusion(session.key, "Robert prefers vinyl", peer=session.user_peer_id)
|
||||
|
||||
assert ok is True
|
||||
assistant_peer.conclusions_of.assert_called_once_with(session.user_peer_id)
|
||||
scope.create.assert_called_once_with([{
|
||||
"content": "Robert prefers vinyl",
|
||||
"session_id": session.honcho_session_id,
|
||||
}])
|
||||
|
||||
|
||||
class TestConcludeToolDispatch:
|
||||
def test_honcho_conclude_defaults_to_user_peer(self):
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
provider._manager.create_conclusion.return_value = True
|
||||
|
||||
result = provider.handle_tool_call(
|
||||
"honcho_conclude",
|
||||
{"conclusion": "User prefers dark mode"},
|
||||
)
|
||||
|
||||
assert "Conclusion saved for user" in result
|
||||
provider._manager.create_conclusion.assert_called_once_with(
|
||||
"telegram:123",
|
||||
"User prefers dark mode",
|
||||
peer="user",
|
||||
)
|
||||
|
||||
def test_honcho_conclude_can_target_ai_peer(self):
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
provider._manager.create_conclusion.return_value = True
|
||||
|
||||
result = provider.handle_tool_call(
|
||||
"honcho_conclude",
|
||||
{"conclusion": "Assistant likes terse replies", "peer": "ai"},
|
||||
)
|
||||
|
||||
assert "Conclusion saved for ai" in result
|
||||
provider._manager.create_conclusion.assert_called_once_with(
|
||||
"telegram:123",
|
||||
"Assistant likes terse replies",
|
||||
peer="ai",
|
||||
)
|
||||
|
||||
def test_honcho_profile_can_target_explicit_peer_id(self):
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
provider._manager.get_peer_card.return_value = ["Role: Assistant"]
|
||||
|
||||
result = provider.handle_tool_call(
|
||||
"honcho_profile",
|
||||
{"peer": "hermes"},
|
||||
)
|
||||
|
||||
assert "Role: Assistant" in result
|
||||
provider._manager.get_peer_card.assert_called_once_with("telegram:123", peer="hermes")
|
||||
|
||||
def test_honcho_search_can_target_explicit_peer_id(self):
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
provider._manager.search_context.return_value = "Assistant self context"
|
||||
|
||||
result = provider.handle_tool_call(
|
||||
"honcho_search",
|
||||
{"query": "assistant", "peer": "hermes"},
|
||||
)
|
||||
|
||||
assert "Assistant self context" in result
|
||||
provider._manager.search_context.assert_called_once_with(
|
||||
"telegram:123",
|
||||
"assistant",
|
||||
max_tokens=800,
|
||||
peer="hermes",
|
||||
)
|
||||
|
||||
def test_honcho_reasoning_can_target_explicit_peer_id(self):
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
provider._manager.dialectic_query.return_value = "Assistant answer"
|
||||
|
||||
result = provider.handle_tool_call(
|
||||
"honcho_reasoning",
|
||||
{"query": "who are you", "peer": "hermes"},
|
||||
)
|
||||
|
||||
assert "Assistant answer" in result
|
||||
provider._manager.dialectic_query.assert_called_once_with(
|
||||
"telegram:123",
|
||||
"who are you",
|
||||
reasoning_level=None,
|
||||
peer="hermes",
|
||||
)
|
||||
|
||||
def test_honcho_conclude_missing_both_params_returns_error(self):
|
||||
"""Calling honcho_conclude with neither conclusion nor delete_id returns a tool error."""
|
||||
import json
|
||||
provider = HonchoMemoryProvider()
|
||||
provider._session_initialized = True
|
||||
provider._session_key = "telegram:123"
|
||||
provider._manager = MagicMock()
|
||||
|
||||
result = provider.handle_tool_call("honcho_conclude", {})
|
||||
|
||||
parsed = json.loads(result)
|
||||
assert "error" in parsed or "Missing required" in parsed.get("result", "")
|
||||
provider._manager.create_conclusion.assert_not_called()
|
||||
provider._manager.delete_conclusion.assert_not_called()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
@ -366,6 +571,54 @@ class TestToolsModeInitBehavior:
|
|||
assert cfg.peer_name == "8439114563"
|
||||
|
||||
|
||||
class TestPerSessionMigrateGuard:
    """Verify migrate_memory_files is skipped under per-session strategy.

    per-session creates a fresh Honcho session every Hermes run. Uploading
    MEMORY.md/USER.md/SOUL.md to each short-lived session floods the backend
    with duplicate content. The guard was added to prevent orphan sessions
    containing only <prior_memory_file> wrappers.
    """

    def _make_provider_with_strategy(self, strategy, init_on_session_start=True):
        """Create a HonchoMemoryProvider and track migrate_memory_files calls."""
        from plugins.memory.honcho.client import HonchoClientConfig
        from unittest.mock import patch, MagicMock

        cfg = HonchoClientConfig(
            api_key="test-key",
            enabled=True,
            recall_mode="tools",
            init_on_session_start=init_on_session_start,
            session_strategy=strategy,
        )

        provider = HonchoMemoryProvider()

        # The manager is fully mocked; tests only inspect whether
        # migrate_memory_files was invoked during initialize().
        mock_manager = MagicMock()
        mock_session = MagicMock()
        mock_session.messages = []  # empty = new session → triggers migration path
        mock_manager.get_or_create.return_value = mock_session

        # Patch every external touchpoint so initialize() never performs real
        # config reads, network calls, or filesystem access.
        with patch("plugins.memory.honcho.client.HonchoClientConfig.from_global_config", return_value=cfg), \
                patch("plugins.memory.honcho.client.get_honcho_client", return_value=MagicMock()), \
                patch("plugins.memory.honcho.session.HonchoSessionManager", return_value=mock_manager), \
                patch("hermes_constants.get_hermes_home", return_value=MagicMock()):
            provider.initialize(session_id="test-session-001")

        return provider, mock_manager

    def test_migrate_skipped_for_per_session(self):
        """per-session strategy must NOT call migrate_memory_files."""
        _, mock_manager = self._make_provider_with_strategy("per-session")
        mock_manager.migrate_memory_files.assert_not_called()

    def test_migrate_runs_for_per_directory(self):
        """per-directory strategy with empty session SHOULD call migrate_memory_files."""
        _, mock_manager = self._make_provider_with_strategy("per-directory")
        mock_manager.migrate_memory_files.assert_called_once()
|
||||
|
||||
|
||||
class TestChunkMessage:
|
||||
def test_short_message_single_chunk(self):
|
||||
result = HonchoMemoryProvider._chunk_message("hello world", 100)
|
||||
|
|
@ -420,6 +673,60 @@ class TestChunkMessage:
|
|||
assert len(chunk) <= 25000
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Context token budget enforcement
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestTruncateToBudget:
    """_truncate_to_budget must bound injected context by context_tokens."""

    @staticmethod
    def _provider_with_budget(tokens):
        """Return a bare provider whose config carries the given token budget."""
        from plugins.memory.honcho.client import HonchoClientConfig

        instance = HonchoMemoryProvider()
        instance._config = HonchoClientConfig(context_tokens=tokens)
        return instance

    def test_truncates_oversized_context(self):
        """Text exceeding context_tokens budget is truncated at a word boundary."""
        instance = self._provider_with_budget(10)

        oversized = "word " * 200  # ~1000 chars, well over 10*4=40 char budget
        clipped = instance._truncate_to_budget(oversized)

        assert clipped.endswith(" …")
        assert len(clipped) <= 50  # budget_chars + ellipsis + word boundary slack

    def test_no_truncation_within_budget(self):
        """Text within budget passes through unchanged."""
        instance = self._provider_with_budget(1000)

        brief = "Name: Robert, Location: Melbourne"
        assert instance._truncate_to_budget(brief) == brief

    def test_no_truncation_when_context_tokens_none(self):
        """When context_tokens is None (explicit opt-out), no truncation."""
        instance = self._provider_with_budget(None)

        oversized = "word " * 500
        assert instance._truncate_to_budget(oversized) == oversized

    def test_context_tokens_cap_bounds_prefetch(self):
        """With an explicit token budget, oversized prefetch is bounded."""
        instance = self._provider_with_budget(1200)

        blob = "x" * 10000  # simulate a massive representation
        bounded = instance._truncate_to_budget(blob)

        # 1200 tokens * 4 chars = 4800 chars + " …"
        assert len(bounded) <= 4805
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dialectic input guard
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
@ -452,3 +759,387 @@ class TestDialecticInputGuard:
|
|||
# The query passed to chat() should be truncated
|
||||
actual_query = mock_peer.chat.call_args[0][0]
|
||||
assert len(actual_query) <= 100
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDialecticCadenceDefaults:
    """Regression tests for dialectic_cadence default value."""

    @staticmethod
    def _make_provider(cfg_extra=None):
        """Create a HonchoMemoryProvider with mocked dependencies."""
        from unittest.mock import patch, MagicMock
        from plugins.memory.honcho.client import HonchoClientConfig

        # cfg_extra entries are merged over the baseline config kwargs so a
        # test can override individual settings (e.g. raw dialecticCadence).
        defaults = dict(api_key="test-key", enabled=True, recall_mode="hybrid")
        if cfg_extra:
            defaults.update(cfg_extra)
        cfg = HonchoClientConfig(**defaults)
        provider = HonchoMemoryProvider()
        mock_manager = MagicMock()
        mock_session = MagicMock()
        mock_session.messages = []
        mock_manager.get_or_create.return_value = mock_session

        # Patch config loading, client creation, the session manager, and the
        # home-dir lookup so initialize() runs without touching the real world.
        with patch("plugins.memory.honcho.client.HonchoClientConfig.from_global_config", return_value=cfg), \
                patch("plugins.memory.honcho.client.get_honcho_client", return_value=MagicMock()), \
                patch("plugins.memory.honcho.session.HonchoSessionManager", return_value=mock_manager), \
                patch("hermes_constants.get_hermes_home", return_value=MagicMock()):
            provider.initialize(session_id="test-session-001")

        return provider

    def test_default_is_3(self):
        """Default dialectic_cadence should be 3 to avoid per-turn LLM calls."""
        provider = self._make_provider()
        assert provider._dialectic_cadence == 3

    def test_config_override(self):
        """dialecticCadence from config overrides the default."""
        provider = self._make_provider(cfg_extra={"raw": {"dialecticCadence": 5}})
        assert provider._dialectic_cadence == 5
|
||||
|
||||
|
||||
class TestBaseContextSummary:
    """Base context injection should include session summary when available."""

    def test_format_includes_summary(self):
        """A summary renders as its own section, ahead of the representation."""
        rendered = HonchoMemoryProvider()._format_first_turn_context({
            "summary": "Testing Honcho tools and dialectic depth.",
            "representation": "Eri is a developer.",
            "card": "Name: Eri Barrett",
        })
        assert "## Session Summary" in rendered
        assert rendered.index("Session Summary") < rendered.index("User Representation")

    def test_format_without_summary(self):
        """With no summary key the output has no summary section."""
        rendered = HonchoMemoryProvider()._format_first_turn_context(
            {"representation": "Eri is a developer.", "card": "Name: Eri"}
        )
        assert "User Representation" in rendered
        assert "Session Summary" not in rendered

    def test_format_empty_summary_skipped(self):
        """An empty summary string produces no section either."""
        rendered = HonchoMemoryProvider()._format_first_turn_context(
            {"summary": "", "representation": "rep", "card": "card"}
        )
        assert "Session Summary" not in rendered
|
||||
|
||||
|
||||
class TestDialecticDepth:
    """Tests for the dialecticDepth multi-pass system."""

    @staticmethod
    def _make_provider(cfg_extra=None):
        """Build an initialized provider with all Honcho I/O mocked out.

        cfg_extra entries are merged over the baseline config kwargs so each
        test can tune dialectic depth/level settings.
        """
        from unittest.mock import patch, MagicMock
        from plugins.memory.honcho.client import HonchoClientConfig

        defaults = dict(api_key="test-key", enabled=True, recall_mode="hybrid")
        if cfg_extra:
            defaults.update(cfg_extra)
        cfg = HonchoClientConfig(**defaults)
        provider = HonchoMemoryProvider()
        mock_manager = MagicMock()
        mock_session = MagicMock()
        mock_session.messages = []
        mock_manager.get_or_create.return_value = mock_session

        # Patch every external touchpoint so initialize() runs offline.
        with patch("plugins.memory.honcho.client.HonchoClientConfig.from_global_config", return_value=cfg), \
                patch("plugins.memory.honcho.client.get_honcho_client", return_value=MagicMock()), \
                patch("plugins.memory.honcho.session.HonchoSessionManager", return_value=mock_manager), \
                patch("hermes_constants.get_hermes_home", return_value=MagicMock()):
            provider.initialize(session_id="test-session-001")

        return provider

    def test_default_depth_is_1(self):
        """Default dialecticDepth should be 1 — single .chat() call."""
        provider = self._make_provider()
        assert provider._dialectic_depth == 1

    def test_depth_from_config(self):
        """dialecticDepth from config sets the depth."""
        provider = self._make_provider(cfg_extra={"dialectic_depth": 2})
        assert provider._dialectic_depth == 2

    def test_depth_clamped_to_3(self):
        """dialecticDepth > 3 gets clamped to 3."""
        provider = self._make_provider(cfg_extra={"dialectic_depth": 7})
        assert provider._dialectic_depth == 3

    def test_depth_clamped_to_1(self):
        """dialecticDepth < 1 gets clamped to 1."""
        provider = self._make_provider(cfg_extra={"dialectic_depth": 0})
        assert provider._dialectic_depth == 1

    def test_depth_levels_from_config(self):
        """dialecticDepthLevels array is read from config."""
        provider = self._make_provider(cfg_extra={
            "dialectic_depth": 2,
            "dialectic_depth_levels": ["minimal", "high"],
        })
        assert provider._dialectic_depth_levels == ["minimal", "high"]

    def test_depth_levels_none_by_default(self):
        """When dialecticDepthLevels is not configured, it's None."""
        provider = self._make_provider()
        assert provider._dialectic_depth_levels is None

    def test_resolve_pass_level_uses_depth_levels(self):
        """Per-pass levels from dialecticDepthLevels override proportional."""
        provider = self._make_provider(cfg_extra={
            "dialectic_depth": 2,
            "dialectic_depth_levels": ["minimal", "high"],
        })
        assert provider._resolve_pass_level(0) == "minimal"
        assert provider._resolve_pass_level(1) == "high"

    def test_resolve_pass_level_proportional_depth_1(self):
        """Depth 1 pass 0 uses the base reasoning level."""
        provider = self._make_provider(cfg_extra={
            "dialectic_depth": 1,
            "dialectic_reasoning_level": "medium",
        })
        assert provider._resolve_pass_level(0) == "medium"

    def test_resolve_pass_level_proportional_depth_2(self):
        """Depth 2: pass 0 is minimal, pass 1 is base level."""
        provider = self._make_provider(cfg_extra={
            "dialectic_depth": 2,
            "dialectic_reasoning_level": "high",
        })
        assert provider._resolve_pass_level(0) == "minimal"
        assert provider._resolve_pass_level(1) == "high"

    def test_cold_start_prompt(self):
        """Cold start (no base context) uses general user query."""
        provider = self._make_provider()
        prompt = provider._build_dialectic_prompt(0, [], is_cold=True)
        assert "preferences" in prompt.lower()
        assert "session" not in prompt.lower()

    def test_warm_session_prompt(self):
        """Warm session (has context) uses session-scoped query."""
        provider = self._make_provider()
        prompt = provider._build_dialectic_prompt(0, [], is_cold=False)
        assert "session" in prompt.lower()
        assert "current conversation" in prompt.lower()

    def test_signal_sufficient_short_response(self):
        """Short responses are not sufficient signal."""
        assert not HonchoMemoryProvider._signal_sufficient("ok")
        assert not HonchoMemoryProvider._signal_sufficient("")
        assert not HonchoMemoryProvider._signal_sufficient(None)

    def test_signal_sufficient_structured_response(self):
        """Structured responses with bullets/headers are sufficient."""
        result = "## Current State\n- Working on Honcho PR\n- Testing dialectic depth\n" + "x" * 50
        assert HonchoMemoryProvider._signal_sufficient(result)

    def test_signal_sufficient_long_unstructured(self):
        """Long responses are sufficient even without structure."""
        assert HonchoMemoryProvider._signal_sufficient("a" * 301)

    def test_run_dialectic_depth_single_pass(self):
        """Depth 1 makes exactly one .chat() call."""
        from unittest.mock import MagicMock
        provider = self._make_provider(cfg_extra={"dialectic_depth": 1})
        provider._manager = MagicMock()
        provider._manager.dialectic_query.return_value = "user prefers zero-fluff"
        provider._session_key = "test"
        provider._base_context_cache = None  # cold start

        result = provider._run_dialectic_depth("hello")
        assert result == "user prefers zero-fluff"
        assert provider._manager.dialectic_query.call_count == 1

    def test_run_dialectic_depth_two_passes(self):
        """Depth 2 makes two .chat() calls when pass 1 signal is weak."""
        from unittest.mock import MagicMock
        provider = self._make_provider(cfg_extra={"dialectic_depth": 2})
        provider._manager = MagicMock()
        # side_effect: consecutive dialectic_query calls return these in order.
        provider._manager.dialectic_query.side_effect = [
            "thin response",  # pass 0: weak signal
            "## Synthesis\n- Grounded in evidence\n- Current PR work\n" + "x" * 100,  # pass 1: strong
        ]
        provider._session_key = "test"
        provider._base_context_cache = "existing context"

        result = provider._run_dialectic_depth("test query")
        assert provider._manager.dialectic_query.call_count == 2
        assert "Synthesis" in result

    def test_first_turn_runs_dialectic_synchronously(self):
        """First turn should fire the dialectic synchronously (cold start)."""
        from unittest.mock import MagicMock, patch
        provider = self._make_provider(cfg_extra={"dialectic_depth": 1})
        provider._manager = MagicMock()
        provider._manager.dialectic_query.return_value = "cold start synthesis"
        provider._manager.get_prefetch_context.return_value = None
        provider._manager.pop_context_result.return_value = None
        provider._session_key = "test"
        provider._base_context_cache = ""  # cold start
        provider._last_dialectic_turn = -999  # never fired

        result = provider.prefetch("hello world")
        assert "cold start synthesis" in result
        assert provider._manager.dialectic_query.call_count == 1
        # After first-turn sync, _last_dialectic_turn should be updated
        assert provider._last_dialectic_turn != -999

    def test_first_turn_dialectic_does_not_double_fire(self):
        """After first-turn sync dialectic, queue_prefetch should skip (cadence)."""
        from unittest.mock import MagicMock
        provider = self._make_provider(cfg_extra={"dialectic_depth": 1})
        provider._manager = MagicMock()
        provider._manager.dialectic_query.return_value = "cold start synthesis"
        provider._manager.get_prefetch_context.return_value = None
        provider._manager.pop_context_result.return_value = None
        provider._session_key = "test"
        provider._base_context_cache = ""
        provider._last_dialectic_turn = -999
        provider._turn_count = 0

        # First turn fires sync dialectic
        provider.prefetch("hello")
        assert provider._manager.dialectic_query.call_count == 1

        # Now queue_prefetch on same turn should skip (cadence: 0 - 0 < 3)
        provider._manager.dialectic_query.reset_mock()
        provider.queue_prefetch("hello")
        assert provider._manager.dialectic_query.call_count == 0

    def test_run_dialectic_depth_bails_early_on_strong_signal(self):
        """Depth 2 skips pass 1 when pass 0 returns strong signal."""
        from unittest.mock import MagicMock
        provider = self._make_provider(cfg_extra={"dialectic_depth": 2})
        provider._manager = MagicMock()
        provider._manager.dialectic_query.return_value = (
            "## Full Assessment\n- Strong structured response\n- With evidence\n" + "x" * 200
        )
        provider._session_key = "test"
        provider._base_context_cache = "existing context"

        result = provider._run_dialectic_depth("test query")
        # Only 1 call because pass 0 had sufficient signal
        assert provider._manager.dialectic_query.call_count == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# set_peer_card None guard
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSetPeerCardNoneGuard:
    """set_peer_card must return None (not raise) when peer ID cannot be resolved."""

    @staticmethod
    def _bare_manager():
        """Construct a HonchoSessionManager without running its __init__."""
        from plugins.memory.honcho.client import HonchoClientConfig
        from plugins.memory.honcho.session import HonchoSessionManager

        manager = HonchoSessionManager.__new__(HonchoSessionManager)
        manager._config = HonchoClientConfig(api_key="test-key", enabled=True)
        manager._cache = {}
        manager._sessions_cache = {}
        return manager

    def test_returns_none_when_peer_resolves_to_none(self):
        """set_peer_card returns None when _resolve_peer_id returns None."""
        from unittest.mock import patch

        manager = self._bare_manager()
        manager._cache["test"] = HonchoSession(
            key="test",
            honcho_session_id="sid",
            user_peer_id="user-peer",
            assistant_peer_id="ai-peer",
        )

        with patch.object(manager, "_resolve_peer_id", return_value=None):
            outcome = manager.set_peer_card("test", ["fact 1", "fact 2"], peer="ghost")

        assert outcome is None

    def test_returns_none_when_session_missing(self):
        """set_peer_card returns None when session key is not in cache."""
        manager = self._bare_manager()
        assert manager.set_peer_card("nonexistent", ["fact"], peer="user") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# get_session_context cache-miss fallback respects peer param
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGetSessionContextFallback:
    """get_session_context fallback must honour the peer param when honcho_session is absent."""

    @staticmethod
    def _fallback_manager(user_peer_id="user-peer", assistant_peer_id="ai-peer"):
        """Build a manager whose session is cached but missing from _sessions_cache.

        Leaving the Honcho session out of _sessions_cache forces
        get_session_context down its cache-miss fallback path.
        """
        from plugins.memory.honcho.client import HonchoClientConfig
        from plugins.memory.honcho.session import HonchoSessionManager

        manager = HonchoSessionManager.__new__(HonchoSessionManager)
        manager._config = HonchoClientConfig(api_key="test-key", enabled=True)
        manager._sessions_cache = {}
        manager._dialectic_dynamic = True
        manager._dialectic_reasoning_level = "low"
        manager._dialectic_max_input_chars = 10000
        manager._ai_observe_others = True
        manager._cache = {
            "test": HonchoSession(
                key="test",
                honcho_session_id="sid-missing-from-sessions-cache",
                user_peer_id=user_peer_id,
                assistant_peer_id=assistant_peer_id,
            )
        }
        return manager

    def test_fallback_uses_user_peer_for_user(self):
        """On cache miss, peer='user' fetches user peer context."""
        manager = self._fallback_manager()
        calls = []

        def _spy(peer_id, search_query=None, *, target=None):
            calls.append((peer_id, target))
            return {"representation": "user rep", "card": []}

        manager._fetch_peer_context = _spy
        manager.get_session_context("test", peer="user")

        # Exactly one fetch, and both the fetched peer and the target are
        # the user's peer id.
        assert calls == [("user-peer", "user-peer")]

    def test_fallback_uses_ai_peer_for_ai(self):
        """On cache miss, peer='ai' fetches assistant peer context, not user."""
        manager = self._fallback_manager()
        calls = []

        def _spy(peer_id, search_query=None, *, target=None):
            calls.append((peer_id, target))
            return {"representation": "ai rep", "card": []}

        manager._fetch_peer_context = _spy
        manager.get_session_context("test", peer="ai")

        # Exactly one fetch, aimed at the assistant peer on both axes.
        assert calls == [("ai-peer", "ai-peer")]
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue