fix(honcho): plugin drift overhaul -- observation config, chunking, setup wizard, docs, dead code cleanup

Salvaged from PR #5045 by erosika.

- Replace memoryMode/peer_memory_modes with granular per-peer observation config
- Add message chunking for Honcho API limits (25k chars default; see the chunking sketch after this list)
- Add dialectic input guard (10k chars default; see the guard sketch below)
- Add dialecticDynamic toggle for reasoning level auto-bump
- Rewrite setup wizard with cloud/local deployment picker
- Switch peer card/profile/search from session.context() to direct peer APIs (see the lookup sketch below)
- Add server-side observation sync via get_peer_configuration()
- Fix base_url/baseUrl config mismatch for self-hosted setups
- Fix local auth leak (cloud API keys no longer sent to local instances)
- Remove dead code: memoryMode, peer_memory_modes, linkedHosts, suppress flags, SOUL.md aiPeer sync
- Add post_setup hook to memory_setup.py for provider-specific setup wizards
- Comprehensive README rewrite with full config reference
- New optional skill: autonomous-ai-agents/honcho
- Expanded memory-providers.md with multi-profile docs
- 9 new tests (chunking, dialectic guard, peer lookups), 14 dead tests removed
- Fix 2 pre-existing TestResolveConfigPath filesystem isolation failures
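
A minimal sketch of the chunking behavior the new tests pin down, for reviewers. Only the _chunk_message(message, limit) entry point, the boundary preferences (paragraph, then sentence, then word), and the "[continued] " prefix come from the tests; the splitting heuristics below are an assumption, not the shipped implementation.

CONTINUATION_PREFIX = "[continued] "

def _chunk_message(message: str, limit: int) -> list[str]:
    """Split message into chunks of at most `limit` chars each.

    Prefers paragraph breaks, then sentence ends, then word boundaries;
    every chunk after the first carries the "[continued] " prefix.
    """
    if len(message) <= limit:
        return [message]
    chunks: list[str] = []
    remaining = message
    while remaining:
        # Reserve room for the prefix on every chunk after the first.
        budget = max(1, limit - (len(CONTINUATION_PREFIX) if chunks else 0))
        if len(remaining) <= budget:
            piece, remaining = remaining, ""
        else:
            window = remaining[:budget]
            cut = window.rfind("\n\n")       # paragraph boundary
            if cut <= 0:
                cut = window.rfind(". ")     # sentence boundary
                if cut > 0:
                    cut += 1                 # keep the period in this chunk
            if cut <= 0:
                cut = window.rfind(" ")      # word boundary
            if cut <= 0:
                cut = budget                 # hard split as a last resort
            piece, remaining = remaining[:cut], remaining[cut:].lstrip()
        prefix = CONTINUATION_PREFIX if chunks else ""
        chunks.append(prefix + piece.rstrip())
    return chunks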
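
And the shape of the dialectic guard, as exercised by the new test. The session lookup, which peer the query targets, and the chat() signature are assumptions here; the test only pins down that the query is clamped to _dialectic_max_input_chars before peer.chat(query) is called.

def dialectic_query(self, key: str, query: str):
    # Bail early when there is no cached session for this key.
    session = self._cache.get(key)
    if session is None:
        return None
    # Clamp the query to the configured budget (10k chars by default)
    # before sending it to Honcho's dialectic endpoint.
    if len(query) > self._dialectic_max_input_chars:
        query = query[: self._dialectic_max_input_chars]
    peer = self._get_or_create_peer(session.user_peer_id)  # assumed target
    return peer.chat(query)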
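
The peer-lookup helpers now hit peer objects directly instead of assembling everything from session.context(). A rough sketch matching what the new tests mock; formatting details beyond the asserted strings are assumptions.

def get_peer_card(self, key: str):
    session = self._cache.get(key)
    if session is None:
        return None
    # Direct peer lookup instead of going through session.context().
    peer = self._get_or_create_peer(session.user_peer_id)
    return peer.get_card()  # e.g. ["Name: Robert"]

def search_context(self, key: str, query: str) -> str:
    session = self._cache.get(key)
    peer = self._get_or_create_peer(session.user_peer_id)
    ctx = peer.context(search_query=query)  # has .representation / .peer_card
    lines = [ctx.representation]
    lines += [f"- {item}" for item in (ctx.peer_card or [])]
    return "\n".join(lines)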
erosika 2026-04-05 12:03:15 -07:00 committed by Teknium
parent 12724e6295
commit c02c3dc723
12 changed files with 1265 additions and 443 deletions


@@ -1,12 +1,14 @@
"""Tests for plugins/memory/honcho/session.py — HonchoSession and helpers."""
from datetime import datetime
from types import SimpleNamespace
from unittest.mock import MagicMock
from plugins.memory.honcho.session import (
HonchoSession,
HonchoSessionManager,
)
from plugins.memory.honcho import HonchoMemoryProvider
# ---------------------------------------------------------------------------
@@ -187,3 +189,175 @@ class TestManagerCacheOps:
        assert keys == {"k1", "k2"}
        s1_info = next(s for s in sessions if s["key"] == "k1")
        assert s1_info["message_count"] == 1


class TestPeerLookupHelpers:
    def _make_cached_manager(self):
        mgr = HonchoSessionManager()
        session = HonchoSession(
            key="telegram:123",
            user_peer_id="robert",
            assistant_peer_id="hermes",
            honcho_session_id="telegram-123",
        )
        mgr._cache[session.key] = session
        return mgr, session

    def test_get_peer_card_uses_direct_peer_lookup(self):
        mgr, session = self._make_cached_manager()
        user_peer = MagicMock()
        user_peer.get_card.return_value = ["Name: Robert"]
        mgr._get_or_create_peer = MagicMock(return_value=user_peer)
        assert mgr.get_peer_card(session.key) == ["Name: Robert"]
        user_peer.get_card.assert_called_once_with()

    def test_search_context_uses_peer_context_response(self):
        mgr, session = self._make_cached_manager()
        user_peer = MagicMock()
        user_peer.context.return_value = SimpleNamespace(
            representation="Robert runs neuralancer",
            peer_card=["Location: Melbourne"],
        )
        mgr._get_or_create_peer = MagicMock(return_value=user_peer)
        result = mgr.search_context(session.key, "neuralancer")
        assert "Robert runs neuralancer" in result
        assert "- Location: Melbourne" in result
        user_peer.context.assert_called_once_with(search_query="neuralancer")

    def test_get_prefetch_context_fetches_user_and_ai_from_peer_api(self):
        mgr, session = self._make_cached_manager()
        user_peer = MagicMock()
        user_peer.context.return_value = SimpleNamespace(
            representation="User representation",
            peer_card=["Name: Robert"],
        )
        ai_peer = MagicMock()
        ai_peer.context.return_value = SimpleNamespace(
            representation="AI representation",
            peer_card=["Owner: Robert"],
        )
        mgr._get_or_create_peer = MagicMock(side_effect=[user_peer, ai_peer])
        result = mgr.get_prefetch_context(session.key)
        assert result == {
            "representation": "User representation",
            "card": "Name: Robert",
            "ai_representation": "AI representation",
            "ai_card": "Owner: Robert",
        }
        user_peer.context.assert_called_once_with()
        ai_peer.context.assert_called_once_with()

    def test_get_ai_representation_uses_peer_api(self):
        mgr, session = self._make_cached_manager()
        ai_peer = MagicMock()
        ai_peer.context.return_value = SimpleNamespace(
            representation="AI representation",
            peer_card=["Owner: Robert"],
        )
        mgr._get_or_create_peer = MagicMock(return_value=ai_peer)
        result = mgr.get_ai_representation(session.key)
        assert result == {
            "representation": "AI representation",
            "card": "Owner: Robert",
        }
        ai_peer.context.assert_called_once_with()


# ---------------------------------------------------------------------------
# Message chunking
# ---------------------------------------------------------------------------


class TestChunkMessage:
    def test_short_message_single_chunk(self):
        result = HonchoMemoryProvider._chunk_message("hello world", 100)
        assert result == ["hello world"]

    def test_exact_limit_single_chunk(self):
        msg = "x" * 100
        result = HonchoMemoryProvider._chunk_message(msg, 100)
        assert result == [msg]

    def test_splits_at_paragraph_boundary(self):
        msg = "first paragraph.\n\nsecond paragraph."
        # limit=30: total is 35, forces split; second chunk with prefix is 29, fits
        result = HonchoMemoryProvider._chunk_message(msg, 30)
        assert len(result) == 2
        assert result[0] == "first paragraph."
        assert result[1] == "[continued] second paragraph."

    def test_splits_at_sentence_boundary(self):
        msg = "First sentence. Second sentence. Third sentence is here."
        result = HonchoMemoryProvider._chunk_message(msg, 35)
        assert len(result) >= 2
        # First chunk should end at a sentence boundary (rstripped)
        assert result[0].rstrip().endswith(".")

    def test_splits_at_word_boundary(self):
        msg = "word " * 20  # 100 chars
        result = HonchoMemoryProvider._chunk_message(msg, 30)
        assert len(result) >= 2
        # No words should be split mid-word
        for chunk in result:
            clean = chunk.replace("[continued] ", "")
            assert not clean.startswith(" ")

    def test_continuation_prefix(self):
        msg = "a" * 200
        result = HonchoMemoryProvider._chunk_message(msg, 50)
        assert len(result) >= 2
        assert not result[0].startswith("[continued]")
        for chunk in result[1:]:
            assert chunk.startswith("[continued] ")

    def test_empty_message(self):
        result = HonchoMemoryProvider._chunk_message("", 100)
        assert result == [""]

    def test_large_message_many_chunks(self):
        msg = "word " * 10000  # 50k chars
        result = HonchoMemoryProvider._chunk_message(msg, 25000)
        assert len(result) >= 2
        for chunk in result:
            assert len(chunk) <= 25000


# ---------------------------------------------------------------------------
# Dialectic input guard
# ---------------------------------------------------------------------------


class TestDialecticInputGuard:
    def test_long_query_truncated(self):
        """Queries exceeding dialectic_max_input_chars are truncated."""
        from plugins.memory.honcho.client import HonchoClientConfig

        cfg = HonchoClientConfig(dialectic_max_input_chars=100)
        mgr = HonchoSessionManager(config=cfg)
        mgr._dialectic_max_input_chars = 100
        # Create a cached session so dialectic_query doesn't bail early
        session = HonchoSession(
            key="test", user_peer_id="u", assistant_peer_id="a",
            honcho_session_id="s",
        )
        mgr._cache["test"] = session
        # Mock the peer to capture the query
        mock_peer = MagicMock()
        mock_peer.chat.return_value = "answer"
        mgr._get_or_create_peer = MagicMock(return_value=mock_peer)
        long_query = "word " * 100  # 500 chars, exceeds 100 limit
        mgr.dialectic_query("test", long_query)
        # The query passed to chat() should be truncated
        actual_query = mock_peer.chat.call_args[0][0]
        assert len(actual_query) <= 100