mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
Merge branch 'main' of github.com:NousResearch/hermes-agent into feat/ink-refactor
# Conflicts: # gateway/platforms/base.py # gateway/run.py # tests/gateway/test_command_bypass_active_session.py
This commit is contained in:
commit
b04248f4d5
319 changed files with 25283 additions and 7048 deletions
85
tests/cli/test_cli_status_command.py
Normal file
85
tests/cli/test_cli_status_command.py
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
"""Tests for CLI /status command behavior."""
|
||||
from datetime import datetime
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from cli import HermesCLI
|
||||
from hermes_cli.commands import resolve_command
|
||||
|
||||
|
||||
def _make_cli():
    """Build a bare HermesCLI instance carrying only the state /status reads."""
    # Bypass __init__ so no real config loading or terminal I/O happens.
    shell = HermesCLI.__new__(HermesCLI)
    attrs = {
        "config": {},
        "console": MagicMock(),
        "agent": None,
        "conversation_history": [],
        "session_id": "session-123",
        "_pending_input": MagicMock(),
        "_status_bar_visible": True,
        "model": "openai/gpt-5.4",
        "provider": "openai",
        "session_start": datetime(2026, 4, 9, 19, 24),
        "_agent_running": False,
        "_session_db": MagicMock(),
    }
    for name, value in attrs.items():
        setattr(shell, name, value)
    # Default: no persisted session row for this session id.
    shell._session_db.get_session.return_value = None
    return shell
|
||||
|
||||
|
||||
def test_status_command_is_available_in_cli_registry():
    """/status must resolve in the shared command registry and not be gateway-only."""
    resolved = resolve_command("status")
    assert resolved is not None
    assert resolved.gateway_only is False
|
||||
|
||||
|
||||
def test_process_command_status_dispatches_without_toggling_status_bar():
    """/status must call _show_session_status and leave the status bar alone."""
    shell = _make_cli()

    with patch.object(shell, "_show_session_status", create=True) as status_mock:
        handled = shell.process_command("/status")

    assert handled is True
    status_mock.assert_called_once_with()
    # The visibility flag is owned by /statusbar, not /status.
    assert shell._status_bar_visible is True
|
||||
|
||||
|
||||
def test_statusbar_still_toggles_visibility():
    """/statusbar keeps its original toggle semantics."""
    shell = _make_cli()

    handled = shell.process_command("/statusbar")

    assert handled is True
    # The fixture starts visible; one toggle hides it.
    assert shell._status_bar_visible is False
|
||||
|
||||
|
||||
def test_status_prefix_prefers_status_command_over_statusbar_toggle():
    """An ambiguous '/sta' prefix should resolve to /status, not /statusbar."""
    shell = _make_cli()

    with patch.object(shell, "_show_session_status") as status_mock:
        handled = shell.process_command("/sta")

    assert handled is True
    status_mock.assert_called_once_with()
    # The status bar must not have been toggled as a side effect.
    assert shell._status_bar_visible is True
|
||||
|
||||
|
||||
def test_show_session_status_prints_gateway_style_summary():
    """_show_session_status prints a gateway-style summary via console.print.

    Checks the key fields (session id, path, title, model, tokens, agent
    state) appear in the printed output, and that the final print call
    disables rich markup/highlighting.
    """
    cli_obj = _make_cli()
    cli_obj.agent = SimpleNamespace(
        session_total_tokens=321,
        session_api_calls=4,
    )
    cli_obj._session_db.get_session.return_value = {
        "title": "My titled session",
        "started_at": 1775791440,
    }

    with patch("cli.display_hermes_home", return_value="~/.hermes"):
        cli_obj._show_session_status()

    # Skip calls with no positional args (blank-line spacers): indexing
    # call.args[0] on those would raise IndexError and mask the real failure.
    printed = "\n".join(
        str(call.args[0])
        for call in cli_obj.console.print.call_args_list
        if call.args
    )
    assert "Hermes CLI Status" in printed
    assert "Session ID: session-123" in printed
    assert "Path: ~/.hermes" in printed
    assert "Title: My titled session" in printed
    assert "Model: openai/gpt-5.4 (openai)" in printed
    assert "Tokens: 321" in printed
    assert "Agent Running: No" in printed
    # The last print call must opt out of rich markup and highlighting so the
    # summary renders literally.
    _, kwargs = cli_obj.console.print.call_args
    assert kwargs.get("highlight") is False
    assert kwargs.get("markup") is False
|
||||
413
tests/cli/test_fast_command.py
Normal file
413
tests/cli/test_fast_command.py
Normal file
|
|
@ -0,0 +1,413 @@
|
|||
"""Tests for the /fast CLI command and service-tier config handling."""
|
||||
|
||||
import unittest
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
|
||||
def _import_cli():
    """Import the cli module, shimming hermes_cli.config first if needed.

    Older checkouts of hermes_cli.config lack save_env_value_secure, which
    cli imports at module load; install a no-op stand-in before importing.
    """
    import hermes_cli.config as config_mod

    if not hasattr(config_mod, "save_env_value_secure"):
        def _fake_save(key, value):
            return {
                "success": True,
                "stored_as": key,
                "validated": False,
            }

        config_mod.save_env_value_secure = _fake_save

    import cli as cli_mod
    return cli_mod
|
||||
|
||||
|
||||
class TestParseServiceTierConfig(unittest.TestCase):
    """Mapping of raw config strings through _parse_service_tier_config."""

    def _parse(self, raw):
        # Import lazily so the config-module shim in _import_cli applies first.
        return _import_cli()._parse_service_tier_config(raw)

    def test_fast_maps_to_priority(self):
        for raw in ("fast", "priority"):
            self.assertEqual(self._parse(raw), "priority")

    def test_normal_disables_service_tier(self):
        for raw in ("normal", "off", ""):
            self.assertIsNone(self._parse(raw))
|
||||
|
||||
|
||||
class TestHandleFastCommand(unittest.TestCase):
    """Dispatch behavior of HermesCLI._handle_fast_command."""

    @staticmethod
    def _stub(service_tier=None, model="gpt-5.4", fast_available=True):
        # Lightweight stand-in for a HermesCLI instance.
        return SimpleNamespace(
            service_tier=service_tier,
            provider="openai-codex",
            requested_provider="openai-codex",
            model=model,
            _fast_command_available=lambda: fast_available,
            agent=MagicMock(),
        )

    def test_no_args_shows_status(self):
        cli_mod = _import_cli()
        shell = self._stub(service_tier=None)
        with (
            patch.object(cli_mod, "_cprint") as fake_print,
            patch.object(cli_mod, "save_config_value") as fake_save,
        ):
            cli_mod.HermesCLI._handle_fast_command(shell, "/fast")

        # A bare /fast only reports the current tier; config stays untouched.
        fake_save.assert_not_called()
        # The printed status line reflects the "normal" (disabled) tier.
        joined = " ".join(str(c) for c in fake_print.call_args_list)
        self.assertIn("normal", joined)

    def test_no_args_shows_fast_when_enabled(self):
        cli_mod = _import_cli()
        shell = self._stub(service_tier="priority")
        with (
            patch.object(cli_mod, "_cprint") as fake_print,
            patch.object(cli_mod, "save_config_value") as fake_save,
        ):
            cli_mod.HermesCLI._handle_fast_command(shell, "/fast")

        fake_save.assert_not_called()
        joined = " ".join(str(c) for c in fake_print.call_args_list)
        self.assertIn("fast", joined)

    def test_normal_argument_clears_service_tier(self):
        cli_mod = _import_cli()
        shell = self._stub(service_tier="priority")
        with (
            patch.object(cli_mod, "_cprint"),
            patch.object(cli_mod, "save_config_value", return_value=True) as fake_save,
        ):
            cli_mod.HermesCLI._handle_fast_command(shell, "/fast normal")

        # Persisted as "normal", cleared in memory, and the agent is dropped
        # so the next turn rebuilds it without the tier.
        fake_save.assert_called_once_with("agent.service_tier", "normal")
        self.assertIsNone(shell.service_tier)
        self.assertIsNone(shell.agent)

    def test_unsupported_model_does_not_expose_fast(self):
        cli_mod = _import_cli()
        shell = self._stub(
            service_tier=None, model="gpt-5.3-codex", fast_available=False
        )

        with (
            patch.object(cli_mod, "_cprint") as fake_print,
            patch.object(cli_mod, "save_config_value") as fake_save,
        ):
            cli_mod.HermesCLI._handle_fast_command(shell, "/fast")

        # No config change, but the user still gets an explanatory message.
        fake_save.assert_not_called()
        self.assertTrue(fake_print.called)
|
||||
|
||||
|
||||
class TestPriorityProcessingModels(unittest.TestCase):
    """Verify the expanded Priority Processing model registry."""

    def test_all_documented_models_supported(self):
        from hermes_cli.models import model_supports_fast_mode

        # Every model listed in OpenAI's Priority Processing pricing table.
        documented = (
            "gpt-5.4", "gpt-5.4-mini", "gpt-5.2",
            "gpt-5.1", "gpt-5", "gpt-5-mini",
            "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano",
            "gpt-4o", "gpt-4o-mini",
            "o3", "o4-mini",
        )
        for model in documented:
            assert model_supports_fast_mode(model), f"{model} should support fast mode"

    def test_vendor_prefix_stripped(self):
        from hermes_cli.models import model_supports_fast_mode

        # The "openai/" vendor prefix must not hide a supported model.
        for name in ("openai/gpt-5.4", "openai/gpt-4.1", "openai/o3"):
            assert model_supports_fast_mode(name) is True

    def test_non_priority_models_rejected(self):
        from hermes_cli.models import model_supports_fast_mode

        # Codex/Anthropic models, empty strings and None are all unsupported.
        for name in ("gpt-5.3-codex", "claude-sonnet-4", "", None):
            assert model_supports_fast_mode(name) is False

    def test_resolve_overrides_returns_service_tier(self):
        from hermes_cli.models import resolve_fast_mode_overrides

        for name in ("gpt-5.4", "gpt-4.1"):
            assert resolve_fast_mode_overrides(name) == {"service_tier": "priority"}

    def test_resolve_overrides_none_for_unsupported(self):
        from hermes_cli.models import resolve_fast_mode_overrides

        for name in ("gpt-5.3-codex", "claude-sonnet-4"):
            assert resolve_fast_mode_overrides(name) is None
|
||||
|
||||
|
||||
class TestFastModeRouting(unittest.TestCase):
    """Routing-level behavior: /fast availability and per-turn overrides."""

    @staticmethod
    def _openrouter_stub(model):
        # CLI stand-in pointed at OpenRouter with the priority tier enabled.
        return SimpleNamespace(
            model=model,
            api_key="primary-key",
            base_url="https://openrouter.ai/api/v1",
            provider="openrouter",
            api_mode="chat_completions",
            acp_command=None,
            acp_args=[],
            _credential_pool=None,
            _smart_model_routing={},
            service_tier="priority",
        )

    @staticmethod
    def _openrouter_route(model):
        # Canned primary route as resolve_turn_route would return it.
        return {
            "model": model,
            "runtime": {
                "api_key": "***",
                "base_url": "https://openrouter.ai/api/v1",
                "provider": "openrouter",
                "api_mode": "chat_completions",
                "command": None,
                "args": [],
                "credential_pool": None,
            },
            "label": None,
            "signature": (model, "openrouter", "https://openrouter.ai/api/v1", "chat_completions", None, ()),
        }

    def test_fast_command_exposed_for_model_even_when_provider_is_auto(self):
        cli_mod = _import_cli()
        shell = SimpleNamespace(provider="auto", requested_provider="auto", model="gpt-5.4", agent=None)

        assert cli_mod.HermesCLI._fast_command_available(shell) is True

    def test_fast_command_exposed_for_non_codex_models(self):
        cli_mod = _import_cli()
        for provider, model in (("openai", "gpt-4.1"), ("openrouter", "o3")):
            shell = SimpleNamespace(
                provider=provider, requested_provider=provider, model=model, agent=None
            )
            assert cli_mod.HermesCLI._fast_command_available(shell) is True

    def test_turn_route_injects_overrides_without_provider_switch(self):
        """Fast mode should add request_overrides but NOT change the provider/runtime."""
        cli_mod = _import_cli()
        shell = self._openrouter_stub("gpt-5.4")

        with patch(
            "agent.smart_model_routing.resolve_turn_route",
            return_value=self._openrouter_route("gpt-5.4"),
        ):
            route = cli_mod.HermesCLI._resolve_turn_agent_config(shell, "hi")

        # Provider and API mode are untouched...
        assert route["runtime"]["provider"] == "openrouter"
        assert route["runtime"]["api_mode"] == "chat_completions"
        # ...but the priority tier rides along as a per-request override.
        assert route["request_overrides"] == {"service_tier": "priority"}

    def test_turn_route_keeps_primary_runtime_when_model_has_no_fast_backend(self):
        cli_mod = _import_cli()
        shell = self._openrouter_stub("gpt-5.3-codex")

        with patch(
            "agent.smart_model_routing.resolve_turn_route",
            return_value=self._openrouter_route("gpt-5.3-codex"),
        ):
            route = cli_mod.HermesCLI._resolve_turn_agent_config(shell, "hi")

        # Unsupported model: primary runtime kept, no overrides injected.
        assert route["runtime"]["provider"] == "openrouter"
        assert route.get("request_overrides") is None
|
||||
|
||||
|
||||
class TestAnthropicFastMode(unittest.TestCase):
    """Verify Anthropic Fast Mode model support and override resolution."""

    def test_anthropic_opus_supported(self):
        from hermes_cli.models import model_supports_fast_mode

        spellings = (
            "claude-opus-4-6",            # native Anthropic format (hyphens)
            "claude-opus-4.6",            # OpenRouter format (dots)
            "anthropic/claude-opus-4-6",  # vendor prefix, hyphens
            "anthropic/claude-opus-4.6",  # vendor prefix, dots
        )
        for name in spellings:
            assert model_supports_fast_mode(name) is True

    def test_anthropic_non_opus_rejected(self):
        from hermes_cli.models import model_supports_fast_mode

        rejected = (
            "claude-sonnet-4-6",
            "claude-sonnet-4.6",
            "claude-haiku-4-5",
            "anthropic/claude-sonnet-4.6",
        )
        for name in rejected:
            assert model_supports_fast_mode(name) is False

    def test_anthropic_variant_tags_stripped(self):
        from hermes_cli.models import model_supports_fast_mode

        # OpenRouter variant tags after the colon must be ignored.
        for name in ("claude-opus-4.6:fast", "claude-opus-4.6:beta"):
            assert model_supports_fast_mode(name) is True

    def test_resolve_overrides_returns_speed_for_anthropic(self):
        from hermes_cli.models import resolve_fast_mode_overrides

        for name in ("claude-opus-4-6", "anthropic/claude-opus-4.6"):
            assert resolve_fast_mode_overrides(name) == {"speed": "fast"}

    def test_resolve_overrides_returns_service_tier_for_openai(self):
        """OpenAI models should still get service_tier, not speed."""
        from hermes_cli.models import resolve_fast_mode_overrides

        assert resolve_fast_mode_overrides("gpt-5.4") == {"service_tier": "priority"}

    def test_is_anthropic_fast_model(self):
        from hermes_cli.models import _is_anthropic_fast_model

        for name in ("claude-opus-4-6", "claude-opus-4.6", "anthropic/claude-opus-4-6"):
            assert _is_anthropic_fast_model(name) is True
        for name in ("gpt-5.4", "claude-sonnet-4-6"):
            assert _is_anthropic_fast_model(name) is False

    def test_fast_command_exposed_for_anthropic_model(self):
        cli_mod = _import_cli()
        shell = SimpleNamespace(
            provider="anthropic", requested_provider="anthropic",
            model="claude-opus-4-6", agent=None,
        )
        assert cli_mod.HermesCLI._fast_command_available(shell) is True

    def test_fast_command_hidden_for_anthropic_sonnet(self):
        cli_mod = _import_cli()
        shell = SimpleNamespace(
            provider="anthropic", requested_provider="anthropic",
            model="claude-sonnet-4-6", agent=None,
        )
        assert cli_mod.HermesCLI._fast_command_available(shell) is False

    def test_turn_route_injects_speed_for_anthropic(self):
        """Anthropic models should get speed:'fast' override, not service_tier."""
        cli_mod = _import_cli()
        shell = SimpleNamespace(
            model="claude-opus-4-6",
            api_key="sk-ant-test",
            base_url="https://api.anthropic.com",
            provider="anthropic",
            api_mode="anthropic_messages",
            acp_command=None,
            acp_args=[],
            _credential_pool=None,
            _smart_model_routing={},
            service_tier="priority",
        )
        primary_route = {
            "model": "claude-opus-4-6",
            "runtime": {
                "api_key": "***",
                "base_url": "https://api.anthropic.com",
                "provider": "anthropic",
                "api_mode": "anthropic_messages",
                "command": None,
                "args": [],
                "credential_pool": None,
            },
            "label": None,
            "signature": ("claude-opus-4-6", "anthropic", "https://api.anthropic.com", "anthropic_messages", None, ()),
        }

        with patch("agent.smart_model_routing.resolve_turn_route", return_value=primary_route):
            route = cli_mod.HermesCLI._resolve_turn_agent_config(shell, "hi")

        # Runtime stays on Anthropic; only the per-request speed flag is added.
        assert route["runtime"]["provider"] == "anthropic"
        assert route["request_overrides"] == {"speed": "fast"}
|
||||
|
||||
|
||||
class TestAnthropicFastModeAdapter(unittest.TestCase):
    """Verify build_anthropic_kwargs handles fast_mode parameter."""

    @staticmethod
    def _build(**extra):
        # Shared baseline call; tests vary only fast_mode / base_url.
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model="claude-opus-4-6",
            messages=[{"role": "user", "content": [{"type": "text", "text": "hi"}]}],
            tools=None,
            max_tokens=None,
            reasoning_config=None,
            **extra,
        )

    def test_fast_mode_adds_speed_and_beta(self):
        from agent.anthropic_adapter import _FAST_MODE_BETA

        kwargs = self._build(fast_mode=True)
        assert kwargs.get("speed") == "fast"
        assert "extra_headers" in kwargs
        # The fast-mode beta flag must appear in the anthropic-beta header.
        assert _FAST_MODE_BETA in kwargs["extra_headers"].get("anthropic-beta", "")

    def test_fast_mode_off_no_speed(self):
        kwargs = self._build(fast_mode=False)
        assert "speed" not in kwargs
        assert "extra_headers" not in kwargs

    def test_fast_mode_skipped_for_third_party_endpoint(self):
        # Third-party Anthropic-compatible endpoints must NOT receive the
        # speed parameter or the fast-mode beta header.
        kwargs = self._build(
            fast_mode=True,
            base_url="https://api.minimax.io/anthropic/v1",
        )
        assert "speed" not in kwargs
        assert "extra_headers" not in kwargs
|
||||
|
||||
|
||||
class TestConfigDefault(unittest.TestCase):
    """The default config must ship an agent.service_tier entry."""

    def test_default_config_has_service_tier(self):
        from hermes_cli.config import DEFAULT_CONFIG

        agent_section = DEFAULT_CONFIG.get("agent", {})
        self.assertIn("service_tier", agent_section)
        # Empty string is the "no priority tier requested" default.
        self.assertEqual(agent_section["service_tier"], "")
|
||||
66
tests/cli/test_manual_compress.py
Normal file
66
tests/cli/test_manual_compress.py
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
"""Tests for CLI manual compression messaging."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from tests.cli.test_cli_init import _make_cli
|
||||
|
||||
|
||||
def _make_history() -> list[dict[str, str]]:
    """Return a four-message user/assistant transcript fixture."""
    contents = ("one", "two", "three", "four")
    roles = ("user", "assistant")
    # Roles alternate user/assistant across the four messages.
    return [
        {"role": roles[index % 2], "content": text}
        for index, text in enumerate(contents)
    ]
|
||||
|
||||
|
||||
def test_manual_compress_reports_noop_without_success_banner(capsys):
    """A compression pass that changes nothing must not print the success banner."""
    cli_shell = _make_cli()
    transcript = _make_history()
    cli_shell.conversation_history = transcript

    agent = MagicMock()
    agent.compression_enabled = True
    agent._cached_system_prompt = ""
    # Compression hands back an identical transcript -> a no-op.
    agent._compress_context.return_value = (list(transcript), "")
    cli_shell.agent = agent

    def _fake_estimate(messages):
        # Only the original transcript should ever be estimated here.
        assert messages == transcript
        return 100

    with patch("agent.model_metadata.estimate_messages_tokens_rough", side_effect=_fake_estimate):
        cli_shell._manual_compress()

    printed = capsys.readouterr().out
    assert "No changes from compression" in printed
    assert "✅ Compressed" not in printed
    assert "Rough transcript estimate: ~100 tokens (unchanged)" in printed
|
||||
|
||||
|
||||
def test_manual_compress_explains_when_token_estimate_rises(capsys):
    """When the estimate goes up after compression, the CLI should explain why."""
    cli_shell = _make_cli()
    transcript = _make_history()
    # Fewer messages, but the dense summary "costs" more estimated tokens.
    summarized = [
        transcript[0],
        {"role": "assistant", "content": "Dense summary that still counts as more tokens."},
        transcript[-1],
    ]
    cli_shell.conversation_history = transcript

    agent = MagicMock()
    agent.compression_enabled = True
    agent._cached_system_prompt = ""
    agent._compress_context.return_value = (summarized, "")
    cli_shell.agent = agent

    def _fake_estimate(messages):
        if messages == transcript:
            return 100
        if messages == summarized:
            return 120
        raise AssertionError(f"unexpected transcript: {messages!r}")

    with patch("agent.model_metadata.estimate_messages_tokens_rough", side_effect=_fake_estimate):
        cli_shell._manual_compress()

    printed = capsys.readouterr().out
    assert "✅ Compressed: 4 → 3 messages" in printed
    assert "Rough transcript estimate: ~100 → ~120 tokens" in printed
    assert "denser summaries" in printed
|
||||
|
|
@ -619,17 +619,14 @@ class TestReasoningDeltasFiredFlag(unittest.TestCase):
|
|||
agent = AIAgent.__new__(AIAgent)
|
||||
agent.reasoning_callback = None
|
||||
agent.stream_delta_callback = None
|
||||
agent._reasoning_deltas_fired = False
|
||||
agent.verbose_logging = False
|
||||
return agent
|
||||
|
||||
def test_fire_reasoning_delta_sets_flag(self):
|
||||
def test_fire_reasoning_delta_calls_callback(self):
|
||||
agent = self._make_agent()
|
||||
captured = []
|
||||
agent.reasoning_callback = lambda t: captured.append(t)
|
||||
self.assertFalse(agent._reasoning_deltas_fired)
|
||||
agent._fire_reasoning_delta("thinking...")
|
||||
self.assertTrue(agent._reasoning_deltas_fired)
|
||||
self.assertEqual(captured, ["thinking..."])
|
||||
|
||||
def test_build_assistant_message_skips_callback_when_already_streamed(self):
|
||||
|
|
@ -640,8 +637,7 @@ class TestReasoningDeltasFiredFlag(unittest.TestCase):
|
|||
agent.reasoning_callback = lambda t: captured.append(t)
|
||||
agent.stream_delta_callback = lambda t: None # streaming is active
|
||||
|
||||
# Simulate streaming having fired reasoning
|
||||
agent._reasoning_deltas_fired = True
|
||||
# Simulate streaming having already fired reasoning
|
||||
|
||||
msg = SimpleNamespace(
|
||||
content="I'll merge that.",
|
||||
|
|
@ -665,9 +661,8 @@ class TestReasoningDeltasFiredFlag(unittest.TestCase):
|
|||
agent.reasoning_callback = lambda t: captured.append(t)
|
||||
agent.stream_delta_callback = lambda t: None # streaming active
|
||||
|
||||
# Even though _reasoning_deltas_fired is False (reasoning came through
|
||||
# content tags, not reasoning_content deltas), callback should not fire
|
||||
agent._reasoning_deltas_fired = False
|
||||
# Reasoning came through content tags, not reasoning_content deltas.
|
||||
# Callback should not fire since streaming is active.
|
||||
|
||||
msg = SimpleNamespace(
|
||||
content="I'll merge that.",
|
||||
|
|
@ -689,7 +684,6 @@ class TestReasoningDeltasFiredFlag(unittest.TestCase):
|
|||
agent.reasoning_callback = lambda t: captured.append(t)
|
||||
# No streaming
|
||||
agent.stream_delta_callback = None
|
||||
agent._reasoning_deltas_fired = False
|
||||
|
||||
msg = SimpleNamespace(
|
||||
content="I'll merge that.",
|
||||
|
|
|
|||
138
tests/cli/test_stream_delta_think_tag.py
Normal file
138
tests/cli/test_stream_delta_think_tag.py
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
"""Tests for _stream_delta's handling of <think> tags in prose vs real reasoning blocks."""
|
||||
import sys
|
||||
import os
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def _make_cli_stub():
    """Create a minimal HermesCLI-like object with stream state."""
    # Note: the local is named `stub` (not `cli`) to avoid shadowing the
    # imported `cli` module.
    from cli import HermesCLI

    stub = HermesCLI.__new__(HermesCLI)

    # Fresh stream state, as if no deltas have arrived yet.
    initial_state = {
        "show_reasoning": False,
        "_stream_buf": "",
        "_stream_started": False,
        "_stream_box_opened": False,
        "_stream_prefilt": "",
        "_in_reasoning_block": False,
        "_reasoning_stream_started": False,
        "_reasoning_box_opened": False,
        "_reasoning_buf": "",
        "_reasoning_preview_buf": "",
        "_deferred_content": "",
        "_stream_text_ansi": "",
        "_stream_needs_break": False,
    }
    for attr, value in initial_state.items():
        setattr(stub, attr, value)

    # Capture emitted prose and reasoning instead of writing to the terminal.
    stub._emitted = []
    stub._emit_stream_text = stub._emitted.append
    stub._reasoning_emitted = []
    stub._stream_reasoning_delta = stub._reasoning_emitted.append

    return stub
|
||||
|
||||
|
||||
class TestThinkTagInProse:
    """<think> mentioned in prose should NOT trigger reasoning suppression."""

    def test_think_tag_mid_sentence(self):
        """'(/think not producing <think> tags)' should pass through."""
        shell = _make_cli_stub()
        chunks = [
            " 1. Fix reasoning mode in eval ",
            "(/think not producing ",
            "<think>",
            " tags — ~2% gap)",
            "\n 2. Launch production",
        ]
        for chunk in chunks:
            shell._stream_delta(chunk)

        assert not shell._in_reasoning_block, "<think> in prose should not enter reasoning block"
        emitted = "".join(shell._emitted)
        assert "<think>" in emitted, "The literal <think> tag should be in the emitted text"
        assert "Launch production" in emitted

    def test_think_tag_after_text_on_same_line(self):
        """'some text <think>' should NOT trigger reasoning."""
        shell = _make_cli_stub()
        shell._stream_delta("Here is the <think> tag explanation")

        assert not shell._in_reasoning_block
        assert "<think>" in "".join(shell._emitted)

    def test_think_tag_in_backticks(self):
        """'`<think>`' should NOT trigger reasoning."""
        shell = _make_cli_stub()
        shell._stream_delta("Use the `<think>` tag for reasoning")
        assert not shell._in_reasoning_block
|
||||
|
||||
|
||||
class TestRealReasoningBlock:
    """Real <think> tags at block boundaries should still be caught."""

    def test_think_at_start_of_stream(self):
        """'<think>reasoning</think>answer' should suppress reasoning."""
        shell = _make_cli_stub()

        shell._stream_delta("<think>")
        assert shell._in_reasoning_block

        shell._stream_delta("I need to analyze this")
        shell._stream_delta("</think>")
        assert not shell._in_reasoning_block

        shell._stream_delta("Here is my answer")
        emitted = "".join(shell._emitted)
        assert "Here is my answer" in emitted
        # Reasoning text must never leak into the prose stream.
        assert "I need to analyze" not in emitted

    def test_think_after_newline(self):
        """'text\\n<think>' should trigger reasoning block."""
        shell = _make_cli_stub()
        shell._stream_delta("Some preamble\n<think>")

        assert shell._in_reasoning_block
        assert "Some preamble" in "".join(shell._emitted)

    def test_think_after_newline_with_whitespace(self):
        """'text\\n <think>' should trigger reasoning block."""
        shell = _make_cli_stub()
        shell._stream_delta("Some preamble\n <think>")
        assert shell._in_reasoning_block

    def test_think_with_only_whitespace_before(self):
        """' <think>' (whitespace only prefix) should trigger."""
        shell = _make_cli_stub()
        shell._stream_delta(" <think>")
        assert shell._in_reasoning_block
|
||||
|
||||
|
||||
class TestFlushRecovery:
    """_flush_stream should recover content from false-positive reasoning blocks."""

    def test_flush_recovers_buffered_content(self):
        """If somehow in reasoning block at flush, content is recovered."""
        from unittest.mock import patch
        import shutil

        shell = _make_cli_stub()
        # Hand-craft the false-positive state: "in reasoning", but with real
        # prose still sitting in the pre-filter buffer.
        shell._in_reasoning_block = True
        shell._stream_prefilt = " tags — ~2% gap)\n 2. Launch production"
        shell._stream_box_opened = True
        # Box closing would touch the terminal; stub it out.
        shell._close_reasoning_box = lambda: None

        with (
            patch.object(shutil, "get_terminal_size", return_value=os.terminal_size((80, 24))),
            patch("cli._cprint"),
        ):
            shell._flush_stream()

        assert not shell._in_reasoning_block
        # The buffered prose must have been re-emitted as normal content.
        assert "Launch production" in "".join(shell._emitted)
|
||||
Loading…
Add table
Add a link
Reference in a new issue