mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-05 02:31:47 +00:00
Tests that construct AIAgent(api_key=..., ...) without base_url were relying on provider-resolver fallback state from other tests in the same xdist worker. When matrix-split distributed them to different shards, the resolver found no env vars and no config and raised 'No LLM provider configured'. Fix: add base_url='https://openrouter.ai/api/v1' to every AIAgent construction that passes api_key. AIAgent.__init__ with both args set takes the direct-construction path (line 960 in run_agent.py) and skips resolver fallback entirely, making these tests self-contained. 7 files, 16 call sites updated via AST-based fixup. One call site (test_none_base_url_passed_as_none) left alone — that test's intent is to verify base_url=None behavior, so adding base_url defeats the test. Validation: - tests/run_agent/ full run: 760 passed, 0 failed (was 1 failure under the AST script's over-application, now clean) - Matrix shard 3 local run: 3083 passed, 0 failed, 1m44s
63 lines
2 KiB
Python
from types import SimpleNamespace
|
|
from unittest.mock import MagicMock, patch
|
|
|
|
from run_agent import AIAgent
|
|
|
|
|
|
def _mock_response(*, usage: dict, content: str = "done"):
|
|
msg = SimpleNamespace(content=content, tool_calls=None)
|
|
choice = SimpleNamespace(message=msg, finish_reason="stop")
|
|
return SimpleNamespace(
|
|
choices=[choice],
|
|
model="test/model",
|
|
usage=SimpleNamespace(**usage),
|
|
)
|
|
|
|
|
|
def _make_agent(session_db, *, platform: str):
    """Create an AIAgent for *platform* with external dependencies stubbed out.

    Tool discovery and the OpenAI constructor are patched away so __init__
    performs no I/O; the LLM client is then replaced with a MagicMock whose
    completion call returns a fixed-usage response (11/7/18 tokens).
    base_url is passed explicitly so the test is self-contained and never
    falls back to provider-resolver state (see repo history).
    """
    with patch("run_agent.get_tool_definitions", return_value=[]), patch(
        "run_agent.check_toolset_requirements", return_value={}
    ), patch("run_agent.OpenAI"):
        agent = AIAgent(
            api_key="test-key",
            base_url="https://openrouter.ai/api/v1",
            quiet_mode=True,
            skip_context_files=True,
            skip_memory=True,
            session_db=session_db,
            session_id=f"{platform}-session",
            platform=platform,
        )
        mocked_client = MagicMock()
        mocked_client.chat.completions.create.return_value = _mock_response(
            usage={
                "prompt_tokens": 11,
                "completion_tokens": 7,
                "total_tokens": 18,
            }
        )
        agent.client = mocked_client
        return agent
|
|
|
|
|
|
def test_run_conversation_persists_tokens_for_telegram_sessions():
    """Token usage from a telegram run is persisted under the telegram session id."""
    db = MagicMock()
    agent = _make_agent(db, platform="telegram")

    outcome = agent.run_conversation("hello")

    assert outcome["final_response"] == "done"
    db.update_token_counts.assert_called_once()
    # First positional argument to the persistence call is the session id.
    assert db.update_token_counts.call_args.args[0] == "telegram-session"
|
|
|
|
|
|
def test_run_conversation_persists_tokens_for_cron_sessions():
    """Token usage from a cron run is persisted under the cron session id."""
    db = MagicMock()
    agent = _make_agent(db, platform="cron")

    outcome = agent.run_conversation("hello")

    assert outcome["final_response"] == "done"
    db.update_token_counts.assert_called_once()
    # First positional argument to the persistence call is the session id.
    assert db.update_token_counts.call_args.args[0] == "cron-session"
|