mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-06 02:41:48 +00:00
fix(cron): bump skill usage when cron jobs load skills
Some checks are pending
Deploy Site / deploy-vercel (push) Waiting to run
Deploy Site / deploy-docs (push) Waiting to run
Docker Build and Publish / build-and-push (push) Waiting to run
Nix / nix (macos-latest) (push) Waiting to run
Nix / nix (ubuntu-latest) (push) Waiting to run
Tests / test (push) Waiting to run
Tests / e2e (push) Waiting to run
Some checks are pending
Deploy Site / deploy-vercel (push) Waiting to run
Deploy Site / deploy-docs (push) Waiting to run
Docker Build and Publish / build-and-push (push) Waiting to run
Nix / nix (macos-latest) (push) Waiting to run
Nix / nix (ubuntu-latest) (push) Waiting to run
Tests / test (push) Waiting to run
Tests / e2e (push) Waiting to run
Cron jobs that reference skills via their skills: config never bumped the usage counters in .usage.json, so the curator could auto-archive skills actively used by cron jobs based on stale timestamps. Now _build_job_prompt() calls bump_use(skill_name) for each successfully loaded skill so the curator sees them as active.
This commit is contained in:
parent
808fee151d
commit
363cc93674
2 changed files with 55 additions and 0 deletions
|
|
@ -1857,6 +1857,54 @@ class TestBuildJobPromptMissingSkill:
|
|||
assert "go" in result
|
||||
|
||||
|
||||
class TestBuildJobPromptBumpUse:
    """Verify that cron jobs bump skill usage counters so the curator sees them as active."""

    def test_bump_use_called_for_loaded_skill(self):
        """bump_use is called for each successfully loaded skill."""

        def fake_view(skill: str) -> str:
            # Simulate a successful skill load with per-skill content.
            payload = {"success": True, "content": f"Content for {skill}."}
            return json.dumps(payload)

        with patch("tools.skills_tool.skill_view", side_effect=fake_view), \
             patch("tools.skill_usage.bump_use") as bump:
            _build_job_prompt({"skills": ["alpha", "beta"], "prompt": "go"})

        # Exactly one bump per loaded skill; order is not part of the contract.
        bumped = [args[0][0] for args in bump.call_args_list]
        assert len(bumped) == 2
        assert set(bumped) == {"alpha", "beta"}

    def test_bump_use_not_called_for_missing_skill(self):
        """bump_use is NOT called when a skill fails to load."""

        def fake_missing(skill: str) -> str:
            # Simulate a skill_view failure response for any requested skill.
            return json.dumps({"success": False, "error": "not found"})

        with patch("tools.skills_tool.skill_view", side_effect=fake_missing), \
             patch("tools.skill_usage.bump_use") as bump:
            _build_job_prompt({"skills": ["ghost"], "prompt": "go"})

        assert bump.call_count == 0

    def test_bump_failure_does_not_break_prompt(self, caplog):
        """If bump_use raises, the prompt still builds — error is logged at DEBUG."""

        def fake_view(skill: str) -> str:
            # Skill loads fine; only the usage bump will blow up.
            return json.dumps({"success": True, "content": "Works."})

        with patch("tools.skills_tool.skill_view", side_effect=fake_view), \
             patch("tools.skill_usage.bump_use", side_effect=RuntimeError("boom")), \
             caplog.at_level(logging.DEBUG, logger="cron.scheduler"):
            result = _build_job_prompt({"skills": ["good-skill"], "prompt": "go"})

        # The prompt keeps both the skill content and the original instruction.
        assert "Works." in result
        assert "go" in result
        # The bump failure surfaces only as a DEBUG log record, never an exception.
        assert any("failed to bump" in record.message for record in caplog.records)
class TestSendMediaViaAdapter:
|
||||
"""Unit tests for _send_media_via_adapter — routes files to typed adapter methods."""
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue