mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
feat: add transform_llm_output plugin hook
Enables plugins to transform LLM output text after generation — useful for vocabulary/personality transformation without burning inference tokens. Follows the same pattern as transform_tool_result and transform_terminal_output:
- First non-empty string result wins
- Fail-open: exceptions are logged as warnings and the agent continues
- Signature: (response_text, session_id, model, platform)
This commit is contained in:
parent
6e250a55de
commit
c3be6ec184
2 changed files with 25 additions and 0 deletions
|
|
@@ -80,6 +80,10 @@ VALID_HOOKS: Set[str] = {
|
||||||
"post_tool_call",
|
"post_tool_call",
|
||||||
"transform_terminal_output",
|
"transform_terminal_output",
|
||||||
"transform_tool_result",
|
"transform_tool_result",
|
||||||
|
# Transform LLM output before it's returned to the user.
|
||||||
|
# Plugins return a string to replace the response text, or None/empty to leave unchanged.
|
||||||
|
# First non-None string wins. Useful for vocabulary/personality transformation.
|
||||||
|
"transform_llm_output",
|
||||||
"pre_llm_call",
|
"pre_llm_call",
|
||||||
"post_llm_call",
|
"post_llm_call",
|
||||||
"pre_api_request",
|
"pre_api_request",
|
||||||
|
|
|
||||||
21
run_agent.py
21
run_agent.py
|
|
@@ -14035,6 +14035,27 @@ class AIAgent:
|
||||||
else:
|
else:
|
||||||
logger.info(_diag_msg, *_diag_args)
|
logger.info(_diag_msg, *_diag_args)
|
||||||
|
|
||||||
|
# Plugin hook: transform_llm_output
|
||||||
|
# Fired once per turn after the tool-calling loop completes.
|
||||||
|
# Plugins can transform the LLM's output text before it's returned.
|
||||||
|
# First hook to return a string wins; None/empty return leaves text unchanged.
|
||||||
|
if final_response and not interrupted:
|
||||||
|
try:
|
||||||
|
from hermes_cli.plugins import invoke_hook as _invoke_hook
|
||||||
|
_transform_results = _invoke_hook(
|
||||||
|
"transform_llm_output",
|
||||||
|
response_text=final_response,
|
||||||
|
session_id=self.session_id or "",
|
||||||
|
model=self.model,
|
||||||
|
platform=getattr(self, "platform", None) or "",
|
||||||
|
)
|
||||||
|
for _hook_result in _transform_results:
|
||||||
|
if isinstance(_hook_result, str) and _hook_result:
|
||||||
|
final_response = _hook_result
|
||||||
|
break # First non-empty string wins
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("transform_llm_output hook failed: %s", exc)
|
||||||
|
|
||||||
# Plugin hook: post_llm_call
|
# Plugin hook: post_llm_call
|
||||||
# Fired once per turn after the tool-calling loop completes.
|
# Fired once per turn after the tool-calling loop completes.
|
||||||
# Plugins can use this to persist conversation data (e.g. sync
|
# Plugins can use this to persist conversation data (e.g. sync
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue