From c3be6ec184e0f17a184eaff1018051b47a89eeb7 Mon Sep 17 00:00:00 2001 From: BarnacleBoy Date: Wed, 6 May 2026 15:44:19 +0000 Subject: [PATCH] feat: add transform_llm_output plugin hook Enables plugins to transform LLM output text after generation, useful for vocabulary/personality transformation without burning inference tokens. Follows same pattern as transform_tool_result and transform_terminal_output: - First non-empty string result wins - Fail-open: exceptions logged as warnings, agent continues - Signature: (response_text, session_id, model, platform) --- hermes_cli/plugins.py | 4 ++++ run_agent.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/hermes_cli/plugins.py b/hermes_cli/plugins.py index 5b30e7e7ca..1267457737 100644 --- a/hermes_cli/plugins.py +++ b/hermes_cli/plugins.py @@ -80,6 +80,10 @@ VALID_HOOKS: Set[str] = { "post_tool_call", "transform_terminal_output", "transform_tool_result", + # Transform LLM output before it's returned to the user. + # Plugins return a string to replace the response text, or None/empty to leave unchanged. + # First non-empty string wins. Useful for vocabulary/personality transformation. + "transform_llm_output", "pre_llm_call", "post_llm_call", "pre_api_request", diff --git a/run_agent.py b/run_agent.py index 919a5875b6..54b0ebccb8 100644 --- a/run_agent.py +++ b/run_agent.py @@ -14035,6 +14035,27 @@ class AIAgent: else: logger.info(_diag_msg, *_diag_args) + # Plugin hook: transform_llm_output + # Fired once per turn after the tool-calling loop completes. + # Plugins can transform the LLM's output text before it's returned. + # First hook to return a non-empty string wins; None/empty return leaves text unchanged.
+ if final_response and not interrupted: + try: + from hermes_cli.plugins import invoke_hook as _invoke_hook + _transform_results = _invoke_hook( + "transform_llm_output", + response_text=final_response, + session_id=self.session_id or "", + model=self.model, + platform=getattr(self, "platform", None) or "", + ) + for _hook_result in _transform_results: + if isinstance(_hook_result, str) and _hook_result: + final_response = _hook_result + break # First non-empty string wins + except Exception as exc: + logger.warning("transform_llm_output hook failed: %s", exc) + # Plugin hook: post_llm_call # Fired once per turn after the tool-calling loop completes. # Plugins can use this to persist conversation data (e.g. sync