fix: keep ACP stdout protocol-clean

Route AIAgent print output to stderr via _print_fn for ACP stdio sessions.
Gate quiet-mode spinner startup on _should_start_quiet_spinner() so JSON-RPC
on stdout isn't corrupted. Child agents inherit the redirect.

Co-authored-by: Git-on-my-level <Git-on-my-level@users.noreply.github.com>
This commit is contained in:
Git-on-my-level 2026-04-05 11:42:13 -07:00 committed by Teknium
parent 914a7db448
commit fcdd5447e2
6 changed files with 160 additions and 8 deletions

View file

@ -13,6 +13,7 @@ from hermes_constants import get_hermes_home
import copy
import json
import logging
import sys
import uuid
from dataclasses import dataclass, field
from threading import Lock
@ -21,6 +22,17 @@ from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
def _acp_stderr_print(*args, **kwargs) -> None:
"""Best-effort human-readable output sink for ACP stdio sessions.
ACP reserves stdout for JSON-RPC frames, so any incidental CLI/status output
from AIAgent must be redirected away from stdout. Route it to stderr instead.
"""
kwargs = dict(kwargs)
kwargs.setdefault("file", sys.stderr)
print(*args, **kwargs)
def _register_task_cwd(task_id: str, cwd: str) -> None:
"""Bind a task/session id to the editor's working directory for tools."""
if not task_id:
@ -458,4 +470,8 @@ class SessionManager:
logger.debug("ACP session falling back to default provider resolution", exc_info=True)
_register_task_cwd(session_id, cwd)
return AIAgent(**kwargs)
agent = AIAgent(**kwargs)
# ACP stdio transport requires stdout to remain protocol-only JSON-RPC.
# Route any incidental human-readable agent output to stderr instead.
agent._print_fn = _acp_stderr_print
return agent

View file

@ -1491,6 +1491,25 @@ class AIAgent:
return
self._safe_print(*args, **kwargs)
def _should_start_quiet_spinner(self) -> bool:
"""Return True when quiet-mode spinner output has a safe sink.
In headless/stdio-protocol environments, a raw spinner with no custom
``_print_fn`` falls back to ``sys.stdout`` and can corrupt protocol
streams such as ACP JSON-RPC. Allow quiet spinners only when either:
- output is explicitly rerouted via ``_print_fn``; or
- stdout is a real TTY.
"""
if self._print_fn is not None:
return True
stream = getattr(sys, "stdout", None)
if stream is None:
return False
try:
return bool(stream.isatty())
except (AttributeError, ValueError, OSError):
return False
def _emit_status(self, message: str) -> None:
"""Emit a lifecycle status message to both CLI and gateway channels.
@ -6066,7 +6085,7 @@ class AIAgent:
# Start spinner for CLI mode (skip when TUI handles tool progress)
spinner = None
if self.quiet_mode and not self.tool_progress_callback:
if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
spinner = KawaiiSpinner(f"{face} ⚡ running {num_tools} tools concurrently", spinner_type='dots', print_fn=self._print_fn)
spinner.start()
@ -6294,7 +6313,7 @@ class AIAgent:
goal_preview = (function_args.get("goal") or "")[:30]
spinner_label = f"🔀 {goal_preview}" if goal_preview else "🔀 delegating"
spinner = None
if self.quiet_mode and not self.tool_progress_callback:
if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
spinner = KawaiiSpinner(f"{face} {spinner_label}", spinner_type='dots', print_fn=self._print_fn)
spinner.start()
@ -7120,9 +7139,9 @@ class AIAgent:
# CLI TUI mode: use prompt_toolkit widget instead of raw spinner
# (works in both streaming and non-streaming modes)
self.thinking_callback(f"{face} {verb}...")
elif not self._has_stream_consumers():
# Raw KawaiiSpinner only when no streaming consumers
# (would conflict with streamed token output)
elif not self._has_stream_consumers() and self._should_start_quiet_spinner():
# Raw KawaiiSpinner only when no streaming consumers and the
# spinner output has a safe sink.
spinner_type = random.choice(['brain', 'sparkle', 'pulse', 'moon', 'star'])
thinking_spinner = KawaiiSpinner(f"{face} {verb}...", spinner_type=spinner_type, print_fn=self._print_fn)
thinking_spinner.start()

View file

@ -1,5 +1,7 @@
"""Tests for acp_adapter.session — SessionManager and SessionState."""
import contextlib
import io
import json
from types import SimpleNamespace
import pytest
@ -329,3 +331,40 @@ class TestPersistence:
assert restored is not None
assert restored.agent.provider == "anthropic"
assert restored.agent.base_url == "https://anthropic.example/v1"
def test_acp_agents_route_human_output_to_stderr(self, tmp_path, monkeypatch):
    """ACP agents must keep stdout clean for JSON-RPC stdio transport."""

    def stub_runtime_provider(requested=None, **kwargs):
        # Deterministic provider resolution: no config files, no network.
        return {
            "provider": "openrouter",
            "api_mode": "chat_completions",
            "base_url": "https://openrouter.example/v1",
            "api_key": "test-key",
            "command": None,
            "args": [],
        }

    def stub_agent(**kwargs):
        # Minimal AIAgent stand-in; _print_fn starts unset so the manager
        # must assign the stderr sink itself.
        return SimpleNamespace(model=kwargs.get("model"), _print_fn=None)

    monkeypatch.setattr("hermes_cli.config.load_config", lambda: {
        "model": {"provider": "openrouter", "default": "test-model"}
    })
    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        stub_runtime_provider,
    )

    db = SessionDB(tmp_path / "state.db")
    with patch("run_agent.AIAgent", side_effect=stub_agent):
        state = SessionManager(db=db).create_session(cwd="/work")

    out, err = io.StringIO(), io.StringIO()
    with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
        state.agent._print_fn("ACP noise")

    assert out.getvalue() == ""
    assert err.getvalue() == "ACP noise\n"

View file

@ -2612,6 +2612,24 @@ def test_aiagent_uses_copilot_acp_client():
assert mock_acp_client.call_args.kwargs["args"] == ["--acp", "--stdio"]
def test_quiet_spinner_allowed_with_explicit_print_fn(agent):
    """An explicit _print_fn is a safe sink even when stdout is not a TTY."""
    agent._print_fn = lambda *_args, **_kwargs: None
    with patch.object(run_agent.sys.stdout, "isatty", return_value=False):
        allowed = agent._should_start_quiet_spinner()
    assert allowed is True
def test_quiet_spinner_allowed_on_real_tty(agent):
    """A genuine TTY on stdout permits the spinner without a _print_fn."""
    agent._print_fn = None
    with patch.object(run_agent.sys.stdout, "isatty", return_value=True):
        allowed = agent._should_start_quiet_spinner()
    assert allowed is True
def test_quiet_spinner_suppressed_on_non_tty_without_print_fn(agent):
    """No _print_fn and no TTY means no safe sink: the spinner stays off."""
    agent._print_fn = None
    with patch.object(run_agent.sys.stdout, "isatty", return_value=False):
        allowed = agent._should_start_quiet_spinner()
    assert allowed is False
def test_is_openai_client_closed_honors_custom_client_flag():
    """_is_openai_client_closed mirrors a client's boolean is_closed attribute."""
    for flag in (True, False):
        client = SimpleNamespace(is_closed=flag)
        assert AIAgent._is_openai_client_closed(client) is flag

View file

@ -34,7 +34,7 @@ def _make_mock_parent(depth=0):
"""Create a mock parent agent with the fields delegate_task expects."""
parent = MagicMock()
parent.base_url = "https://openrouter.ai/api/v1"
parent.api_key = "parent-key"
parent.api_key = "parent-key"
parent.provider = "openrouter"
parent.api_mode = "chat_completions"
parent.model = "anthropic/claude-sonnet-4"
@ -47,6 +47,9 @@ def _make_mock_parent(depth=0):
parent._delegate_depth = depth
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent._print_fn = None
parent.tool_progress_callback = None
parent.thinking_callback = None
return parent
@ -228,7 +231,7 @@ class TestDelegateTask(unittest.TestCase):
def test_child_inherits_runtime_credentials(self):
parent = _make_mock_parent(depth=0)
parent.base_url = "https://chatgpt.com/backend-api/codex"
parent.api_key = "codex-token"
parent.api_key = "codex-token"
parent.provider = "openai-codex"
parent.api_mode = "codex_responses"
@ -249,6 +252,49 @@ class TestDelegateTask(unittest.TestCase):
self.assertEqual(kwargs["provider"], parent.provider)
self.assertEqual(kwargs["api_mode"], parent.api_mode)
def test_child_inherits_parent_print_fn(self):
    """Children must reuse the parent's output sink (e.g. ACP stderr routing)."""
    parent = _make_mock_parent(depth=0)
    sink = MagicMock()
    parent._print_fn = sink
    with patch("run_agent.AIAgent") as MockAgent:
        child = MagicMock()
        MockAgent.return_value = child
        _build_child_agent(
            task_index=0,
            goal="Keep stdout clean",
            context=None,
            toolsets=None,
            model=None,
            max_iterations=10,
            parent_agent=parent,
        )
    self.assertIs(child._print_fn, sink)
def test_child_uses_thinking_callback_when_progress_callback_available(self):
    """When a progress relay exists, the child gets a callable thinking
    callback whose invocation does not directly fire the parent's
    tool_progress_callback."""
    parent = _make_mock_parent(depth=0)
    parent.tool_progress_callback = MagicMock()
    with patch("run_agent.AIAgent") as MockAgent:
        child = MagicMock()
        MockAgent.return_value = child
        _build_child_agent(
            task_index=0,
            goal="Avoid raw child spinners",
            context=None,
            toolsets=None,
            model=None,
            max_iterations=10,
            parent_agent=parent,
        )
    self.assertTrue(callable(child.thinking_callback))
    child.thinking_callback("deliberating...")
    parent.tool_progress_callback.assert_not_called()
class TestToolNamePreservation(unittest.TestCase):
"""Verify _last_resolved_tool_names is restored after subagent runs."""

View file

@ -197,6 +197,18 @@ def _build_child_agent(
# total iterations across parent + subagents can exceed the parent's
# max_iterations. The user controls the per-subagent cap in config.yaml.
child_thinking_cb = None
if child_progress_cb:
def _child_thinking(text: str) -> None:
if not text:
return
try:
child_progress_cb("_thinking", text)
except Exception as e:
logger.debug("Child thinking callback relay failed: %s", e)
child_thinking_cb = _child_thinking
# Resolve effective credentials: config override > parent inherit
effective_model = model or parent_agent.model
effective_provider = override_provider or getattr(parent_agent, "provider", None)
@ -226,6 +238,7 @@ def _build_child_agent(
skip_context_files=True,
skip_memory=True,
clarify_callback=None,
thinking_callback=child_thinking_cb,
session_db=getattr(parent_agent, '_session_db', None),
providers_allowed=parent_agent.providers_allowed,
providers_ignored=parent_agent.providers_ignored,
@ -234,6 +247,7 @@ def _build_child_agent(
tool_progress_callback=child_progress_cb,
iteration_budget=None, # fresh budget per subagent
)
child._print_fn = getattr(parent_agent, '_print_fn', None)
# Set delegation depth so children can't spawn grandchildren
child._delegate_depth = getattr(parent_agent, '_delegate_depth', 0) + 1