#!/usr/bin/env python3
"""
AI Agent Runner with Tool Calling

This module provides a clean, standalone agent that can execute AI models
with tool calling capabilities. It handles the conversation loop, tool
execution, and response management.

Features:
- Automatic tool calling loop until completion
- Configurable model parameters
- Error handling and recovery
- Message history management
- Support for multiple model providers

Usage:
    from run_agent import AIAgent

    agent = AIAgent(base_url="http://localhost:30000/v1", model="claude-opus-4-20250514")
    response = agent.run_conversation("Tell me about the latest Python updates")
"""
import asyncio
import base64
import concurrent.futures
import copy
import hashlib
import json
import logging

# The module logger is created before the remaining imports run so the
# .env bootstrap below can report which files were loaded.
logger = logging.getLogger(__name__)

import os
import random
import re
import sys
import tempfile
import time
import threading
from types import SimpleNamespace
import uuid
from typing import List, Dict, Any, Optional

from openai import OpenAI
import fire
from datetime import datetime
from pathlib import Path

from hermes_constants import get_hermes_home

# Load .env from ~/.hermes/.env first, then project root as dev fallback.
# User-managed env files should override stale shell exports on restart.
from hermes_cli.env_loader import load_hermes_dotenv

_hermes_home = get_hermes_home()
_project_env = Path(__file__).parent / '.env'
_loaded_env_paths = load_hermes_dotenv(hermes_home=_hermes_home, project_env=_project_env)
if _loaded_env_paths:
    for _env_path in _loaded_env_paths:
        logger.info("Loaded environment variables from %s", _env_path)
else:
    logger.info("No .env file found. Using system environment variables.")

# Import our tool system
from model_tools import (
    get_tool_definitions,
    get_toolset_for_tool,
    handle_function_call,
    check_toolset_requirements,
)
from tools.terminal_tool import cleanup_vm, get_active_env, is_persistent_env
from tools.tool_result_storage import maybe_persist_tool_result, enforce_turn_budget
from tools.interrupt import set_interrupt as _set_interrupt
from tools.browser_tool import cleanup_browser
from hermes_constants import OPENROUTER_BASE_URL

# Agent internals extracted to agent/ package for modularity
from agent.memory_manager import build_memory_context_block, sanitize_context
from agent.retry_utils import jittered_backoff
from agent.error_classifier import classify_api_error, FailoverReason
from agent.prompt_builder import (
    DEFAULT_AGENT_IDENTITY,
    PLATFORM_HINTS,
    MEMORY_GUIDANCE,
    SESSION_SEARCH_GUIDANCE,
    SKILLS_GUIDANCE,
    build_nous_subscription_prompt,
)
from agent.model_metadata import (
    fetch_model_metadata,
    estimate_tokens_rough,
    estimate_messages_tokens_rough,
    estimate_request_tokens_rough,
    get_next_probe_tier,
    parse_context_limit_from_error,
    parse_available_output_tokens_from_error,
    save_context_length,
    is_local_endpoint,
    query_ollama_num_ctx,
)
from agent.context_compressor import ContextCompressor
from agent.subdirectory_hints import SubdirectoryHintTracker
from agent.prompt_caching import apply_anthropic_cache_control
from agent.prompt_builder import build_skills_system_prompt, build_context_files_prompt, build_environment_hints, load_soul_md, TOOL_USE_ENFORCEMENT_GUIDANCE, TOOL_USE_ENFORCEMENT_MODELS, DEVELOPER_ROLE_MODELS, GOOGLE_MODEL_OPERATIONAL_GUIDANCE, OPENAI_MODEL_EXECUTION_GUIDANCE
from agent.usage_pricing import estimate_usage_cost, normalize_usage
from agent.display import (
    KawaiiSpinner,
    build_tool_preview as _build_tool_preview,
    get_cute_tool_message as _get_cute_tool_message_impl,
    _detect_tool_failure,
    get_tool_emoji as _get_tool_emoji,
)
from agent.trajectory import (
convert_scratchpad_to_think, has_incomplete_scratchpad, save_trajectory as _save_trajectory_to_file, ) from utils import atomic_json_write, env_var_enabled class _SafeWriter: """Transparent stdio wrapper that catches OSError/ValueError from broken pipes. When hermes-agent runs as a systemd service, Docker container, or headless daemon, the stdout/stderr pipe can become unavailable (idle timeout, buffer exhaustion, socket reset). Any print() call then raises ``OSError: [Errno 5] Input/output error``, which can crash agent setup or run_conversation() — especially via double-fault when an except handler also tries to print. Additionally, when subagents run in ThreadPoolExecutor threads, the shared stdout handle can close between thread teardown and cleanup, raising ``ValueError: I/O operation on closed file`` instead of OSError. This wrapper delegates all writes to the underlying stream and silently catches both OSError and ValueError. It is transparent when the wrapped stream is healthy. """ __slots__ = ("_inner",) def __init__(self, inner): object.__setattr__(self, "_inner", inner) def write(self, data): try: return self._inner.write(data) except (OSError, ValueError): return len(data) if isinstance(data, str) else 0 def flush(self): try: self._inner.flush() except (OSError, ValueError): pass def fileno(self): return self._inner.fileno() def isatty(self): try: return self._inner.isatty() except (OSError, ValueError): return False def __getattr__(self, name): return getattr(self._inner, name) def _install_safe_stdio() -> None: """Wrap stdout/stderr so best-effort console output cannot crash the agent.""" for stream_name in ("stdout", "stderr"): stream = getattr(sys, stream_name, None) if stream is not None and not isinstance(stream, _SafeWriter): setattr(sys, stream_name, _SafeWriter(stream)) class IterationBudget: """Thread-safe iteration counter for an agent. Each agent (parent or subagent) gets its own ``IterationBudget``. 
The parent's budget is capped at ``max_iterations`` (default 90). Each subagent gets an independent budget capped at ``delegation.max_iterations`` (default 50) — this means total iterations across parent + subagents can exceed the parent's cap. Users control the per-subagent limit via ``delegation.max_iterations`` in config.yaml. ``execute_code`` (programmatic tool calling) iterations are refunded via :meth:`refund` so they don't eat into the budget. """ def __init__(self, max_total: int): self.max_total = max_total self._used = 0 self._lock = threading.Lock() def consume(self) -> bool: """Try to consume one iteration. Returns True if allowed.""" with self._lock: if self._used >= self.max_total: return False self._used += 1 return True def refund(self) -> None: """Give back one iteration (e.g. for execute_code turns).""" with self._lock: if self._used > 0: self._used -= 1 @property def used(self) -> int: return self._used @property def remaining(self) -> int: with self._lock: return max(0, self.max_total - self._used) # Tools that must never run concurrently (interactive / user-facing). # When any of these appear in a batch, we fall back to sequential execution. _NEVER_PARALLEL_TOOLS = frozenset({"clarify"}) # Read-only tools with no shared mutable session state. _PARALLEL_SAFE_TOOLS = frozenset({ "ha_get_state", "ha_list_entities", "ha_list_services", "read_file", "search_files", "session_search", "skill_view", "skills_list", "vision_analyze", "web_extract", "web_search", }) # File tools can run concurrently when they target independent paths. _PATH_SCOPED_TOOLS = frozenset({"read_file", "write_file", "patch"}) # Maximum number of concurrent worker threads for parallel tool execution. _MAX_TOOL_WORKERS = 8 # Patterns that indicate a terminal command may modify/delete files. 
_DESTRUCTIVE_PATTERNS = re.compile( r"""(?:^|\s|&&|\|\||;|`)(?: rm\s|rmdir\s| mv\s| sed\s+-i| truncate\s| dd\s| shred\s| git\s+(?:reset|clean|checkout)\s )""", re.VERBOSE, ) # Output redirects that overwrite files (> but not >>) _REDIRECT_OVERWRITE = re.compile(r'[^>]>[^>]|^>[^>]') def _is_destructive_command(cmd: str) -> bool: """Heuristic: does this terminal command look like it modifies/deletes files?""" if not cmd: return False if _DESTRUCTIVE_PATTERNS.search(cmd): return True if _REDIRECT_OVERWRITE.search(cmd): return True return False def _should_parallelize_tool_batch(tool_calls) -> bool: """Return True when a tool-call batch is safe to run concurrently.""" if len(tool_calls) <= 1: return False tool_names = [tc.function.name for tc in tool_calls] if any(name in _NEVER_PARALLEL_TOOLS for name in tool_names): return False reserved_paths: list[Path] = [] for tool_call in tool_calls: tool_name = tool_call.function.name try: function_args = json.loads(tool_call.function.arguments) except Exception: logging.debug( "Could not parse args for %s — defaulting to sequential; raw=%s", tool_name, tool_call.function.arguments[:200], ) return False if not isinstance(function_args, dict): logging.debug( "Non-dict args for %s (%s) — defaulting to sequential", tool_name, type(function_args).__name__, ) return False if tool_name in _PATH_SCOPED_TOOLS: scoped_path = _extract_parallel_scope_path(tool_name, function_args) if scoped_path is None: return False if any(_paths_overlap(scoped_path, existing) for existing in reserved_paths): return False reserved_paths.append(scoped_path) continue if tool_name not in _PARALLEL_SAFE_TOOLS: return False return True def _extract_parallel_scope_path(tool_name: str, function_args: dict) -> Path | None: """Return the normalized file target for path-scoped tools.""" if tool_name not in _PATH_SCOPED_TOOLS: return None raw_path = function_args.get("path") if not isinstance(raw_path, str) or not raw_path.strip(): return None expanded = 
Path(raw_path).expanduser() if expanded.is_absolute(): return Path(os.path.abspath(str(expanded))) # Avoid resolve(); the file may not exist yet. return Path(os.path.abspath(str(Path.cwd() / expanded))) def _paths_overlap(left: Path, right: Path) -> bool: """Return True when two paths may refer to the same subtree.""" left_parts = left.parts right_parts = right.parts if not left_parts or not right_parts: # Empty paths shouldn't reach here (guarded upstream), but be safe. return bool(left_parts) == bool(right_parts) and bool(left_parts) common_len = min(len(left_parts), len(right_parts)) return left_parts[:common_len] == right_parts[:common_len] _SURROGATE_RE = re.compile(r'[\ud800-\udfff]') def _sanitize_surrogates(text: str) -> str: """Replace lone surrogate code points with U+FFFD (replacement character). Surrogates are invalid in UTF-8 and will crash ``json.dumps()`` inside the OpenAI SDK. This is a fast no-op when the text contains no surrogates. """ if _SURROGATE_RE.search(text): return _SURROGATE_RE.sub('\ufffd', text) return text def _sanitize_messages_surrogates(messages: list) -> bool: """Sanitize surrogate characters from all string content in a messages list. Walks message dicts in-place. Returns True if any surrogates were found and replaced, False otherwise. Covers content/text, name, and tool call metadata/arguments so retries don't fail on a non-content field. 
""" found = False for msg in messages: if not isinstance(msg, dict): continue content = msg.get("content") if isinstance(content, str) and _SURROGATE_RE.search(content): msg["content"] = _SURROGATE_RE.sub('\ufffd', content) found = True elif isinstance(content, list): for part in content: if isinstance(part, dict): text = part.get("text") if isinstance(text, str) and _SURROGATE_RE.search(text): part["text"] = _SURROGATE_RE.sub('\ufffd', text) found = True name = msg.get("name") if isinstance(name, str) and _SURROGATE_RE.search(name): msg["name"] = _SURROGATE_RE.sub('\ufffd', name) found = True tool_calls = msg.get("tool_calls") if isinstance(tool_calls, list): for tc in tool_calls: if not isinstance(tc, dict): continue tc_id = tc.get("id") if isinstance(tc_id, str) and _SURROGATE_RE.search(tc_id): tc["id"] = _SURROGATE_RE.sub('\ufffd', tc_id) found = True fn = tc.get("function") if isinstance(fn, dict): fn_name = fn.get("name") if isinstance(fn_name, str) and _SURROGATE_RE.search(fn_name): fn["name"] = _SURROGATE_RE.sub('\ufffd', fn_name) found = True fn_args = fn.get("arguments") if isinstance(fn_args, str) and _SURROGATE_RE.search(fn_args): fn["arguments"] = _SURROGATE_RE.sub('\ufffd', fn_args) found = True return found def _strip_non_ascii(text: str) -> str: """Remove non-ASCII characters, replacing with closest ASCII equivalent or removing. Used as a last resort when the system encoding is ASCII and can't handle any non-ASCII characters (e.g. LANG=C on Chromebooks). """ return text.encode('ascii', errors='ignore').decode('ascii') def _sanitize_messages_non_ascii(messages: list) -> bool: """Strip non-ASCII characters from all string content in a messages list. This is a last-resort recovery for systems with ASCII-only encoding (LANG=C, Chromebooks, minimal containers). Returns True if any non-ASCII content was found and sanitized. 
""" found = False for msg in messages: if not isinstance(msg, dict): continue # Sanitize content (string) content = msg.get("content") if isinstance(content, str): sanitized = _strip_non_ascii(content) if sanitized != content: msg["content"] = sanitized found = True elif isinstance(content, list): for part in content: if isinstance(part, dict): text = part.get("text") if isinstance(text, str): sanitized = _strip_non_ascii(text) if sanitized != text: part["text"] = sanitized found = True # Sanitize name field (can contain non-ASCII in tool results) name = msg.get("name") if isinstance(name, str): sanitized = _strip_non_ascii(name) if sanitized != name: msg["name"] = sanitized found = True # Sanitize tool_calls tool_calls = msg.get("tool_calls") if isinstance(tool_calls, list): for tc in tool_calls: if isinstance(tc, dict): fn = tc.get("function", {}) if isinstance(fn, dict): fn_args = fn.get("arguments") if isinstance(fn_args, str): sanitized = _strip_non_ascii(fn_args) if sanitized != fn_args: fn["arguments"] = sanitized found = True # Sanitize any additional top-level string fields (e.g. 
reasoning_content) for key, value in msg.items(): if key in {"content", "name", "tool_calls", "role"}: continue if isinstance(value, str): sanitized = _strip_non_ascii(value) if sanitized != value: msg[key] = sanitized found = True return found def _sanitize_tools_non_ascii(tools: list) -> bool: """Strip non-ASCII characters from tool payloads in-place.""" return _sanitize_structure_non_ascii(tools) def _sanitize_structure_non_ascii(payload: Any) -> bool: """Strip non-ASCII characters from nested dict/list payloads in-place.""" found = False def _walk(node): nonlocal found if isinstance(node, dict): for key, value in node.items(): if isinstance(value, str): sanitized = _strip_non_ascii(value) if sanitized != value: node[key] = sanitized found = True elif isinstance(value, (dict, list)): _walk(value) elif isinstance(node, list): for idx, value in enumerate(node): if isinstance(value, str): sanitized = _strip_non_ascii(value) if sanitized != value: node[idx] = sanitized found = True elif isinstance(value, (dict, list)): _walk(value) _walk(payload) return found # ========================================================================= # Large tool result handler — save oversized output to temp file # ========================================================================= # ========================================================================= # Qwen Portal headers — mimics QwenCode CLI for portal.qwen.ai compatibility. # Extracted as a module-level helper so both __init__ and # _apply_client_headers_for_base_url can share it. 
# ========================================================================= _QWEN_CODE_VERSION = "0.14.1" def _qwen_portal_headers() -> dict: """Return default HTTP headers required by Qwen Portal API.""" import platform as _plat _ua = f"QwenCode/{_QWEN_CODE_VERSION} ({_plat.system().lower()}; {_plat.machine()})" return { "User-Agent": _ua, "X-DashScope-CacheControl": "enable", "X-DashScope-UserAgent": _ua, "X-DashScope-AuthType": "qwen-oauth", } class AIAgent: """ AI Agent with tool calling capabilities. This class manages the conversation flow, tool execution, and response handling for AI models that support function calling. """ @property def base_url(self) -> str: return self._base_url @base_url.setter def base_url(self, value: str) -> None: self._base_url = value self._base_url_lower = value.lower() if value else "" def __init__( self, base_url: str = None, api_key: str = None, provider: str = None, api_mode: str = None, acp_command: str = None, acp_args: list[str] | None = None, command: str = None, args: list[str] | None = None, model: str = "", max_iterations: int = 90, # Default tool-calling iterations (shared with subagents) tool_delay: float = 1.0, enabled_toolsets: List[str] = None, disabled_toolsets: List[str] = None, save_trajectories: bool = False, verbose_logging: bool = False, quiet_mode: bool = False, ephemeral_system_prompt: str = None, log_prefix_chars: int = 100, log_prefix: str = "", providers_allowed: List[str] = None, providers_ignored: List[str] = None, providers_order: List[str] = None, provider_sort: str = None, provider_require_parameters: bool = False, provider_data_collection: str = None, session_id: str = None, tool_progress_callback: callable = None, tool_start_callback: callable = None, tool_complete_callback: callable = None, thinking_callback: callable = None, reasoning_callback: callable = None, clarify_callback: callable = None, step_callback: callable = None, stream_delta_callback: callable = None, interim_assistant_callback: 
callable = None, tool_gen_callback: callable = None, status_callback: callable = None, max_tokens: int = None, reasoning_config: Dict[str, Any] = None, service_tier: str = None, request_overrides: Dict[str, Any] = None, prefill_messages: List[Dict[str, Any]] = None, platform: str = None, user_id: str = None, gateway_session_key: str = None, skip_context_files: bool = False, skip_memory: bool = False, session_db=None, parent_session_id: str = None, iteration_budget: "IterationBudget" = None, fallback_model: Dict[str, Any] = None, credential_pool=None, checkpoints_enabled: bool = False, checkpoint_max_snapshots: int = 50, pass_session_id: bool = False, persist_session: bool = True, ): """ Initialize the AI Agent. Args: base_url (str): Base URL for the model API (optional) api_key (str): API key for authentication (optional, uses env var if not provided) provider (str): Provider identifier (optional; used for telemetry/routing hints) api_mode (str): API mode override: "chat_completions" or "codex_responses" model (str): Model name to use (default: "anthropic/claude-opus-4.6") max_iterations (int): Maximum number of tool calling iterations (default: 90) tool_delay (float): Delay between tool calls in seconds (default: 1.0) enabled_toolsets (List[str]): Only enable tools from these toolsets (optional) disabled_toolsets (List[str]): Disable tools from these toolsets (optional) save_trajectories (bool): Whether to save conversation trajectories to JSONL files (default: False) verbose_logging (bool): Enable verbose logging for debugging (default: False) quiet_mode (bool): Suppress progress output for clean CLI experience (default: False) ephemeral_system_prompt (str): System prompt used during agent execution but NOT saved to trajectories (optional) log_prefix_chars (int): Number of characters to show in log previews for tool calls/responses (default: 100) log_prefix (str): Prefix to add to all log messages for identification in parallel processing (default: "") 
providers_allowed (List[str]): OpenRouter providers to allow (optional) providers_ignored (List[str]): OpenRouter providers to ignore (optional) providers_order (List[str]): OpenRouter providers to try in order (optional) provider_sort (str): Sort providers by price/throughput/latency (optional) session_id (str): Pre-generated session ID for logging (optional, auto-generated if not provided) tool_progress_callback (callable): Callback function(tool_name, args_preview) for progress notifications clarify_callback (callable): Callback function(question, choices) -> str for interactive user questions. Provided by the platform layer (CLI or gateway). If None, the clarify tool returns an error. max_tokens (int): Maximum tokens for model responses (optional, uses model default if not set) reasoning_config (Dict): OpenRouter reasoning configuration override (e.g. {"effort": "none"} to disable thinking). If None, defaults to {"enabled": True, "effort": "medium"} for OpenRouter. Set to disable/customize reasoning. prefill_messages (List[Dict]): Messages to prepend to conversation history as prefilled context. Useful for injecting a few-shot example or priming the model's response style. Example: [{"role": "user", "content": "Hi!"}, {"role": "assistant", "content": "Hello!"}] NOTE: Anthropic Sonnet 4.6+ and Opus 4.6+ reject a conversation that ends on an assistant-role message (400 error). For those models use structured outputs or output_config.format instead of a trailing-assistant prefill. platform (str): The interface platform the user is on (e.g. "cli", "telegram", "discord", "whatsapp"). Used to inject platform-specific formatting hints into the system prompt. skip_context_files (bool): If True, skip auto-injection of SOUL.md, AGENTS.md, and .cursorrules into the system prompt. Use this for batch processing and data generation to avoid polluting trajectories with user-specific persona or project instructions. 
""" _install_safe_stdio() self.model = model self.max_iterations = max_iterations # Shared iteration budget — parent creates, children inherit. # Consumed by every LLM turn across parent + all subagents. self.iteration_budget = iteration_budget or IterationBudget(max_iterations) self.tool_delay = tool_delay self.save_trajectories = save_trajectories self.verbose_logging = verbose_logging self.quiet_mode = quiet_mode self.ephemeral_system_prompt = ephemeral_system_prompt self.platform = platform # "cli", "telegram", "discord", "whatsapp", etc. self._user_id = user_id # Platform user identifier (gateway sessions) self._gateway_session_key = gateway_session_key # Stable per-chat key (e.g. agent:main:telegram:dm:123) # Pluggable print function — CLI replaces this with _cprint so that # raw ANSI status lines are routed through prompt_toolkit's renderer # instead of going directly to stdout where patch_stdout's StdoutProxy # would mangle the escape sequences. None = use builtins.print. self._print_fn = None self.background_review_callback = None # Optional sync callback for gateway delivery self.skip_context_files = skip_context_files self.pass_session_id = pass_session_id self.persist_session = persist_session self._credential_pool = credential_pool self.log_prefix_chars = log_prefix_chars self.log_prefix = f"{log_prefix} " if log_prefix else "" # Store effective base URL for feature detection (prompt caching, reasoning, etc.) 
self.base_url = base_url or "" provider_name = provider.strip().lower() if isinstance(provider, str) and provider.strip() else None self.provider = provider_name or "" self.acp_command = acp_command or command self.acp_args = list(acp_args or args or []) if api_mode in {"chat_completions", "codex_responses", "anthropic_messages", "bedrock_converse"}: self.api_mode = api_mode elif self.provider == "openai-codex": self.api_mode = "codex_responses" elif self.provider == "xai": self.api_mode = "codex_responses" elif (provider_name is None) and "chatgpt.com/backend-api/codex" in self._base_url_lower: self.api_mode = "codex_responses" self.provider = "openai-codex" elif (provider_name is None) and "api.x.ai" in self._base_url_lower: self.api_mode = "codex_responses" self.provider = "xai" elif self.provider == "anthropic" or (provider_name is None and "api.anthropic.com" in self._base_url_lower): self.api_mode = "anthropic_messages" self.provider = "anthropic" elif self._base_url_lower.rstrip("/").endswith("/anthropic"): # Third-party Anthropic-compatible endpoints (e.g. MiniMax, DashScope) # use a URL convention ending in /anthropic. Auto-detect these so the # Anthropic Messages API adapter is used instead of chat completions. self.api_mode = "anthropic_messages" elif self.provider == "bedrock" or "bedrock-runtime" in self._base_url_lower: # AWS Bedrock — auto-detect from provider name or base URL. self.api_mode = "bedrock_converse" else: self.api_mode = "chat_completions" try: from hermes_cli.model_normalize import ( _AGGREGATOR_PROVIDERS, normalize_model_for_provider, ) if self.provider not in _AGGREGATOR_PROVIDERS: self.model = normalize_model_for_provider(self.model, self.provider) except Exception: pass # GPT-5.x models usually require the Responses API path, but some # providers have exceptions (for example Copilot's gpt-5-mini still # uses chat completions). 
Also auto-upgrade for direct OpenAI URLs # (api.openai.com) since all newer tool-calling models prefer # Responses there. ACP runtimes are excluded: CopilotACPClient # handles its own routing and does not implement the Responses API # surface. # When api_mode was explicitly provided, respect it — the user # knows what their endpoint supports (#10473). if ( api_mode is None and self.api_mode == "chat_completions" and self.provider != "copilot-acp" and not str(self.base_url or "").lower().startswith("acp://copilot") and not str(self.base_url or "").lower().startswith("acp+tcp://") and ( self._is_direct_openai_url() or self._provider_model_requires_responses_api( self.model, provider=self.provider, ) ) ): self.api_mode = "codex_responses" # Pre-warm OpenRouter model metadata cache in a background thread. # fetch_model_metadata() is cached for 1 hour; this avoids a blocking # HTTP request on the first API response when pricing is estimated. if self.provider == "openrouter" or self._is_openrouter_url(): threading.Thread( target=lambda: fetch_model_metadata(), daemon=True, ).start() self.tool_progress_callback = tool_progress_callback self.tool_start_callback = tool_start_callback self.tool_complete_callback = tool_complete_callback self.suppress_status_output = False self.thinking_callback = thinking_callback self.reasoning_callback = reasoning_callback self.clarify_callback = clarify_callback self.step_callback = step_callback self.stream_delta_callback = stream_delta_callback self.interim_assistant_callback = interim_assistant_callback self.status_callback = status_callback self.tool_gen_callback = tool_gen_callback # Tool execution state — allows _vprint during tool execution # even when stream consumers are registered (no tokens streaming then) self._executing_tools = False # Interrupt mechanism for breaking out of tool loops self._interrupt_requested = False self._interrupt_message = None # Optional message that triggered interrupt self._execution_thread_id: int | 
None = None # Set at run_conversation() start self._interrupt_thread_signal_pending = False self._client_lock = threading.RLock() # Subagent delegation state self._delegate_depth = 0 # 0 = top-level agent, incremented for children self._active_children = [] # Running child AIAgents (for interrupt propagation) self._active_children_lock = threading.Lock() # Store OpenRouter provider preferences self.providers_allowed = providers_allowed self.providers_ignored = providers_ignored self.providers_order = providers_order self.provider_sort = provider_sort self.provider_require_parameters = provider_require_parameters self.provider_data_collection = provider_data_collection # Store toolset filtering options self.enabled_toolsets = enabled_toolsets self.disabled_toolsets = disabled_toolsets # Model response configuration self.max_tokens = max_tokens # None = use model default self.reasoning_config = reasoning_config # None = use default (medium for OpenRouter) self.service_tier = service_tier self.request_overrides = dict(request_overrides or {}) self.prefill_messages = prefill_messages or [] # Prefilled conversation turns self._force_ascii_payload = False # Anthropic prompt caching: auto-enabled for Claude models via OpenRouter. # Reduces input costs by ~75% on multi-turn conversations by caching the # conversation prefix. Uses system_and_3 strategy (4 breakpoints). is_openrouter = self._is_openrouter_url() is_claude = "claude" in self.model.lower() is_native_anthropic = self.api_mode == "anthropic_messages" and self.provider == "anthropic" self._use_prompt_caching = (is_openrouter and is_claude) or is_native_anthropic self._cache_ttl = "5m" # Default 5-minute TTL (1.25x write cost) # Iteration budget: the LLM is only notified when it actually exhausts # the iteration budget (api_call_count >= max_iterations). At that # point we inject ONE message, allow one final API call, and if the # model doesn't produce a text response, force a user-message asking # it to summarise. 
No intermediate pressure warnings — they caused # models to "give up" prematurely on complex tasks (#7915). self._budget_exhausted_injected = False self._budget_grace_call = False # Activity tracking — updated on each API call, tool execution, and # stream chunk. Used by the gateway timeout handler to report what the # agent was doing when it was killed, and by the "still working" # notifications to show progress. self._last_activity_ts: float = time.time() self._last_activity_desc: str = "initializing" self._current_tool: str | None = None self._api_call_count: int = 0 # Rate limit tracking — updated from x-ratelimit-* response headers # after each API call. Accessed by /usage slash command. self._rate_limit_state: Optional["RateLimitState"] = None # Centralized logging — agent.log (INFO+) and errors.log (WARNING+) # both live under ~/.hermes/logs/. Idempotent, so gateway mode # (which creates a new AIAgent per message) won't duplicate handlers. from hermes_logging import setup_logging, setup_verbose_logging setup_logging(hermes_home=_hermes_home) if self.verbose_logging: setup_verbose_logging() logger.info("Verbose logging enabled (third-party library logs suppressed)") else: if self.quiet_mode: # In quiet mode (CLI default), suppress all tool/infra log # noise on the *console*. The TUI has its own rich display # for status; logger INFO/WARNING messages just clutter it. # File handlers (agent.log, errors.log) still capture everything. for quiet_logger in [ 'tools', # all tools.* (terminal, browser, web, file, etc.) 'run_agent', # agent runner internals 'trajectory_compressor', 'cron', # scheduler (only relevant in daemon mode) 'hermes_cli', # CLI helpers ]: logging.getLogger(quiet_logger).setLevel(logging.ERROR) # Internal stream callback (set during streaming TTS). # Initialized here so _vprint can reference it before run_conversation. 
self._stream_callback = None # Deferred paragraph break flag — set after tool iterations so a # single "\n\n" is prepended to the next real text delta. self._stream_needs_break = False # Visible assistant text already delivered through live token callbacks # during the current model response. Used to avoid re-sending the same # commentary when the provider later returns it as a completed interim # assistant message. self._current_streamed_assistant_text = "" # Optional current-turn user-message override used when the API-facing # user message intentionally differs from the persisted transcript # (e.g. CLI voice mode adds a temporary prefix for the live call only). self._persist_user_message_idx = None self._persist_user_message_override = None # Cache anthropic image-to-text fallbacks per image payload/URL so a # single tool loop does not repeatedly re-run auxiliary vision on the # same image history. self._anthropic_image_fallback_cache: Dict[str, str] = {} # Initialize LLM client via centralized provider router. # The router handles auth resolution, base URL, headers, and # Codex/Anthropic wrapping for all known providers. # raw_codex=True because the main agent needs direct responses.stream() # access for Codex Responses API streaming. self._anthropic_client = None self._is_anthropic_oauth = False if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_client, resolve_anthropic_token # Bedrock + Claude → use AnthropicBedrock SDK for full feature parity # (prompt caching, thinking budgets, adaptive thinking). 
_is_bedrock_anthropic = self.provider == "bedrock" if _is_bedrock_anthropic: from agent.anthropic_adapter import build_anthropic_bedrock_client import re as _re _region_match = _re.search(r"bedrock-runtime\.([a-z0-9-]+)\.", base_url or "") _br_region = _region_match.group(1) if _region_match else "us-east-1" self._bedrock_region = _br_region self._anthropic_client = build_anthropic_bedrock_client(_br_region) self._anthropic_api_key = "aws-sdk" self._anthropic_base_url = base_url self._is_anthropic_oauth = False self.api_key = "aws-sdk" self.client = None self._client_kwargs = {} if not self.quiet_mode: print(f"🤖 AI Agent initialized with model: {self.model} (AWS Bedrock + AnthropicBedrock SDK, {_br_region})") else: # Only fall back to ANTHROPIC_TOKEN when the provider is actually Anthropic. # Other anthropic_messages providers (MiniMax, Alibaba, etc.) must use their own API key. # Falling back would send Anthropic credentials to third-party endpoints (Fixes #1739, #minimax-401). _is_native_anthropic = self.provider == "anthropic" effective_key = (api_key or resolve_anthropic_token() or "") if _is_native_anthropic else (api_key or "") self.api_key = effective_key self._anthropic_api_key = effective_key self._anthropic_base_url = base_url from agent.anthropic_adapter import _is_oauth_token as _is_oat self._is_anthropic_oauth = _is_oat(effective_key) self._anthropic_client = build_anthropic_client(effective_key, base_url) # No OpenAI client needed for Anthropic mode self.client = None self._client_kwargs = {} if not self.quiet_mode: print(f"🤖 AI Agent initialized with model: {self.model} (Anthropic native)") if effective_key and len(effective_key) > 12: print(f"🔑 Using token: {effective_key[:8]}...{effective_key[-4:]}") elif self.api_mode == "bedrock_converse": # AWS Bedrock — uses boto3 directly, no OpenAI client needed. # Region is extracted from the base_url or defaults to us-east-1. 
import re as _re _region_match = _re.search(r"bedrock-runtime\.([a-z0-9-]+)\.", base_url or "") self._bedrock_region = _region_match.group(1) if _region_match else "us-east-1" # Guardrail config — read from config.yaml at init time. self._bedrock_guardrail_config = None try: from hermes_cli.config import load_config as _load_br_cfg _gr = _load_br_cfg().get("bedrock", {}).get("guardrail", {}) if _gr.get("guardrail_identifier") and _gr.get("guardrail_version"): self._bedrock_guardrail_config = { "guardrailIdentifier": _gr["guardrail_identifier"], "guardrailVersion": _gr["guardrail_version"], } if _gr.get("stream_processing_mode"): self._bedrock_guardrail_config["streamProcessingMode"] = _gr["stream_processing_mode"] if _gr.get("trace"): self._bedrock_guardrail_config["trace"] = _gr["trace"] except Exception: pass self.client = None self._client_kwargs = {} if not self.quiet_mode: _gr_label = " + Guardrails" if self._bedrock_guardrail_config else "" print(f"🤖 AI Agent initialized with model: {self.model} (AWS Bedrock, {self._bedrock_region}{_gr_label})") else: if api_key and base_url: # Explicit credentials from CLI/gateway — construct directly. # The runtime provider resolver already handled auth for us. 
client_kwargs = {"api_key": api_key, "base_url": base_url} if self.provider == "copilot-acp": client_kwargs["command"] = self.acp_command client_kwargs["args"] = self.acp_args effective_base = base_url if "openrouter" in effective_base.lower(): client_kwargs["default_headers"] = { "HTTP-Referer": "https://hermes-agent.nousresearch.com", "X-OpenRouter-Title": "Hermes Agent", "X-OpenRouter-Categories": "productivity,cli-agent", } elif "api.githubcopilot.com" in effective_base.lower(): from hermes_cli.models import copilot_default_headers client_kwargs["default_headers"] = copilot_default_headers() elif "api.kimi.com" in effective_base.lower(): client_kwargs["default_headers"] = { "User-Agent": "KimiCLI/1.30.0", } elif "portal.qwen.ai" in effective_base.lower(): client_kwargs["default_headers"] = _qwen_portal_headers() else: # No explicit creds — use the centralized provider router from agent.auxiliary_client import resolve_provider_client _routed_client, _ = resolve_provider_client( self.provider or "auto", model=self.model, raw_codex=True) if _routed_client is not None: client_kwargs = { "api_key": _routed_client.api_key, "base_url": str(_routed_client.base_url), } # Preserve any default_headers the router set if hasattr(_routed_client, '_default_headers') and _routed_client._default_headers: client_kwargs["default_headers"] = dict(_routed_client._default_headers) else: # When the user explicitly chose a non-OpenRouter provider # but no credentials were found, fail fast with a clear # message instead of silently routing through OpenRouter. _explicit = (self.provider or "").strip().lower() if _explicit and _explicit not in ("auto", "openrouter", "custom"): # Look up the actual env var name from the provider # config — some providers use non-standard names # (e.g. alibaba → DASHSCOPE_API_KEY, not ALIBABA_API_KEY). 
_env_hint = f"{_explicit.upper()}_API_KEY" try: from hermes_cli.auth import PROVIDER_REGISTRY _pcfg = PROVIDER_REGISTRY.get(_explicit) if _pcfg and _pcfg.api_key_env_vars: _env_hint = _pcfg.api_key_env_vars[0] except Exception: pass raise RuntimeError( f"Provider '{_explicit}' is set in config.yaml but no API key " f"was found. Set the {_env_hint} environment " f"variable, or switch to a different provider with `hermes model`." ) # No provider configured — reject with a clear message. raise RuntimeError( "No LLM provider configured. Run `hermes model` to " "select a provider, or run `hermes setup` for first-time " "configuration." ) self._client_kwargs = client_kwargs # stored for rebuilding after interrupt # Enable fine-grained tool streaming for Claude on OpenRouter. # Without this, Anthropic buffers the entire tool call and goes # silent for minutes while thinking — OpenRouter's upstream proxy # times out during the silence. The beta header makes Anthropic # stream tool call arguments token-by-token, keeping the # connection alive. 
_effective_base = str(client_kwargs.get("base_url", "")).lower() if "openrouter" in _effective_base and "claude" in (self.model or "").lower(): headers = client_kwargs.get("default_headers") or {} existing_beta = headers.get("x-anthropic-beta", "") _FINE_GRAINED = "fine-grained-tool-streaming-2025-05-14" if _FINE_GRAINED not in existing_beta: if existing_beta: headers["x-anthropic-beta"] = f"{existing_beta},{_FINE_GRAINED}" else: headers["x-anthropic-beta"] = _FINE_GRAINED client_kwargs["default_headers"] = headers self.api_key = client_kwargs.get("api_key", "") self.base_url = client_kwargs.get("base_url", self.base_url) try: self.client = self._create_openai_client(client_kwargs, reason="agent_init", shared=True) if not self.quiet_mode: print(f"🤖 AI Agent initialized with model: {self.model}") if base_url: print(f"🔗 Using custom base URL: {base_url}") # Always show API key info (masked) for debugging auth issues key_used = client_kwargs.get("api_key", "none") if key_used and key_used != "dummy-key" and len(key_used) > 12: print(f"🔑 Using API key: {key_used[:8]}...{key_used[-4:]}") else: print(f"⚠️ Warning: API key appears invalid or missing (got: '{key_used[:20] if key_used else 'none'}...')") except Exception as e: raise RuntimeError(f"Failed to initialize OpenAI client: {e}") # Provider fallback chain — ordered list of backup providers tried # when the primary is exhausted (rate-limit, overload, connection # failure). Supports both legacy single-dict ``fallback_model`` and # new list ``fallback_providers`` format. 
if isinstance(fallback_model, list): self._fallback_chain = [ f for f in fallback_model if isinstance(f, dict) and f.get("provider") and f.get("model") ] elif isinstance(fallback_model, dict) and fallback_model.get("provider") and fallback_model.get("model"): self._fallback_chain = [fallback_model] else: self._fallback_chain = [] self._fallback_index = 0 self._fallback_activated = False # Legacy attribute kept for backward compat (tests, external callers) self._fallback_model = self._fallback_chain[0] if self._fallback_chain else None if self._fallback_chain and not self.quiet_mode: if len(self._fallback_chain) == 1: fb = self._fallback_chain[0] print(f"🔄 Fallback model: {fb['model']} ({fb['provider']})") else: print(f"🔄 Fallback chain ({len(self._fallback_chain)} providers): " + " → ".join(f"{f['model']} ({f['provider']})" for f in self._fallback_chain)) # Get available tools with filtering self.tools = get_tool_definitions( enabled_toolsets=enabled_toolsets, disabled_toolsets=disabled_toolsets, quiet_mode=self.quiet_mode, ) # Show tool configuration and store valid tool names for validation self.valid_tool_names = set() if self.tools: self.valid_tool_names = {tool["function"]["name"] for tool in self.tools} tool_names = sorted(self.valid_tool_names) if not self.quiet_mode: print(f"🛠️ Loaded {len(self.tools)} tools: {', '.join(tool_names)}") # Show filtering info if applied if enabled_toolsets: print(f" ✅ Enabled toolsets: {', '.join(enabled_toolsets)}") if disabled_toolsets: print(f" ❌ Disabled toolsets: {', '.join(disabled_toolsets)}") elif not self.quiet_mode: print("🛠️ No tools loaded (all tools filtered out or unavailable)") # Check tool requirements if self.tools and not self.quiet_mode: requirements = check_toolset_requirements() missing_reqs = [name for name, available in requirements.items() if not available] if missing_reqs: print(f"⚠️ Some tools may not work due to missing requirements: {missing_reqs}") # Show trajectory saving status if 
self.save_trajectories and not self.quiet_mode: print("📝 Trajectory saving enabled") # Show ephemeral system prompt status if self.ephemeral_system_prompt and not self.quiet_mode: prompt_preview = self.ephemeral_system_prompt[:60] + "..." if len(self.ephemeral_system_prompt) > 60 else self.ephemeral_system_prompt print(f"🔒 Ephemeral system prompt: '{prompt_preview}' (not saved to trajectories)") # Show prompt caching status if self._use_prompt_caching and not self.quiet_mode: source = "native Anthropic" if is_native_anthropic else "Claude via OpenRouter" print(f"💾 Prompt caching: ENABLED ({source}, {self._cache_ttl} TTL)") # Session logging setup - auto-save conversation trajectories for debugging self.session_start = datetime.now() if session_id: # Use provided session ID (e.g., from CLI) self.session_id = session_id else: # Generate a new session ID timestamp_str = self.session_start.strftime("%Y%m%d_%H%M%S") short_uuid = uuid.uuid4().hex[:6] self.session_id = f"{timestamp_str}_{short_uuid}" # Session logs go into ~/.hermes/sessions/ alongside gateway sessions hermes_home = get_hermes_home() self.logs_dir = hermes_home / "sessions" self.logs_dir.mkdir(parents=True, exist_ok=True) self.session_log_file = self.logs_dir / f"session_{self.session_id}.json" # Track conversation messages for session logging self._session_messages: List[Dict[str, Any]] = [] # Cached system prompt -- built once per session, only rebuilt on compression self._cached_system_prompt: Optional[str] = None # Filesystem checkpoint manager (transparent — not a tool) from tools.checkpoint_manager import CheckpointManager self._checkpoint_mgr = CheckpointManager( enabled=checkpoints_enabled, max_snapshots=checkpoint_max_snapshots, ) # SQLite session store (optional -- provided by CLI or gateway) self._session_db = session_db self._parent_session_id = parent_session_id self._last_flushed_db_idx = 0 # tracks DB-write cursor to prevent duplicate writes if self._session_db: try: 
self._session_db.create_session( session_id=self.session_id, source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"), model=self.model, model_config={ "max_iterations": self.max_iterations, "reasoning_config": reasoning_config, "max_tokens": max_tokens, }, user_id=None, parent_session_id=self._parent_session_id, ) except Exception as e: # Transient SQLite lock contention (e.g. CLI and gateway writing # concurrently) must NOT permanently disable session_search for # this agent. Keep _session_db alive — subsequent message # flushes and session_search calls will still work once the # lock clears. The session row may be missing from the index # for this run, but that is recoverable (flushes upsert rows). logger.warning( "Session DB create_session failed (session_search still available): %s", e ) # In-memory todo list for task planning (one per agent/session) from tools.todo_tool import TodoStore self._todo_store = TodoStore() # Load config once for memory, skills, and compression sections try: from hermes_cli.config import load_config as _load_agent_config _agent_cfg = _load_agent_config() except Exception: _agent_cfg = {} # Persistent memory (MEMORY.md + USER.md) -- loaded from disk self._memory_store = None self._memory_enabled = False self._user_profile_enabled = False self._memory_nudge_interval = 10 self._memory_flush_min_turns = 6 self._turns_since_memory = 0 self._iters_since_skill = 0 if not skip_memory: try: mem_config = _agent_cfg.get("memory", {}) self._memory_enabled = mem_config.get("memory_enabled", False) self._user_profile_enabled = mem_config.get("user_profile_enabled", False) self._memory_nudge_interval = int(mem_config.get("nudge_interval", 10)) self._memory_flush_min_turns = int(mem_config.get("flush_min_turns", 6)) if self._memory_enabled or self._user_profile_enabled: from tools.memory_tool import MemoryStore self._memory_store = MemoryStore( memory_char_limit=mem_config.get("memory_char_limit", 2200), 
user_char_limit=mem_config.get("user_char_limit", 1375), ) self._memory_store.load_from_disk() except Exception: pass # Memory is optional -- don't break agent init # Memory provider plugin (external — one at a time, alongside built-in) # Reads memory.provider from config to select which plugin to activate. self._memory_manager = None if not skip_memory: try: _mem_provider_name = mem_config.get("provider", "") if mem_config else "" # Auto-migrate: if Honcho was actively configured (enabled + # credentials) but memory.provider is not set, activate the # honcho plugin automatically. Just having the config file # is not enough — the user may have disabled Honcho or the # file may be from a different tool. if not _mem_provider_name: try: from plugins.memory.honcho.client import HonchoClientConfig as _HCC _hcfg = _HCC.from_global_config() if _hcfg.enabled and (_hcfg.api_key or _hcfg.base_url): _mem_provider_name = "honcho" # Persist so this only auto-migrates once try: from hermes_cli.config import load_config as _lc, save_config as _sc _cfg = _lc() _cfg.setdefault("memory", {})["provider"] = "honcho" _sc(_cfg) except Exception: pass if not self.quiet_mode: print(" ✓ Auto-migrated Honcho to memory provider plugin.") print(" Your config and data are preserved.\n") except Exception: pass if _mem_provider_name: from agent.memory_manager import MemoryManager as _MemoryManager from plugins.memory import load_memory_provider as _load_mem self._memory_manager = _MemoryManager() _mp = _load_mem(_mem_provider_name) if _mp and _mp.is_available(): self._memory_manager.add_provider(_mp) if self._memory_manager.providers: from hermes_constants import get_hermes_home as _ghh _init_kwargs = { "session_id": self.session_id, "platform": platform or "cli", "hermes_home": str(_ghh()), "agent_context": "primary", } # Thread session title for memory provider scoping # (e.g. 
honcho uses this to derive chat-scoped session keys) if self._session_db: try: _st = self._session_db.get_session_title(self.session_id) if _st: _init_kwargs["session_title"] = _st except Exception: pass # Thread gateway user identity for per-user memory scoping if self._user_id: _init_kwargs["user_id"] = self._user_id # Thread gateway session key for stable per-chat Honcho session isolation if self._gateway_session_key: _init_kwargs["gateway_session_key"] = self._gateway_session_key # Profile identity for per-profile provider scoping try: from hermes_cli.profiles import get_active_profile_name _profile = get_active_profile_name() _init_kwargs["agent_identity"] = _profile _init_kwargs["agent_workspace"] = "hermes" except Exception: pass self._memory_manager.initialize_all(**_init_kwargs) logger.info("Memory provider '%s' activated", _mem_provider_name) else: logger.debug("Memory provider '%s' not found or not available", _mem_provider_name) self._memory_manager = None except Exception as _mpe: logger.warning("Memory provider plugin init failed: %s", _mpe) self._memory_manager = None # Inject memory provider tool schemas into the tool surface. # Skip tools whose names already exist (plugins may register the # same tools via ctx.register_tool(), which lands in self.tools # through get_tool_definitions()). Duplicate function names cause # 400 errors on providers that enforce unique names (e.g. Xiaomi # MiMo via Nous Portal). 
if self._memory_manager and self.tools is not None: _existing_tool_names = { t.get("function", {}).get("name") for t in self.tools if isinstance(t, dict) } for _schema in self._memory_manager.get_all_tool_schemas(): _tname = _schema.get("name", "") if _tname and _tname in _existing_tool_names: continue # already registered via plugin path _wrapped = {"type": "function", "function": _schema} self.tools.append(_wrapped) if _tname: self.valid_tool_names.add(_tname) _existing_tool_names.add(_tname) # Skills config: nudge interval for skill creation reminders self._skill_nudge_interval = 10 try: skills_config = _agent_cfg.get("skills", {}) self._skill_nudge_interval = int(skills_config.get("creation_nudge_interval", 10)) except Exception: pass # Tool-use enforcement config: "auto" (default — matches hardcoded # model list), true (always), false (never), or list of substrings. _agent_section = _agent_cfg.get("agent", {}) if not isinstance(_agent_section, dict): _agent_section = {} self._tool_use_enforcement = _agent_section.get("tool_use_enforcement", "auto") # Initialize context compressor for automatic context management # Compresses conversation when approaching model's context limit # Configuration via config.yaml (compression section) _compression_cfg = _agent_cfg.get("compression", {}) if not isinstance(_compression_cfg, dict): _compression_cfg = {} compression_threshold = float(_compression_cfg.get("threshold", 0.50)) compression_enabled = str(_compression_cfg.get("enabled", True)).lower() in ("true", "1", "yes") compression_target_ratio = float(_compression_cfg.get("target_ratio", 0.20)) compression_protect_last = int(_compression_cfg.get("protect_last_n", 20)) # Read explicit context_length override from model config _model_cfg = _agent_cfg.get("model", {}) if isinstance(_model_cfg, dict): _config_context_length = _model_cfg.get("context_length") else: _config_context_length = None if _config_context_length is not None: try: _config_context_length = 
int(_config_context_length) except (TypeError, ValueError): logger.warning( "Invalid model.context_length in config.yaml: %r — " "must be a plain integer (e.g. 256000, not '256K'). " "Falling back to auto-detection.", _config_context_length, ) import sys print( f"\n⚠ Invalid model.context_length in config.yaml: {_config_context_length!r}\n" f" Must be a plain integer (e.g. 256000, not '256K').\n" f" Falling back to auto-detected context window.\n", file=sys.stderr, ) _config_context_length = None # Store for reuse in switch_model (so config override persists across model switches) self._config_context_length = _config_context_length # Check custom_providers per-model context_length if _config_context_length is None: try: from hermes_cli.config import get_compatible_custom_providers _custom_providers = get_compatible_custom_providers(_agent_cfg) except Exception: _custom_providers = _agent_cfg.get("custom_providers") if not isinstance(_custom_providers, list): _custom_providers = [] for _cp_entry in _custom_providers: if not isinstance(_cp_entry, dict): continue _cp_url = (_cp_entry.get("base_url") or "").rstrip("/") if _cp_url and _cp_url == self.base_url.rstrip("/"): _cp_models = _cp_entry.get("models", {}) if isinstance(_cp_models, dict): _cp_model_cfg = _cp_models.get(self.model, {}) if isinstance(_cp_model_cfg, dict): _cp_ctx = _cp_model_cfg.get("context_length") if _cp_ctx is not None: try: _config_context_length = int(_cp_ctx) except (TypeError, ValueError): logger.warning( "Invalid context_length for model %r in " "custom_providers: %r — must be a plain " "integer (e.g. 256000, not '256K'). " "Falling back to auto-detection.", self.model, _cp_ctx, ) import sys print( f"\n⚠ Invalid context_length for model {self.model!r} in custom_providers: {_cp_ctx!r}\n" f" Must be a plain integer (e.g. 256000, not '256K').\n" f" Falling back to auto-detected context window.\n", file=sys.stderr, ) break # Select context engine: config-driven (like memory providers). # 1. 
Check config.yaml context.engine setting # 2. Check plugins/context_engine// directory (repo-shipped) # 3. Check general plugin system (user-installed plugins) # 4. Fall back to built-in ContextCompressor _selected_engine = None _engine_name = "compressor" # default try: _ctx_cfg = _agent_cfg.get("context", {}) if isinstance(_agent_cfg, dict) else {} _engine_name = _ctx_cfg.get("engine", "compressor") or "compressor" except Exception: pass if _engine_name != "compressor": # Try loading from plugins/context_engine// try: from plugins.context_engine import load_context_engine _selected_engine = load_context_engine(_engine_name) except Exception as _ce_load_err: logger.debug("Context engine load from plugins/context_engine/: %s", _ce_load_err) # Try general plugin system as fallback if _selected_engine is None: try: from hermes_cli.plugins import get_plugin_context_engine _candidate = get_plugin_context_engine() if _candidate and _candidate.name == _engine_name: _selected_engine = _candidate except Exception: pass if _selected_engine is None: logger.warning( "Context engine '%s' not found — falling back to built-in compressor", _engine_name, ) # else: config says "compressor" — use built-in, don't auto-activate plugins if _selected_engine is not None: self.context_compressor = _selected_engine # Resolve context_length for plugin engines — mirrors switch_model() path from agent.model_metadata import get_model_context_length _plugin_ctx_len = get_model_context_length( self.model, base_url=self.base_url, api_key=getattr(self, "api_key", ""), config_context_length=_config_context_length, provider=self.provider, ) self.context_compressor.update_model( model=self.model, context_length=_plugin_ctx_len, base_url=self.base_url, api_key=getattr(self, "api_key", ""), provider=self.provider, ) if not self.quiet_mode: logger.info("Using context engine: %s", _selected_engine.name) else: self.context_compressor = ContextCompressor( model=self.model, 
threshold_percent=compression_threshold, protect_first_n=3, protect_last_n=compression_protect_last, summary_target_ratio=compression_target_ratio, summary_model_override=None, quiet_mode=self.quiet_mode, base_url=self.base_url, api_key=getattr(self, "api_key", ""), config_context_length=_config_context_length, provider=self.provider, api_mode=self.api_mode, ) self.compression_enabled = compression_enabled # Reject models whose context window is below the minimum required # for reliable tool-calling workflows (64K tokens). from agent.model_metadata import MINIMUM_CONTEXT_LENGTH _ctx = getattr(self.context_compressor, "context_length", 0) if _ctx and _ctx < MINIMUM_CONTEXT_LENGTH: raise ValueError( f"Model {self.model} has a context window of {_ctx:,} tokens, " f"which is below the minimum {MINIMUM_CONTEXT_LENGTH:,} required " f"by Hermes Agent. Choose a model with at least " f"{MINIMUM_CONTEXT_LENGTH // 1000}K context, or set " f"model.context_length in config.yaml to override." ) # Inject context engine tool schemas (e.g. 
lcm_grep, lcm_describe, lcm_expand) self._context_engine_tool_names: set = set() if hasattr(self, "context_compressor") and self.context_compressor and self.tools is not None: for _schema in self.context_compressor.get_tool_schemas(): _wrapped = {"type": "function", "function": _schema} self.tools.append(_wrapped) _tname = _schema.get("name", "") if _tname: self.valid_tool_names.add(_tname) self._context_engine_tool_names.add(_tname) # Notify context engine of session start if hasattr(self, "context_compressor") and self.context_compressor: try: self.context_compressor.on_session_start( self.session_id, hermes_home=str(get_hermes_home()), platform=self.platform or "cli", model=self.model, context_length=getattr(self.context_compressor, "context_length", 0), ) except Exception as _ce_err: logger.debug("Context engine on_session_start: %s", _ce_err) self._subdirectory_hints = SubdirectoryHintTracker( working_dir=os.getenv("TERMINAL_CWD") or None, ) self._user_turn_count = 0 # Cumulative token usage for the session self.session_prompt_tokens = 0 self.session_completion_tokens = 0 self.session_total_tokens = 0 self.session_api_calls = 0 self.session_input_tokens = 0 self.session_output_tokens = 0 self.session_cache_read_tokens = 0 self.session_cache_write_tokens = 0 self.session_reasoning_tokens = 0 self.session_estimated_cost_usd = 0.0 self.session_cost_status = "unknown" self.session_cost_source = "none" # ── Ollama num_ctx injection ── # Ollama defaults to 2048 context regardless of the model's capabilities. # When running against an Ollama server, detect the model's max context # and pass num_ctx on every chat request so the full window is used. # User override: set model.ollama_num_ctx in config.yaml to cap VRAM use. 
self._ollama_num_ctx: int | None = None _ollama_num_ctx_override = None if isinstance(_model_cfg, dict): _ollama_num_ctx_override = _model_cfg.get("ollama_num_ctx") if _ollama_num_ctx_override is not None: try: self._ollama_num_ctx = int(_ollama_num_ctx_override) except (TypeError, ValueError): logger.debug("Invalid ollama_num_ctx config value: %r", _ollama_num_ctx_override) if self._ollama_num_ctx is None and self.base_url and is_local_endpoint(self.base_url): try: _detected = query_ollama_num_ctx(self.model, self.base_url) if _detected and _detected > 0: self._ollama_num_ctx = _detected except Exception as exc: logger.debug("Ollama num_ctx detection failed: %s", exc) if self._ollama_num_ctx and not self.quiet_mode: logger.info( "Ollama num_ctx: will request %d tokens (model max from /api/show)", self._ollama_num_ctx, ) if not self.quiet_mode: if compression_enabled: print(f"📊 Context limit: {self.context_compressor.context_length:,} tokens (compress at {int(compression_threshold*100)}% = {self.context_compressor.threshold_tokens:,})") else: print(f"📊 Context limit: {self.context_compressor.context_length:,} tokens (auto-compression disabled)") # Check immediately so CLI users see the warning at startup. # Gateway status_callback is not yet wired, so any warning is stored # in _compression_warning and replayed in the first run_conversation(). self._compression_warning = None self._check_compression_model_feasibility() # Snapshot primary runtime for per-turn restoration. When fallback # activates during a turn, the next turn restores these values so the # preferred model gets a fresh attempt each time. Uses a single dict # so new state fields are easy to add without N individual attributes. 
_cc = self.context_compressor self._primary_runtime = { "model": self.model, "provider": self.provider, "base_url": self.base_url, "api_mode": self.api_mode, "api_key": getattr(self, "api_key", ""), "client_kwargs": dict(self._client_kwargs), "use_prompt_caching": self._use_prompt_caching, # Context engine state that _try_activate_fallback() overwrites. # Use getattr for model/base_url/api_key/provider since plugin # engines may not have these (they're ContextCompressor-specific). "compressor_model": getattr(_cc, "model", self.model), "compressor_base_url": getattr(_cc, "base_url", self.base_url), "compressor_api_key": getattr(_cc, "api_key", ""), "compressor_provider": getattr(_cc, "provider", self.provider), "compressor_context_length": _cc.context_length, "compressor_threshold_tokens": _cc.threshold_tokens, } if self.api_mode == "anthropic_messages": self._primary_runtime.update({ "anthropic_api_key": self._anthropic_api_key, "anthropic_base_url": self._anthropic_base_url, "is_anthropic_oauth": self._is_anthropic_oauth, }) def reset_session_state(self): """Reset all session-scoped token counters to 0 for a fresh session. This method encapsulates the reset logic for all session-level metrics including: - Token usage counters (input, output, total, prompt, completion) - Cache read/write tokens - API call count - Reasoning tokens - Estimated cost tracking - Context compressor internal counters The method safely handles optional attributes (e.g., context compressor) using ``hasattr`` checks. This keeps the counter reset logic DRY and maintainable in one place rather than scattering it across multiple methods. 
""" # Token usage counters self.session_total_tokens = 0 self.session_input_tokens = 0 self.session_output_tokens = 0 self.session_prompt_tokens = 0 self.session_completion_tokens = 0 self.session_cache_read_tokens = 0 self.session_cache_write_tokens = 0 self.session_reasoning_tokens = 0 self.session_api_calls = 0 self.session_estimated_cost_usd = 0.0 self.session_cost_status = "unknown" self.session_cost_source = "none" # Turn counter (added after reset_session_state was first written — #2635) self._user_turn_count = 0 # Context engine reset (works for both built-in compressor and plugins) if hasattr(self, "context_compressor") and self.context_compressor: self.context_compressor.on_session_reset() def switch_model(self, new_model, new_provider, api_key='', base_url='', api_mode=''): """Switch the model/provider in-place for a live agent. Called by the /model command handlers (CLI and gateway) after ``model_switch.switch_model()`` has resolved credentials and validated the model. This method performs the actual runtime swap: rebuilding clients, updating caching flags, and refreshing the context compressor. The implementation mirrors ``_try_activate_fallback()`` for the client-swap logic but also updates ``_primary_runtime`` so the change persists across turns (unlike fallback which is turn-scoped). """ import logging from hermes_cli.providers import determine_api_mode # ── Determine api_mode if not provided ── if not api_mode: api_mode = determine_api_mode(new_provider, base_url) old_model = self.model old_provider = self.provider # ── Swap core runtime fields ── self.model = new_model self.provider = new_provider self.base_url = base_url or self.base_url self.api_mode = api_mode if api_key: self.api_key = api_key # ── Build new client ── if api_mode == "anthropic_messages": from agent.anthropic_adapter import ( build_anthropic_client, resolve_anthropic_token, _is_oauth_token, ) # Only fall back to ANTHROPIC_TOKEN when the provider is actually Anthropic. 
# Other anthropic_messages providers (MiniMax, Alibaba, etc.) must use their own # API key — falling back would send Anthropic credentials to third-party endpoints. _is_native_anthropic = new_provider == "anthropic" effective_key = (api_key or self.api_key or resolve_anthropic_token() or "") if _is_native_anthropic else (api_key or self.api_key or "") self.api_key = effective_key self._anthropic_api_key = effective_key self._anthropic_base_url = base_url or getattr(self, "_anthropic_base_url", None) self._anthropic_client = build_anthropic_client( effective_key, self._anthropic_base_url, ) self._is_anthropic_oauth = _is_oauth_token(effective_key) self.client = None self._client_kwargs = {} else: effective_key = api_key or self.api_key effective_base = base_url or self.base_url self._client_kwargs = { "api_key": effective_key, "base_url": effective_base, } self.client = self._create_openai_client( dict(self._client_kwargs), reason="switch_model", shared=True, ) # ── Re-evaluate prompt caching ── is_native_anthropic = api_mode == "anthropic_messages" and new_provider == "anthropic" self._use_prompt_caching = ( ("openrouter" in (self.base_url or "").lower() and "claude" in new_model.lower()) or is_native_anthropic ) # ── Update context compressor ── if hasattr(self, "context_compressor") and self.context_compressor: from agent.model_metadata import get_model_context_length new_context_length = get_model_context_length( self.model, base_url=self.base_url, api_key=self.api_key, provider=self.provider, config_context_length=getattr(self, "_config_context_length", None), ) self.context_compressor.update_model( model=self.model, context_length=new_context_length, base_url=self.base_url, api_key=getattr(self, "api_key", ""), provider=self.provider, api_mode=self.api_mode, ) # ── Invalidate cached system prompt so it rebuilds next turn ── self._cached_system_prompt = None # ── Update _primary_runtime so the change persists across turns ── _cc = self.context_compressor if 
    def _safe_print(self, *args, **kwargs):
        """Print that silently handles broken pipes / closed stdout.

        In headless environments (systemd, Docker, nohup) stdout may become
        unavailable mid-session. A raw ``print()`` raises ``OSError`` which
        can crash cron jobs and lose completed work.

        Internally routes through ``self._print_fn`` (default: builtin
        ``print``) so callers such as the CLI can inject a renderer that
        handles ANSI escape sequences properly (e.g. prompt_toolkit's
        ``print_formatted_text(ANSI(...))``) without touching this method.
        """
        try:
            # Fall back to the builtin when no renderer was injected.
            fn = self._print_fn or print
            fn(*args, **kwargs)
        except (OSError, ValueError):
            # OSError: broken pipe / closed fd; ValueError: write to a
            # closed file object. Both mean "nowhere to print" — drop it.
            pass

    def _vprint(self, *args, force: bool = False, **kwargs):
        """Verbose print — suppressed when actively streaming tokens.

        Pass ``force=True`` for error/warning messages that should always be
        shown even during streaming playback (TTS or display).

        During tool execution (``_executing_tools`` is True), printing is
        allowed even with stream consumers registered because no tokens are
        being streamed at that point. After the main response has been
        delivered and the remaining tool calls are post-response housekeeping
        (``_mute_post_response``), all non-forced output is suppressed.

        ``suppress_status_output`` is a stricter CLI automation mode used by
        parseable single-query flows such as ``hermes chat -q``. In that
        mode, all status/diagnostic prints routed through ``_vprint`` are
        suppressed so stdout stays machine-readable.
        """
        # Strictest gate first: machine-readable stdout mode silences even
        # forced messages.
        if getattr(self, "suppress_status_output", False):
            return
        if not force and getattr(self, "_mute_post_response", False):
            return
        # _has_stream_consumers() is defined elsewhere in this class;
        # suppress non-forced output while tokens are being streamed.
        if not force and self._has_stream_consumers() and not self._executing_tools:
            return
        self._safe_print(*args, **kwargs)

    def _should_start_quiet_spinner(self) -> bool:
        """Return True when quiet-mode spinner output has a safe sink.

        In headless/stdio-protocol environments, a raw spinner with no
        custom ``_print_fn`` falls back to ``sys.stdout`` and can corrupt
        protocol streams such as ACP JSON-RPC. Allow quiet spinners only
        when either:

        - output is explicitly rerouted via ``_print_fn``; or
        - stdout is a real TTY.
        """
        if self._print_fn is not None:
            return True
        stream = getattr(sys, "stdout", None)
        if stream is None:
            return False
        try:
            return bool(stream.isatty())
        except (AttributeError, ValueError, OSError):
            # Some wrapped/closed streams raise on isatty(); treat as unsafe.
            return False

    def _should_emit_quiet_tool_messages(self) -> bool:
        """Return True when quiet-mode tool summaries should print directly.

        When the caller provides ``tool_progress_callback`` (for example the
        CLI TUI or a gateway progress renderer), that callback owns progress
        display. Emitting quiet-mode summary lines here duplicates progress
        and leaks tool previews into flows that are expected to stay silent,
        such as ``hermes chat -q``.
        """
        return self.quiet_mode and not self.tool_progress_callback

    def _emit_status(self, message: str) -> None:
        """Emit a lifecycle status message to both CLI and gateway channels.

        CLI users see the message via ``_vprint(force=True)`` so it is always
        visible regardless of verbose/quiet mode. Gateway consumers receive
        it through ``status_callback("lifecycle", ...)``. This helper never
        raises — exceptions are swallowed so it cannot interrupt the
        retry/fallback logic.
        """
        try:
            self._vprint(f"{self.log_prefix}{message}", force=True)
        except Exception:
            pass
        if self.status_callback:
            try:
                self.status_callback("lifecycle", message)
            except Exception:
                # Best-effort: callback failures are logged, never raised.
                logger.debug("status_callback error in _emit_status", exc_info=True)

    def _current_main_runtime(self) -> Dict[str, str]:
        """Return the live main runtime for session-scoped auxiliary routing.

        Every value is coerced to a string (``"" or``) so downstream
        consumers never see ``None``.
        """
        return {
            "model": getattr(self, "model", "") or "",
            "provider": getattr(self, "provider", "") or "",
            "base_url": getattr(self, "base_url", "") or "",
            "api_key": getattr(self, "api_key", "") or "",
            "api_mode": getattr(self, "api_mode", "") or "",
        }
""" if not self.compression_enabled: return try: from agent.auxiliary_client import get_text_auxiliary_client from agent.model_metadata import get_model_context_length client, aux_model = get_text_auxiliary_client( "compression", main_runtime=self._current_main_runtime(), ) if client is None or not aux_model: msg = ( "⚠ No auxiliary LLM provider configured — context " "compression will drop middle turns without a summary. " "Run `hermes setup` or set OPENROUTER_API_KEY." ) self._compression_warning = msg self._emit_status(msg) logger.warning( "No auxiliary LLM provider for compression — " "summaries will be unavailable." ) return aux_base_url = str(getattr(client, "base_url", "")) aux_api_key = str(getattr(client, "api_key", "")) # Read user-configured context_length for the compression model. # Custom endpoints often don't support /models API queries so # get_model_context_length() falls through to the 128K default, # ignoring the explicit config value. Pass it as the highest- # priority hint so the configured value is always respected. _aux_cfg = (self.config or {}).get("auxiliary", {}).get("compression", {}) _aux_context_config = _aux_cfg.get("context_length") if isinstance(_aux_cfg, dict) else None if _aux_context_config is not None: try: _aux_context_config = int(_aux_context_config) except (TypeError, ValueError): _aux_context_config = None aux_context = get_model_context_length( aux_model, base_url=aux_base_url, api_key=aux_api_key, config_context_length=_aux_context_config, ) threshold = self.context_compressor.threshold_tokens if aux_context < threshold: # Suggest a threshold that would fit the aux model, # rounded down to a clean percentage. safe_pct = int((aux_context / self.context_compressor.context_length) * 100) msg = ( f"⚠ Compression model ({aux_model}) context " f"is {aux_context:,} tokens, but the main model's " f"compression threshold is {threshold:,} tokens. 
" f"Context compression will not be possible — the " f"content to summarise will exceed the auxiliary " f"model's context window.\n" f" Fix options (config.yaml):\n" f" 1. Use a larger compression model:\n" f" auxiliary:\n" f" compression:\n" f" model: \n" f" 2. Lower the compression threshold to fit " f"the current model:\n" f" compression:\n" f" threshold: 0.{safe_pct:02d}" ) self._compression_warning = msg self._emit_status(msg) logger.warning( "Auxiliary compression model %s has %d token context, " "below the main model's compression threshold of %d " "tokens — compression summaries will fail or be " "severely truncated.", aux_model, aux_context, threshold, ) except Exception as exc: logger.debug( "Compression feasibility check failed (non-fatal): %s", exc ) def _replay_compression_warning(self) -> None: """Re-send the compression warning through ``status_callback``. During ``__init__`` the gateway's ``status_callback`` is not yet wired, so ``_emit_status`` only reaches ``_vprint`` (CLI). This method is called once at the start of the first ``run_conversation()`` — by then the gateway has set the callback, so every platform (Telegram, Discord, Slack, etc.) receives the warning. """ msg = getattr(self, "_compression_warning", None) if msg and self.status_callback: try: self.status_callback("lifecycle", msg) except Exception: pass def _is_direct_openai_url(self, base_url: str = None) -> bool: """Return True when a base URL targets OpenAI's native API.""" url = (base_url or self._base_url_lower).lower() return "api.openai.com" in url and "openrouter" not in url def _is_openrouter_url(self) -> bool: """Return True when the base URL targets OpenRouter.""" return "openrouter" in self._base_url_lower @staticmethod def _model_requires_responses_api(model: str) -> bool: """Return True for models that require the Responses API path. GPT-5.x models are rejected on /v1/chat/completions by both OpenAI and OpenRouter (error: ``unsupported_api_for_model``). 
Detect these so the correct api_mode is set regardless of which provider is serving the model. """ m = model.lower() # Strip vendor prefix (e.g. "openai/gpt-5.4" → "gpt-5.4") if "/" in m: m = m.rsplit("/", 1)[-1] return m.startswith("gpt-5") @staticmethod def _provider_model_requires_responses_api( model: str, *, provider: Optional[str] = None, ) -> bool: """Return True when this provider/model pair should use Responses API.""" normalized_provider = (provider or "").strip().lower() if normalized_provider == "copilot": try: from hermes_cli.models import _should_use_copilot_responses_api return _should_use_copilot_responses_api(model) except Exception: # Fall back to the generic GPT-5 rule if Copilot-specific # logic is unavailable for any reason. pass return AIAgent._model_requires_responses_api(model) def _max_tokens_param(self, value: int) -> dict: """Return the correct max tokens kwarg for the current provider. OpenAI's newer models (gpt-4o, o-series, gpt-5+) require 'max_completion_tokens'. OpenRouter, local models, and older OpenAI models use 'max_tokens'. """ if self._is_direct_openai_url(): return {"max_completion_tokens": value} return {"max_tokens": value} def _has_content_after_think_block(self, content: str) -> bool: """ Check if content has actual text after any reasoning/thinking blocks. This detects cases where the model only outputs reasoning but no actual response, which indicates an incomplete generation that should be retried. Must stay in sync with _strip_think_blocks() tag variants. 
    def _has_content_after_think_block(self, content: str) -> bool:
        """
        Check if content has actual text after any reasoning/thinking blocks.

        This detects cases where the model only outputs reasoning but no
        actual response, which indicates an incomplete generation that
        should be retried.

        Must stay in sync with _strip_think_blocks() tag variants.

        Args:
            content: The assistant message content to check

        Returns:
            True if there's meaningful content after think blocks, False otherwise
        """
        if not content:
            return False
        # Remove all reasoning tag variants (must match _strip_think_blocks)
        cleaned = self._strip_think_blocks(content)
        # Check if there's any non-whitespace content remaining
        return bool(cleaned.strip())

    def _strip_think_blocks(self, content: str) -> str:
        """Remove reasoning/thinking blocks from content, returning only visible text.

        NOTE(review): the regex literals below appear as empty patterns
        (``r'.*?'``) in this copy — the think/reasoning tag names they
        originally matched seem to have been stripped from the source text.
        Verify against VCS before modifying; the code is left byte-identical.
        """
        if not content:
            return ""
        # Strip all reasoning tag variants: , , ,
        # , , (Gemma 4)
        content = re.sub(r'.*?', '', content, flags=re.DOTALL)
        content = re.sub(r'.*?', '', content, flags=re.DOTALL | re.IGNORECASE)
        content = re.sub(r'.*?', '', content, flags=re.DOTALL)
        content = re.sub(r'.*?', '', content, flags=re.DOTALL)
        content = re.sub(r'.*?', '', content, flags=re.DOTALL | re.IGNORECASE)
        content = re.sub(r'\s*', '', content, flags=re.IGNORECASE)
        return content

    @staticmethod
    def _has_natural_response_ending(content: str) -> bool:
        """Heuristic: does visible assistant text look intentionally finished?

        True when the text ends with a closed code fence or a terminal
        punctuation mark (ASCII or CJK full-width variants).
        """
        if not content:
            return False
        stripped = content.rstrip()
        if not stripped:
            return False
        # A trailing closed code fence counts as a deliberate ending.
        if stripped.endswith("```"):
            return True
        return stripped[-1] in '.!?:)"\']}。!?:)】」』》'

    def _is_ollama_glm_backend(self) -> bool:
        """Detect the narrow backend family affected by Ollama/GLM stop misreports."""
        model_lower = (self.model or "").lower()
        provider_lower = (self.provider or "").lower()
        # Only GLM-family models (or the ZAI provider) are affected.
        if "glm" not in model_lower and provider_lower != "zai":
            return False
        # :11434 is Ollama's default port; also match explicit "ollama" URLs.
        if "ollama" in self._base_url_lower or ":11434" in self._base_url_lower:
            return True
        # Any other local endpoint hosting a GLM model is treated the same.
        return bool(self.base_url and is_local_endpoint(self.base_url))
    def _should_treat_stop_as_truncated(
        self,
        finish_reason: str,
        assistant_message,
        messages: Optional[list] = None,
    ) -> bool:
        """Detect conservative stop->length misreports for Ollama-hosted GLM models.

        Returns True only when ALL of these hold: the finish reason is
        "stop" on the chat-completions path, the backend is the narrow
        Ollama/GLM family, at least one tool message exists in the turn,
        the assistant reply has no tool calls, and its visible text is
        substantial (>= 20 chars, contains whitespace) yet does not look
        intentionally finished.
        """
        # Only the "stop" finish reason on chat_completions can be misreported.
        if finish_reason != "stop" or self.api_mode != "chat_completions":
            return False
        if not self._is_ollama_glm_backend():
            return False
        # Require at least one tool turn — the misreport only occurs
        # mid tool-calling conversations.
        if not any(
            isinstance(msg, dict) and msg.get("role") == "tool"
            for msg in (messages or [])
        ):
            return False
        # A reply that carries tool calls is a normal continuation, not a cut.
        if assistant_message is None or getattr(assistant_message, "tool_calls", None):
            return False
        content = getattr(assistant_message, "content", None)
        if not isinstance(content, str):
            return False
        visible_text = self._strip_think_blocks(content).strip()
        if not visible_text:
            return False
        # Very short or single-token outputs are too ambiguous to judge.
        if len(visible_text) < 20 or not re.search(r"\s", visible_text):
            return False
        return not self._has_natural_response_ending(visible_text)

    def _looks_like_codex_intermediate_ack(
        self,
        user_message: str,
        assistant_content: str,
        messages: List[Dict[str, Any]],
    ) -> bool:
        """Detect a planning/ack message that should continue instead of ending the turn.

        Heuristic: a short assistant reply phrased in the future tense
        ("I'll...", "let me...") that mentions a workspace-style action,
        while no tool calls have happened yet this turn.
        """
        # If any tool turn already happened, the model is past the ack phase.
        if any(isinstance(msg, dict) and msg.get("role") == "tool" for msg in messages):
            return False
        assistant_text = self._strip_think_blocks(assistant_content or "").strip().lower()
        if not assistant_text:
            return False
        # Long replies are substantive answers, not acknowledgements.
        if len(assistant_text) > 1200:
            return False
        # Future-tense commitment markers ("I'll", curly or straight quote).
        has_future_ack = bool(
            re.search(r"\b(i['’]ll|i will|let me|i can do that|i can help with that)\b", assistant_text)
        )
        if not has_future_ack:
            return False
        action_markers = (
            "look into", "look at", "inspect", "scan", "check", "analyz",
            "review", "explore", "read", "open", "run", "test", "fix",
            "debug", "search", "find", "walkthrough", "report back",
            "summarize",
        )
        workspace_markers = (
            "directory", "current directory", "current dir", "cwd", "repo",
            "repository", "codebase", "project", "folder", "filesystem",
            "file tree", "files", "path",
        )
        user_text = (user_message or "").strip().lower()
        user_targets_workspace = (
            any(marker in user_text for marker in workspace_markers)
            or "~/" in user_text
            or "/" in user_text
        )
        assistant_mentions_action = any(marker in assistant_text for marker in action_markers)
        assistant_targets_workspace = any(
            marker in assistant_text for marker in workspace_markers
        )
        return (user_targets_workspace or assistant_targets_workspace) and assistant_mentions_action
    def _extract_reasoning(self, assistant_message) -> Optional[str]:
        """
        Extract reasoning/thinking content from an assistant message.

        OpenRouter and various providers can return reasoning in multiple formats:
        1. message.reasoning - Direct reasoning field (DeepSeek, Qwen, etc.)
        2. message.reasoning_content - Alternative field (Moonshot AI, Novita, etc.)
        3. message.reasoning_details - Array of {type, summary, ...} objects
           (OpenRouter unified)

        Args:
            assistant_message: The assistant message object from the API response

        Returns:
            Combined reasoning text, or None if no reasoning found
        """
        reasoning_parts = []

        # Check direct reasoning field
        if hasattr(assistant_message, 'reasoning') and assistant_message.reasoning:
            reasoning_parts.append(assistant_message.reasoning)

        # Check reasoning_content field (alternative name used by some providers)
        if hasattr(assistant_message, 'reasoning_content') and assistant_message.reasoning_content:
            # Don't duplicate if same as reasoning
            if assistant_message.reasoning_content not in reasoning_parts:
                reasoning_parts.append(assistant_message.reasoning_content)

        # Check reasoning_details array (OpenRouter unified format)
        # Format: [{"type": "reasoning.summary", "summary": "...", ...}, ...]
        if hasattr(assistant_message, 'reasoning_details') and assistant_message.reasoning_details:
            for detail in assistant_message.reasoning_details:
                if isinstance(detail, dict):
                    # Extract summary from reasoning detail object; fall back
                    # through the known alternative key names.
                    summary = (
                        detail.get('summary')
                        or detail.get('thinking')
                        or detail.get('content')
                        or detail.get('text')
                    )
                    if summary and summary not in reasoning_parts:
                        reasoning_parts.append(summary)

        # Some providers embed reasoning directly inside assistant content
        # instead of returning structured reasoning fields. Only fall back
        # to inline extraction when no structured reasoning was found.
        # NOTE(review): the inline patterns below appear as bare "(.*?)" in
        # this copy — the surrounding tag literals seem stripped from the
        # source text. Verify against VCS; code left byte-identical.
        content = getattr(assistant_message, "content", None)
        if not reasoning_parts and isinstance(content, str) and content:
            inline_patterns = (
                r"(.*?)",
                r"(.*?)",
                r"(.*?)",
                r"(.*?)",
                r"(.*?)",
            )
            for pattern in inline_patterns:
                flags = re.DOTALL | re.IGNORECASE
                for block in re.findall(pattern, content, flags=flags):
                    cleaned = block.strip()
                    if cleaned and cleaned not in reasoning_parts:
                        reasoning_parts.append(cleaned)

        # Combine all reasoning parts
        if reasoning_parts:
            return "\n\n".join(reasoning_parts)
        return None

    def _cleanup_task_resources(self, task_id: str) -> None:
        """Clean up VM and browser resources for a given task.

        Skips ``cleanup_vm`` when the active terminal environment is marked
        persistent (``persistent_filesystem=True``) so that long-lived
        sandbox containers survive between turns. The idle reaper in
        ``terminal_tool._cleanup_inactive_envs`` still tears them down once
        ``terminal.lifetime_seconds`` is exceeded.

        Non-persistent backends are torn down per-turn as before to prevent
        resource leakage (the original intent of this hook for the Morph
        backend, see commit fbd3a2fd).
        """
        try:
            if is_persistent_env(task_id):
                if self.verbose_logging:
                    logging.debug(
                        f"Skipping per-turn cleanup_vm for persistent env {task_id}; "
                        f"idle reaper will handle it."
                    )
            else:
                cleanup_vm(task_id)
        except Exception as e:
            # Cleanup failures are non-fatal; only surface in verbose mode.
            if self.verbose_logging:
                logging.warning(f"Failed to cleanup VM for task {task_id}: {e}")
        try:
            cleanup_browser(task_id)
        except Exception as e:
            if self.verbose_logging:
                logging.warning(f"Failed to cleanup browser for task {task_id}: {e}")
    # ------------------------------------------------------------------
    # Background memory/skill review
    # ------------------------------------------------------------------
    # Prompt injected as the next user turn of a forked review agent when
    # only the memory trigger fired (see _spawn_background_review).
    _MEMORY_REVIEW_PROMPT = (
        "Review the conversation above and consider saving to memory if appropriate.\n\n"
        "Focus on:\n"
        "1. Has the user revealed things about themselves — their persona, desires, "
        "preferences, or personal details worth remembering?\n"
        "2. Has the user expressed expectations about how you should behave, their work "
        "style, or ways they want you to operate?\n\n"
        "If something stands out, save it using the memory tool. "
        "If nothing is worth saving, just say 'Nothing to save.' and stop."
    )

    # Prompt used when only the skill trigger fired.
    _SKILL_REVIEW_PROMPT = (
        "Review the conversation above and consider saving or updating a skill if appropriate.\n\n"
        "Focus on: was a non-trivial approach used to complete a task that required trial "
        "and error, or changing course due to experiential findings along the way, or did "
        "the user expect or desire a different method or outcome?\n\n"
        "If a relevant skill already exists, update it with what you learned. "
        "Otherwise, create a new skill if the approach is reusable.\n"
        "If nothing is worth saving, just say 'Nothing to save.' and stop."
    )

    # Prompt used when both the memory and skill triggers fired in one turn.
    _COMBINED_REVIEW_PROMPT = (
        "Review the conversation above and consider two things:\n\n"
        "**Memory**: Has the user revealed things about themselves — their persona, "
        "desires, preferences, or personal details? Has the user expressed expectations "
        "about how you should behave, their work style, or ways they want you to operate? "
        "If so, save using the memory tool.\n\n"
        "**Skills**: Was a non-trivial approach used to complete a task that required trial "
        "and error, or changing course due to experiential findings along the way, or did "
        "the user expect or desire a different method or outcome? If a relevant skill "
        "already exists, update it. Otherwise, create a new one if the approach is reusable.\n\n"
        "Only act if there's something genuinely worth saving. "
        "If nothing stands out, just say 'Nothing to save.' and stop."
    )
    def _spawn_background_review(
        self,
        messages_snapshot: List[Dict],
        review_memory: bool = False,
        review_skills: bool = False,
    ) -> None:
        """Spawn a background thread to review the conversation for memory/skill saves.

        Creates a full AIAgent fork with the same model, tools, and context
        as the main session. The review prompt is appended as the next user
        turn in the forked conversation. Writes directly to the shared
        memory/skill stores. Never modifies the main conversation history or
        produces user-visible output.

        Args:
            messages_snapshot: Copy of the conversation history to review.
            review_memory: True when the memory trigger fired.
            review_skills: True when the skill trigger fired.
        """
        import threading

        # Pick the right prompt based on which triggers fired
        if review_memory and review_skills:
            prompt = self._COMBINED_REVIEW_PROMPT
        elif review_memory:
            prompt = self._MEMORY_REVIEW_PROMPT
        else:
            prompt = self._SKILL_REVIEW_PROMPT

        def _run_review():
            import contextlib, os as _os
            review_agent = None
            try:
                # Silence the forked agent entirely — its stdout/stderr would
                # otherwise interleave with the main session's output.
                with open(_os.devnull, "w") as _devnull, \
                        contextlib.redirect_stdout(_devnull), \
                        contextlib.redirect_stderr(_devnull):
                    review_agent = AIAgent(
                        model=self.model,
                        max_iterations=8,
                        quiet_mode=True,
                        platform=self.platform,
                        provider=self.provider,
                    )
                    # Share the live stores so saves land in the main session's
                    # memory; disable nudges inside the fork.
                    review_agent._memory_store = self._memory_store
                    review_agent._memory_enabled = self._memory_enabled
                    review_agent._user_profile_enabled = self._user_profile_enabled
                    review_agent._memory_nudge_interval = 0
                    review_agent._skill_nudge_interval = 0
                    review_agent.run_conversation(
                        user_message=prompt,
                        conversation_history=messages_snapshot,
                    )
                # Scan the review agent's messages for successful tool actions
                # and surface a compact summary to the user.
                actions = []
                for msg in getattr(review_agent, "_session_messages", []):
                    if not isinstance(msg, dict) or msg.get("role") != "tool":
                        continue
                    try:
                        data = json.loads(msg.get("content", "{}"))
                    except (json.JSONDecodeError, TypeError):
                        continue
                    if not data.get("success"):
                        continue
                    message = data.get("message", "")
                    target = data.get("target", "")
                    if "created" in message.lower():
                        actions.append(message)
                    elif "updated" in message.lower():
                        actions.append(message)
                    elif "added" in message.lower() or (target and "add" in message.lower()):
                        label = "Memory" if target == "memory" else "User profile" if target == "user" else target
                        actions.append(f"{label} updated")
                    elif "Entry added" in message:
                        label = "Memory" if target == "memory" else "User profile" if target == "user" else target
                        actions.append(f"{label} updated")
                    elif "removed" in message.lower() or "replaced" in message.lower():
                        label = "Memory" if target == "memory" else "User profile" if target == "user" else target
                        actions.append(f"{label} updated")
                if actions:
                    # dict.fromkeys de-duplicates while preserving order.
                    summary = " · ".join(dict.fromkeys(actions))
                    self._safe_print(f" 💾 {summary}")
                    _bg_cb = self.background_review_callback
                    if _bg_cb:
                        try:
                            _bg_cb(f"💾 {summary}")
                        except Exception:
                            pass
            except Exception as e:
                logger.debug("Background memory/skill review failed: %s", e)
            finally:
                # Close all resources (httpx client, subprocesses, etc.) so
                # GC doesn't try to clean them up on a dead asyncio event
                # loop (which produces "Event loop is closed" errors).
                if review_agent is not None:
                    try:
                        review_agent.close()
                    except Exception:
                        pass

        t = threading.Thread(target=_run_review, daemon=True, name="bg-review")
        t.start()
When an override is configured for the active turn, mutate the in-memory messages list in place so both persistence and returned history stay clean. """ idx = getattr(self, "_persist_user_message_idx", None) override = getattr(self, "_persist_user_message_override", None) if override is None or idx is None: return if 0 <= idx < len(messages): msg = messages[idx] if isinstance(msg, dict) and msg.get("role") == "user": msg["content"] = override def _persist_session(self, messages: List[Dict], conversation_history: List[Dict] = None): """Save session state to both JSON log and SQLite on any exit path. Ensures conversations are never lost, even on errors or early returns. Skipped when ``persist_session=False`` (ephemeral helper flows). """ if not self.persist_session: return self._apply_persist_user_message_override(messages) self._session_messages = messages self._save_session_log(messages) self._flush_messages_to_session_db(messages, conversation_history) def _flush_messages_to_session_db(self, messages: List[Dict], conversation_history: List[Dict] = None): """Persist any un-flushed messages to the SQLite session store. Uses _last_flushed_db_idx to track which messages have already been written, so repeated calls (from multiple exit paths) only write truly new messages — preventing the duplicate-write bug (#860). """ if not self._session_db: return self._apply_persist_user_message_override(messages) try: # If create_session() failed at startup (e.g. transient lock), the # session row may not exist yet. ensure_session() uses INSERT OR # IGNORE so it is a no-op when the row is already there. 
    def _flush_messages_to_session_db(self, messages: List[Dict], conversation_history: List[Dict] = None):
        """Persist any un-flushed messages to the SQLite session store.

        Uses _last_flushed_db_idx to track which messages have already been
        written, so repeated calls (from multiple exit paths) only write
        truly new messages — preventing the duplicate-write bug (#860).
        """
        if not self._session_db:
            return
        self._apply_persist_user_message_override(messages)
        try:
            # If create_session() failed at startup (e.g. transient lock), the
            # session row may not exist yet. ensure_session() uses INSERT OR
            # IGNORE so it is a no-op when the row is already there.
            self._session_db.ensure_session(
                self.session_id,
                source=self.platform or "cli",
                model=self.model,
            )
            # Never re-flush messages that came in as prior history.
            start_idx = len(conversation_history) if conversation_history else 0
            flush_from = max(start_idx, self._last_flushed_db_idx)
            for msg in messages[flush_from:]:
                role = msg.get("role", "unknown")
                content = msg.get("content")
                tool_calls_data = None
                # NOTE(review): `hasattr(msg, "tool_calls")` can only be true
                # for object-style messages; plain dicts always take the
                # elif branch — confirm whether object messages actually
                # reach this path.
                if hasattr(msg, "tool_calls") and msg.tool_calls:
                    tool_calls_data = [
                        {"name": tc.function.name, "arguments": tc.function.arguments}
                        for tc in msg.tool_calls
                    ]
                elif isinstance(msg.get("tool_calls"), list):
                    tool_calls_data = msg["tool_calls"]
                self._session_db.append_message(
                    session_id=self.session_id,
                    role=role,
                    content=content,
                    tool_name=msg.get("tool_name"),
                    tool_calls=tool_calls_data,
                    tool_call_id=msg.get("tool_call_id"),
                    finish_reason=msg.get("finish_reason"),
                    # Reasoning fields are only meaningful on assistant turns.
                    reasoning=msg.get("reasoning") if role == "assistant" else None,
                    reasoning_details=msg.get("reasoning_details") if role == "assistant" else None,
                    codex_reasoning_items=msg.get("codex_reasoning_items") if role == "assistant" else None,
                )
            self._last_flushed_db_idx = len(messages)
        except Exception as e:
            # Persistence is best-effort; never let a DB failure kill the turn.
            logger.warning("Session DB append_message failed: %s", e)
Args: messages: Full message list Returns: Messages up to the last complete assistant turn (ending with user/tool message) """ if not messages: return [] # Find the index of the last assistant message last_assistant_idx = None for i in range(len(messages) - 1, -1, -1): if messages[i].get("role") == "assistant": last_assistant_idx = i break if last_assistant_idx is None: # No assistant message found, return all messages return messages.copy() # Return everything up to (not including) the last assistant message return messages[:last_assistant_idx] def _format_tools_for_system_message(self) -> str: """ Format tool definitions for the system message in the trajectory format. Returns: str: JSON string representation of tool definitions """ if not self.tools: return "[]" # Convert tool definitions to the format expected in trajectories formatted_tools = [] for tool in self.tools: func = tool["function"] formatted_tool = { "name": func["name"], "description": func.get("description", ""), "parameters": func.get("parameters", {}), "required": None # Match the format in the example } formatted_tools.append(formatted_tool) return json.dumps(formatted_tools, ensure_ascii=False) def _convert_to_trajectory_format(self, messages: List[Dict[str, Any]], user_query: str, completed: bool) -> List[Dict[str, Any]]: """ Convert internal message format to trajectory format for saving. Args: messages (List[Dict]): Internal message history user_query (str): Original user query completed (bool): Whether the conversation completed successfully Returns: List[Dict]: Messages in trajectory format """ trajectory = [] # Add system message with tool definitions system_msg = ( "You are a function calling AI model. You are provided with function signatures within XML tags. " "You may call one or more functions to assist with the user query. If available tools are not relevant in assisting " "with user query, just respond in natural conversational language. 
    def _convert_to_trajectory_format(self, messages: List[Dict[str, Any]], user_query: str, completed: bool) -> List[Dict[str, Any]]:
        """
        Convert internal message format to trajectory format for saving.

        NOTE(review): several string literals in this method appear to have
        lost their XML tag names (e.g. the tool/tool_call/think wrappers) in
        this copy of the source, and the ``if "" not in content`` checks are
        always False as written. Verify against VCS before editing; the code
        below is left byte-identical.

        Args:
            messages (List[Dict]): Internal message history
            user_query (str): Original user query
            completed (bool): Whether the conversation completed successfully

        Returns:
            List[Dict]: Messages in trajectory format
        """
        trajectory = []

        # Add system message with tool definitions
        system_msg = (
            "You are a function calling AI model. You are provided with function signatures within XML tags. "
            "You may call one or more functions to assist with the user query. If available tools are not relevant in assisting "
            "with user query, just respond in natural conversational language. Don't make assumptions about what values to plug "
            "into functions. After calling & executing the functions, you will be provided with function results within "
            " XML tags. Here are the available tools:\n"
            f"\n{self._format_tools_for_system_message()}\n\n"
            "For each function call return a JSON object, with the following pydantic model json schema for each:\n"
            "{'title': 'FunctionCall', 'type': 'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}, "
            "'arguments': {'title': 'Arguments', 'type': 'object'}}, 'required': ['name', 'arguments']}\n"
            "Each function call should be enclosed within XML tags.\n"
            "Example:\n\n{'name': ,'arguments': }\n"
        )
        trajectory.append({
            "from": "system",
            "value": system_msg
        })

        # Add the actual user prompt (from the dataset) as the first human message
        trajectory.append({
            "from": "human",
            "value": user_query
        })

        # Skip the first message (the user query) since we already added it above.
        # Prefill messages are injected at API-call time only (not in the messages
        # list), so no offset adjustment is needed here.
        i = 1
        while i < len(messages):
            msg = messages[i]
            if msg["role"] == "assistant":
                # Check if this message has tool calls
                if "tool_calls" in msg and msg["tool_calls"]:
                    # Format assistant message with tool calls
                    # Add tags around reasoning for trajectory storage
                    content = ""
                    # Prepend reasoning in tags if available (native thinking tokens)
                    if msg.get("reasoning") and msg["reasoning"].strip():
                        content = f"\n{msg['reasoning']}\n\n"
                    if msg.get("content") and msg["content"].strip():
                        # Convert any tags to tags
                        # (used when native thinking is disabled and model reasons via XML)
                        content += convert_scratchpad_to_think(msg["content"]) + "\n"
                    # Add tool calls wrapped in XML tags
                    for tool_call in msg["tool_calls"]:
                        if not tool_call or not isinstance(tool_call, dict):
                            continue
                        # Parse arguments - should always succeed since we validate during conversation
                        # but keep try-except as safety net
                        try:
                            arguments = json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
                        except json.JSONDecodeError:
                            # This shouldn't happen since we validate and retry during conversation,
                            # but if it does, log warning and use empty dict
                            logging.warning(f"Unexpected invalid JSON in trajectory conversion: {tool_call['function']['arguments'][:100]}")
                            arguments = {}
                        tool_call_json = {
                            "name": tool_call["function"]["name"],
                            "arguments": arguments
                        }
                        content += f"\n{json.dumps(tool_call_json, ensure_ascii=False)}\n\n"
                    # Ensure every gpt turn has a block (empty if no reasoning)
                    # so the format is consistent for training data
                    if "" not in content:
                        content = "\n\n" + content
                    trajectory.append({
                        "from": "gpt",
                        "value": content.rstrip()
                    })
                    # Collect all subsequent tool responses
                    tool_responses = []
                    j = i + 1
                    while j < len(messages) and messages[j]["role"] == "tool":
                        tool_msg = messages[j]
                        # Format tool response with XML tags
                        tool_response = "\n"
                        # Try to parse tool content as JSON if it looks like JSON
                        tool_content = tool_msg["content"]
                        try:
                            if tool_content.strip().startswith(("{", "[")):
                                tool_content = json.loads(tool_content)
                        except (json.JSONDecodeError, AttributeError):
                            pass  # Keep as string if not valid JSON
                        # Pair each tool response with its call by position.
                        tool_index = len(tool_responses)
                        tool_name = (
                            msg["tool_calls"][tool_index]["function"]["name"]
                            if tool_index < len(msg["tool_calls"])
                            else "unknown"
                        )
                        tool_response += json.dumps({
                            "tool_call_id": tool_msg.get("tool_call_id", ""),
                            "name": tool_name,
                            "content": tool_content
                        }, ensure_ascii=False)
                        tool_response += "\n"
                        tool_responses.append(tool_response)
                        j += 1
                    # Add all tool responses as a single message
                    if tool_responses:
                        trajectory.append({
                            "from": "tool",
                            "value": "\n".join(tool_responses)
                        })
                    i = j - 1  # Skip the tool messages we just processed
                else:
                    # Regular assistant message without tool calls
                    # Add tags around reasoning for trajectory storage
                    content = ""
                    # Prepend reasoning in tags if available (native thinking tokens)
                    if msg.get("reasoning") and msg["reasoning"].strip():
                        content = f"\n{msg['reasoning']}\n\n"
                    # Convert any tags to tags
                    # (used when native thinking is disabled and model reasons via XML)
                    raw_content = msg["content"] or ""
                    content += convert_scratchpad_to_think(raw_content)
                    # Ensure every gpt turn has a block (empty if no reasoning)
                    if "" not in content:
                        content = "\n\n" + content
                    trajectory.append({
                        "from": "gpt",
                        "value": content.strip()
                    })
            elif msg["role"] == "user":
                trajectory.append({
                    "from": "human",
                    "value": msg["content"]
                })
            i += 1
        return trajectory
    def _save_trajectory(self, messages: List[Dict[str, Any]], user_query: str, completed: bool):
        """
        Save conversation trajectory to JSONL file.

        No-op unless ``self.save_trajectories`` is enabled.

        Args:
            messages (List[Dict]): Complete message history
            user_query (str): Original user query
            completed (bool): Whether the conversation completed successfully
        """
        if not self.save_trajectories:
            return
        trajectory = self._convert_to_trajectory_format(messages, user_query, completed)
        _save_trajectory_to_file(trajectory, self.model, completed)

    @staticmethod
    def _summarize_api_error(error: Exception) -> str:
        """Extract a human-readable one-liner from an API error.

        Handles Cloudflare HTML error pages (502, 503, etc.) by pulling the
        <title> tag instead of dumping raw HTML. Falls back to a truncated
        str(error) for everything else.
        """
        import re as _re
        raw = str(error)
        # Cloudflare / proxy HTML pages: grab the <title> for a clean summary
        if "<!DOCTYPE" in raw or "<html" in raw:
            m = _re.search(r"<title[^>]*>([^<]+)", raw, _re.IGNORECASE)
            title = m.group(1).strip() if m else "HTML error page (title not found)"
            # Also grab Cloudflare Ray ID if present
            # NOTE(review): the Ray-ID pattern looks truncated in this copy
            # (a tag name before "]*>" appears missing) — verify against VCS.
            ray = _re.search(r"Cloudflare Ray ID:\s*]*>([^<]+)", raw)
            ray_id = ray.group(1).strip() if ray else None
            status_code = getattr(error, "status_code", None)
            parts = []
            if status_code:
                parts.append(f"HTTP {status_code}")
            parts.append(title)
            if ray_id:
                parts.append(f"Ray {ray_id}")
            return " — ".join(parts)
        # JSON body errors from OpenAI/Anthropic SDKs
        body = getattr(error, "body", None)
        if isinstance(body, dict):
            msg = body.get("error", {}).get("message") if isinstance(body.get("error"), dict) else body.get("message")
            if msg:
                status_code = getattr(error, "status_code", None)
                prefix = f"HTTP {status_code}: " if status_code else ""
                return f"{prefix}{msg[:300]}"
        # Fallback: truncate the raw string but give more room than 200 chars
        status_code = getattr(error, "status_code", None)
        prefix = f"HTTP {status_code}: " if status_code else ""
        return f"{prefix}{raw[:500]}"

    def _mask_api_key_for_logs(self, key: Optional[str]) -> Optional[str]:
        """Return a log-safe rendering of an API key (or None passthrough).

        Short keys are fully masked; longer ones keep the first 8 and last 4
        characters for identification.
        """
        if not key:
            return None
        if len(key) <= 12:
            return "***"
        return f"{key[:8]}...{key[-4:]}"
_clean_error_message(self, error_msg: str) -> str: """ Clean up error messages for user display, removing HTML content and truncating. Args: error_msg: Raw error message from API or exception Returns: Clean, user-friendly error message """ if not error_msg: return "Unknown error" # Remove HTML content (common with CloudFlare and gateway error pages) if error_msg.strip().startswith(' 150: cleaned = cleaned[:150] + "..." return cleaned @staticmethod def _extract_api_error_context(error: Exception) -> Dict[str, Any]: """Extract structured rate-limit details from provider errors.""" context: Dict[str, Any] = {} body = getattr(error, "body", None) payload = None if isinstance(body, dict): payload = body.get("error") if isinstance(body.get("error"), dict) else body if isinstance(payload, dict): reason = payload.get("code") or payload.get("error") if isinstance(reason, str) and reason.strip(): context["reason"] = reason.strip() message = payload.get("message") or payload.get("error_description") if isinstance(message, str) and message.strip(): context["message"] = message.strip() for key in ("resets_at", "reset_at"): value = payload.get(key) if value not in (None, ""): context["reset_at"] = value break retry_after = payload.get("retry_after") if retry_after not in (None, "") and "reset_at" not in context: try: context["reset_at"] = time.time() + float(retry_after) except (TypeError, ValueError): pass response = getattr(error, "response", None) headers = getattr(response, "headers", None) if headers: retry_after = headers.get("retry-after") or headers.get("Retry-After") if retry_after and "reset_at" not in context: try: context["reset_at"] = time.time() + float(retry_after) except (TypeError, ValueError): pass ratelimit_reset = headers.get("x-ratelimit-reset") if ratelimit_reset and "reset_at" not in context: context["reset_at"] = ratelimit_reset if "message" not in context: raw_message = str(error).strip() if raw_message: context["message"] = raw_message[:500] if 
"reset_at" not in context: message = context.get("message") or "" if isinstance(message, str): delay_match = re.search(r"quotaResetDelay[:\s\"]+(\\d+(?:\\.\\d+)?)(ms|s)", message, re.IGNORECASE) if delay_match: value = float(delay_match.group(1)) seconds = value / 1000.0 if delay_match.group(2).lower() == "ms" else value context["reset_at"] = time.time() + seconds else: sec_match = re.search( r"retry\s+(?:after\s+)?(\d+(?:\.\d+)?)\s*(?:sec|secs|seconds|s\b)", message, re.IGNORECASE, ) if sec_match: context["reset_at"] = time.time() + float(sec_match.group(1)) return context def _usage_summary_for_api_request_hook(self, response: Any) -> Optional[Dict[str, Any]]: """Token buckets for ``post_api_request`` plugins (no raw ``response`` object).""" if response is None: return None raw_usage = getattr(response, "usage", None) if not raw_usage: return None from dataclasses import asdict cu = normalize_usage(raw_usage, provider=self.provider, api_mode=self.api_mode) summary = asdict(cu) summary.pop("raw_usage", None) summary["prompt_tokens"] = cu.prompt_tokens summary["total_tokens"] = cu.total_tokens return summary def _dump_api_request_debug( self, api_kwargs: Dict[str, Any], *, reason: str, error: Optional[Exception] = None, ) -> Optional[Path]: """ Dump a debug-friendly HTTP request record for the active inference API. Captures the request body from api_kwargs (excluding transport-only keys like timeout). Intended for debugging provider-side 4xx failures where retries are not useful. 
""" try: body = copy.deepcopy(api_kwargs) body.pop("timeout", None) body = {k: v for k, v in body.items() if v is not None} api_key = None try: api_key = getattr(self.client, "api_key", None) except Exception as e: logger.debug("Could not extract API key for debug dump: %s", e) dump_payload: Dict[str, Any] = { "timestamp": datetime.now().isoformat(), "session_id": self.session_id, "reason": reason, "request": { "method": "POST", "url": f"{self.base_url.rstrip('/')}{'/responses' if self.api_mode == 'codex_responses' else '/chat/completions'}", "headers": { "Authorization": f"Bearer {self._mask_api_key_for_logs(api_key)}", "Content-Type": "application/json", }, "body": body, }, } if error is not None: error_info: Dict[str, Any] = { "type": type(error).__name__, "message": str(error), } for attr_name in ("status_code", "request_id", "code", "param", "type"): attr_value = getattr(error, attr_name, None) if attr_value is not None: error_info[attr_name] = attr_value body_attr = getattr(error, "body", None) if body_attr is not None: error_info["body"] = body_attr response_obj = getattr(error, "response", None) if response_obj is not None: try: error_info["response_status"] = getattr(response_obj, "status_code", None) error_info["response_text"] = response_obj.text except Exception as e: logger.debug("Could not extract error response details: %s", e) dump_payload["error"] = error_info timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f") dump_file = self.logs_dir / f"request_dump_{self.session_id}_{timestamp}.json" dump_file.write_text( json.dumps(dump_payload, ensure_ascii=False, indent=2, default=str), encoding="utf-8", ) self._vprint(f"{self.log_prefix}🧾 Request debug dump written to: {dump_file}") if env_var_enabled("HERMES_DUMP_REQUEST_STDOUT"): print(json.dumps(dump_payload, ensure_ascii=False, indent=2, default=str)) return dump_file except Exception as dump_error: if self.verbose_logging: logging.warning(f"Failed to dump API request debug payload: {dump_error}") 
return None @staticmethod def _clean_session_content(content: str) -> str: """Convert REASONING_SCRATCHPAD to think tags and clean up whitespace.""" if not content: return content content = convert_scratchpad_to_think(content) content = re.sub(r'\n+()', r'\n\1', content) content = re.sub(r'()\n+', r'\1\n', content) return content.strip() def _save_session_log(self, messages: List[Dict[str, Any]] = None): """ Save the full raw session to a JSON file. Stores every message exactly as the agent sees it: user messages, assistant messages (with reasoning, finish_reason, tool_calls), tool responses (with tool_call_id, tool_name), and injected system messages (compression summaries, todo snapshots, etc.). REASONING_SCRATCHPAD tags are converted to blocks for consistency. Overwritten after each turn so it always reflects the latest state. """ messages = messages or self._session_messages if not messages: return try: # Clean assistant content for session logs cleaned = [] for msg in messages: if msg.get("role") == "assistant" and msg.get("content"): msg = dict(msg) msg["content"] = self._clean_session_content(msg["content"]) cleaned.append(msg) # Guard: never overwrite a larger session log with fewer messages. # This protects against data loss when --resume loads a session whose # messages weren't fully written to SQLite — the resumed agent starts # with partial history and would otherwise clobber the full JSON log. 
            # Compare with the log already on disk before overwriting it.
            if self.session_log_file.exists():
                try:
                    existing = json.loads(self.session_log_file.read_text(encoding="utf-8"))
                    existing_count = existing.get("message_count", len(existing.get("messages", [])))
                    if existing_count > len(cleaned):
                        logging.debug(
                            "Skipping session log overwrite: existing has %d messages, current has %d",
                            existing_count,
                            len(cleaned),
                        )
                        return
                except Exception:
                    pass  # corrupted existing file — allow the overwrite
            entry = {
                "session_id": self.session_id,
                "model": self.model,
                "base_url": self.base_url,
                "platform": self.platform,
                "session_start": self.session_start.isoformat(),
                "last_updated": datetime.now().isoformat(),
                "system_prompt": self._cached_system_prompt or "",
                "tools": self.tools or [],
                "message_count": len(cleaned),
                "messages": cleaned,
            }
            # Atomic write prevents a partially written log on crash/interrupt.
            atomic_json_write(
                self.session_log_file,
                entry,
                indent=2,
                default=str,
            )
        except Exception as e:
            if self.verbose_logging:
                logging.warning(f"Failed to save session log: {e}")

    def interrupt(self, message: str = None) -> None:
        """
        Request the agent to interrupt its current tool-calling loop.

        Call this from another thread (e.g., input handler, message receiver)
        to gracefully stop the agent and process a new message.
        Also signals long-running tool executions (e.g. terminal commands)
        to terminate early, so the agent can respond immediately.

        Args:
            message: Optional new message that triggered the interrupt.
                If provided, the agent will include this in its response
                context.

        Example (CLI):
            # In a separate input thread:
            if user_typed_something:
                agent.interrupt(user_input)

        Example (Messaging):
            # When new message arrives for active session:
            if session_has_running_agent:
                running_agent.interrupt(new_message.text)
        """
        self._interrupt_requested = True
        self._interrupt_message = message
        # Signal all tools to abort any in-flight operations immediately.
        # Scope the interrupt to this agent's execution thread so other
        # agents running in the same process (gateway) are not affected.
        if self._execution_thread_id is not None:
            _set_interrupt(True, self._execution_thread_id)
            self._interrupt_thread_signal_pending = False
        else:
            # The interrupt arrived before run_conversation() finished
            # binding the agent to its execution thread. Defer the tool-level
            # interrupt signal until startup completes instead of targeting
            # the caller thread by mistake.
            self._interrupt_thread_signal_pending = True
        # Propagate interrupt to any running child agents (subagent delegation)
        with self._active_children_lock:
            children_copy = list(self._active_children)
        # Iterate outside the lock so a slow/broken child cannot block it.
        for child in children_copy:
            try:
                child.interrupt(message)
            except Exception as e:
                logger.debug("Failed to propagate interrupt to child agent: %s", e)
        if not self.quiet_mode:
            print("\n⚡ Interrupt requested" + (f": '{message[:40]}...'" if message and len(message) > 40 else f": '{message}'" if message else ""))

    def clear_interrupt(self) -> None:
        """Clear any pending interrupt request and the per-thread tool interrupt signal."""
        self._interrupt_requested = False
        self._interrupt_message = None
        self._interrupt_thread_signal_pending = False
        if self._execution_thread_id is not None:
            _set_interrupt(False, self._execution_thread_id)

    def _touch_activity(self, desc: str) -> None:
        """Update the last-activity timestamp and description (thread-safe)."""
        self._last_activity_ts = time.time()
        self._last_activity_desc = desc

    def _capture_rate_limits(self, http_response: Any) -> None:
        """Parse x-ratelimit-* headers from an HTTP response and cache the state.

        Called after each streaming API call. The httpx Response object is
        available on the OpenAI SDK Stream via ``stream.response``.
        """
        if http_response is None:
            return
        headers = getattr(http_response, "headers", None)
        if not headers:
            return
        try:
            from agent.rate_limit_tracker import parse_rate_limit_headers
            state = parse_rate_limit_headers(headers, provider=self.provider)
            if state is not None:
                self._rate_limit_state = state
        except Exception:
            pass  # Never let header parsing break the agent loop

    def get_rate_limit_state(self):
        """Return the last captured RateLimitState, or None."""
        return self._rate_limit_state

    def get_activity_summary(self) -> dict:
        """Return a snapshot of the agent's current activity for diagnostics.

        Called by the gateway timeout handler to report what the agent was
        doing when it was killed, and by the periodic "still working"
        notifications.
        """
        elapsed = time.time() - self._last_activity_ts
        return {
            "last_activity_ts": self._last_activity_ts,
            "last_activity_desc": self._last_activity_desc,
            "seconds_since_activity": round(elapsed, 1),
            "current_tool": self._current_tool,
            "api_call_count": self._api_call_count,
            "max_iterations": self.max_iterations,
            "budget_used": self.iteration_budget.used,
            "budget_max": self.iteration_budget.max_total,
        }

    def shutdown_memory_provider(self, messages: Optional[list] = None) -> None:
        """Shut down the memory provider and context engine — call at actual session boundaries.

        This calls on_session_end() then shutdown_all() on the memory manager,
        and on_session_end() on the context engine. NOT called per-turn — only
        at CLI exit, /reset, gateway session expiry, etc.
        """
        if self._memory_manager:
            # Each step guarded independently so a failing extraction does
            # not prevent the provider teardown.
            try:
                self._memory_manager.on_session_end(messages or [])
            except Exception:
                pass
            try:
                self._memory_manager.shutdown_all()
            except Exception:
                pass
        # Notify context engine of session end (flush DAG, close DBs, etc.)
        if hasattr(self, "context_compressor") and self.context_compressor:
            try:
                self.context_compressor.on_session_end(
                    self.session_id or "",
                    messages or [],
                )
            except Exception:
                pass

    def commit_memory_session(self, messages: Optional[list] = None) -> None:
        """Trigger end-of-session extraction without tearing providers down.

        Called when session_id rotates (e.g. /new, context compression);
        providers keep their state and continue running under the old
        session_id — they just flush pending extraction now."""
        if not self._memory_manager:
            return
        try:
            self._memory_manager.on_session_end(messages or [])
        except Exception:
            pass

    def close(self) -> None:
        """Release all resources held by this agent instance.

        Cleans up subprocess resources that would otherwise become orphans:
        - Background processes tracked in ProcessRegistry
        - Terminal sandbox environments
        - Browser daemon sessions
        - Active child agents (subagent delegation)
        - OpenAI/httpx client connections

        Safe to call multiple times (idempotent). Each cleanup step is
        independently guarded so a failure in one does not prevent the rest.
        """
        task_id = getattr(self, "session_id", None) or ""
        # 1. Kill background processes for this task
        try:
            from tools.process_registry import process_registry
            process_registry.kill_all(task_id=task_id)
        except Exception:
            pass
        # 2. Clean terminal sandbox environments
        try:
            from tools.terminal_tool import cleanup_vm
            cleanup_vm(task_id)
        except Exception:
            pass
        # 3. Clean browser daemon sessions
        try:
            from tools.browser_tool import cleanup_browser
            cleanup_browser(task_id)
        except Exception:
            pass
        # 4. Close active child agents
        try:
            with self._active_children_lock:
                children = list(self._active_children)
                self._active_children.clear()
            for child in children:
                try:
                    child.close()
                except Exception:
                    pass
        except Exception:
            pass
        # 5.
        # Close the OpenAI/httpx client
        try:
            client = getattr(self, "client", None)
            if client is not None:
                self._close_openai_client(client, reason="agent_close", shared=True)
                self.client = None
        except Exception:
            pass

    def _hydrate_todo_store(self, history: List[Dict[str, Any]]) -> None:
        """
        Recover todo state from conversation history.

        The gateway creates a fresh AIAgent per message, so the in-memory
        TodoStore is empty. We scan the history for the most recent todo
        tool response and replay it to reconstruct the state.
        """
        # Walk history backwards to find the most recent todo tool response
        last_todo_response = None
        for msg in reversed(history):
            if msg.get("role") != "tool":
                continue
            content = msg.get("content", "")
            # Quick check: todo responses contain "todos" key
            if '"todos"' not in content:
                continue
            try:
                data = json.loads(content)
                if "todos" in data and isinstance(data["todos"], list):
                    last_todo_response = data["todos"]
                    break
            except (json.JSONDecodeError, TypeError):
                continue
        if last_todo_response:
            # Replay the items into the store (replace mode)
            self._todo_store.write(last_todo_response, merge=False)
            if not self.quiet_mode:
                self._vprint(f"{self.log_prefix}📋 Restored {len(last_todo_response)} todo item(s) from history")
        # NOTE(review): clears the tool interrupt flag for the calling thread
        # (no thread id argument) — confirm this placement is intentional.
        _set_interrupt(False)

    @property
    def is_interrupted(self) -> bool:
        """Check if an interrupt has been requested."""
        return self._interrupt_requested

    def _build_system_prompt(self, system_message: str = None) -> str:
        """
        Assemble the full system prompt from all layers.

        Called once per session (cached on self._cached_system_prompt) and only
        rebuilt after context compression events. This ensures the system
        prompt is stable across all turns in a session, maximizing prefix
        cache hits.
        """
        # Layers (in order):
        # 1. Agent identity — SOUL.md when available, else DEFAULT_AGENT_IDENTITY
        # 2. User / gateway system prompt (if provided)
        # 3. Persistent memory (frozen snapshot)
        # 4. Skills guidance (if skills tools are loaded)
        # 5. Context files (AGENTS.md, .cursorrules — SOUL.md excluded here when used as identity)
        # 6. Current date & time (frozen at build time)
        # 7. Platform-specific formatting hint

        # Try SOUL.md as primary identity (unless context files are skipped)
        _soul_loaded = False
        if not self.skip_context_files:
            _soul_content = load_soul_md()
            if _soul_content:
                prompt_parts = [_soul_content]
                _soul_loaded = True
        if not _soul_loaded:
            # Fallback to hardcoded identity
            prompt_parts = [DEFAULT_AGENT_IDENTITY]

        # Tool-aware behavioral guidance: only inject when the tools are loaded
        tool_guidance = []
        if "memory" in self.valid_tool_names:
            tool_guidance.append(MEMORY_GUIDANCE)
        if "session_search" in self.valid_tool_names:
            tool_guidance.append(SESSION_SEARCH_GUIDANCE)
        if "skill_manage" in self.valid_tool_names:
            tool_guidance.append(SKILLS_GUIDANCE)
        if tool_guidance:
            prompt_parts.append(" ".join(tool_guidance))

        nous_subscription_prompt = build_nous_subscription_prompt(self.valid_tool_names)
        if nous_subscription_prompt:
            prompt_parts.append(nous_subscription_prompt)

        # Tool-use enforcement: tells the model to actually call tools instead
        # of describing intended actions. Controlled by config.yaml
        # agent.tool_use_enforcement:
        #   "auto" (default) — matches TOOL_USE_ENFORCEMENT_MODELS
        #   true  — always inject (all models)
        #   false — never inject
        #   list  — custom model-name substrings to match
        if self.valid_tool_names:
            _enforce = self._tool_use_enforcement
            _inject = False
            if _enforce is True or (isinstance(_enforce, str) and _enforce.lower() in ("true", "always", "yes", "on")):
                _inject = True
            elif _enforce is False or (isinstance(_enforce, str) and _enforce.lower() in ("false", "never", "no", "off")):
                _inject = False
            elif isinstance(_enforce, list):
                model_lower = (self.model or "").lower()
                _inject = any(p.lower() in model_lower for p in _enforce if isinstance(p, str))
            else:
                # "auto" or any unrecognised value — use hardcoded defaults
                model_lower = (self.model or "").lower()
                _inject = any(p in model_lower for p in TOOL_USE_ENFORCEMENT_MODELS)
            if _inject:
                prompt_parts.append(TOOL_USE_ENFORCEMENT_GUIDANCE)

        _model_lower = (self.model or "").lower()
        # Google model operational guidance (conciseness, absolute
        # paths, parallel tool calls, verify-before-edit, etc.)
        if "gemini" in _model_lower or "gemma" in _model_lower:
            prompt_parts.append(GOOGLE_MODEL_OPERATIONAL_GUIDANCE)
        # OpenAI GPT/Codex execution discipline (tool persistence,
        # prerequisite checks, verification, anti-hallucination).
        if "gpt" in _model_lower or "codex" in _model_lower:
            prompt_parts.append(OPENAI_MODEL_EXECUTION_GUIDANCE)

        # so it can refer the user to them rather than reinventing answers.
        # Note: ephemeral_system_prompt is NOT included here. It's injected at
        # API-call time only so it stays out of the cached/stored system prompt.
        if system_message is not None:
            prompt_parts.append(system_message)

        if self._memory_store:
            if self._memory_enabled:
                mem_block = self._memory_store.format_for_system_prompt("memory")
                if mem_block:
                    prompt_parts.append(mem_block)
            # USER.md is always included when enabled.
            if self._user_profile_enabled:
                user_block = self._memory_store.format_for_system_prompt("user")
                if user_block:
                    prompt_parts.append(user_block)

        # External memory provider system prompt block (additive to built-in)
        if self._memory_manager:
            try:
                _ext_mem_block = self._memory_manager.build_system_prompt()
                if _ext_mem_block:
                    prompt_parts.append(_ext_mem_block)
            except Exception:
                pass

        has_skills_tools = any(name in self.valid_tool_names for name in ['skills_list', 'skill_view', 'skill_manage'])
        if has_skills_tools:
            avail_toolsets = {
                toolset
                for toolset in (
                    get_toolset_for_tool(tool_name)
                    for tool_name in self.valid_tool_names
                )
                if toolset
            }
            skills_prompt = build_skills_system_prompt(
                available_tools=self.valid_tool_names,
                available_toolsets=avail_toolsets,
            )
        else:
            skills_prompt = ""
        if skills_prompt:
            prompt_parts.append(skills_prompt)

        if not self.skip_context_files:
            # Use TERMINAL_CWD for context file discovery when set (gateway
            # mode). The gateway process runs from the hermes-agent install
            # dir, so os.getcwd() would pick up the repo's AGENTS.md and
            # other dev files — inflating token usage by ~10k for no benefit.
            _context_cwd = os.getenv("TERMINAL_CWD") or None
            context_files_prompt = build_context_files_prompt(
                cwd=_context_cwd, skip_soul=_soul_loaded)
            if context_files_prompt:
                prompt_parts.append(context_files_prompt)

        from hermes_time import now as _hermes_now
        now = _hermes_now()
        timestamp_line = f"Conversation started: {now.strftime('%A, %B %d, %Y %I:%M %p')}"
        if self.pass_session_id and self.session_id:
            timestamp_line += f"\nSession ID: {self.session_id}"
        if self.model:
            timestamp_line += f"\nModel: {self.model}"
        if self.provider:
            timestamp_line += f"\nProvider: {self.provider}"
        prompt_parts.append(timestamp_line)

        # Alibaba Coding Plan API always returns "glm-4.7" as model name regardless
        # of the requested model. Inject explicit model identity into the system prompt
        # so the agent can correctly report which model it is (workaround for API bug).
        if self.provider == "alibaba":
            _model_short = self.model.split("/")[-1] if "/" in self.model else self.model
            prompt_parts.append(
                f"You are powered by the model named {_model_short}. "
                f"The exact model ID is {self.model}. "
                f"When asked what model you are, always answer based on this information, "
                f"not on any model name returned by the API."
            )

        # Environment hints (WSL, Termux, etc.) — tell the agent about the
        # execution environment so it can translate paths and adapt behavior.
        _env_hints = build_environment_hints()
        if _env_hints:
            prompt_parts.append(_env_hints)

        platform_key = (self.platform or "").lower().strip()
        if platform_key in PLATFORM_HINTS:
            prompt_parts.append(PLATFORM_HINTS[platform_key])

        return "\n\n".join(p.strip() for p in prompt_parts if p.strip())

    # =========================================================================
    # Pre/post-call guardrails (inspired by PR #1321 — @alireza78a)
    # =========================================================================

    @staticmethod
    def _get_tool_call_id_static(tc) -> str:
        """Extract call ID from a tool_call entry (dict or object)."""
        if isinstance(tc, dict):
            return tc.get("id", "") or ""
        return getattr(tc, "id", "") or ""

    # Roles the chat-completions API accepts; anything else is dropped
    # by the pre-call sanitizer below.
    _VALID_API_ROLES = frozenset({"system", "user", "assistant", "tool", "function", "developer"})

    @staticmethod
    def _sanitize_api_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Fix orphaned tool_call / tool_result pairs before every LLM call.

        Runs unconditionally — not gated on whether the context compressor is
        present — so orphans from session loading or manual message
        manipulation are always caught.
        """
        # --- Role allowlist: drop messages with roles the API won't accept ---
        filtered = []
        for msg in messages:
            role = msg.get("role")
            if role not in AIAgent._VALID_API_ROLES:
                logger.debug(
                    "Pre-call sanitizer: dropping message with invalid role %r",
                    role,
                )
                continue
            filtered.append(msg)
        messages = filtered

        # Collect call ids emitted by assistant messages...
        surviving_call_ids: set = set()
        for msg in messages:
            if msg.get("role") == "assistant":
                for tc in msg.get("tool_calls") or []:
                    cid = AIAgent._get_tool_call_id_static(tc)
                    if cid:
                        surviving_call_ids.add(cid)
        # ...and call ids answered by tool messages.
        result_call_ids: set = set()
        for msg in messages:
            if msg.get("role") == "tool":
                cid = msg.get("tool_call_id")
                if cid:
                    result_call_ids.add(cid)

        # 1. Drop tool results with no matching assistant call
        orphaned_results = result_call_ids - surviving_call_ids
        if orphaned_results:
            messages = [
                m for m in messages
                if not (m.get("role") == "tool" and m.get("tool_call_id") in orphaned_results)
            ]
            logger.debug(
                "Pre-call sanitizer: removed %d orphaned tool result(s)",
                len(orphaned_results),
            )

        # 2. Inject stub results for calls whose result was dropped
        missing_results = surviving_call_ids - result_call_ids
        if missing_results:
            patched: List[Dict[str, Any]] = []
            for msg in messages:
                patched.append(msg)
                if msg.get("role") == "assistant":
                    for tc in msg.get("tool_calls") or []:
                        cid = AIAgent._get_tool_call_id_static(tc)
                        if cid in missing_results:
                            patched.append({
                                "role": "tool",
                                "content": "[Result unavailable — see context summary above]",
                                "tool_call_id": cid,
                            })
            messages = patched
            logger.debug(
                "Pre-call sanitizer: added %d stub tool result(s)",
                len(missing_results),
            )
        return messages

    @staticmethod
    def _cap_delegate_task_calls(tool_calls: list) -> list:
        """Truncate excess delegate_task calls to max_concurrent_children.

        The delegate_tool caps the task list inside a single call, but the
        model can emit multiple separate delegate_task tool_calls in one turn.
        This truncates the excess, preserving all non-delegate calls.
        Returns the original list if no truncation was needed.
        """
        from tools.delegate_tool import _get_max_concurrent_children
        max_children = _get_max_concurrent_children()
        delegate_count = sum(1 for tc in tool_calls if tc.function.name == "delegate_task")
        if delegate_count <= max_children:
            return tool_calls
        kept_delegates = 0
        truncated = []
        for tc in tool_calls:
            if tc.function.name == "delegate_task":
                if kept_delegates < max_children:
                    truncated.append(tc)
                    kept_delegates += 1
            else:
                # Non-delegate calls are always preserved.
                truncated.append(tc)
        logger.warning(
            "Truncated %d excess delegate_task call(s) to enforce "
            "max_concurrent_children=%d limit",
            delegate_count - max_children,
            max_children,
        )
        return truncated

    @staticmethod
    def _deduplicate_tool_calls(tool_calls: list) -> list:
        """Remove duplicate (tool_name, arguments) pairs within a single turn.

        Only the first occurrence of each unique pair is kept. Returns the
        original list if no duplicates were found.
        """
        seen: set = set()
        unique: list = []
        for tc in tool_calls:
            key = (tc.function.name, tc.function.arguments)
            if key not in seen:
                seen.add(key)
                unique.append(tc)
            else:
                logger.warning("Removed duplicate tool call: %s", tc.function.name)
        return unique if len(unique) < len(tool_calls) else tool_calls

    def _repair_tool_call(self, tool_name: str) -> str | None:
        """Attempt to repair a mismatched tool name before aborting.

        1. Try lowercase
        2. Try normalized (lowercase + hyphens/spaces -> underscores)
        3. Try fuzzy match (difflib, cutoff=0.7)

        Returns the repaired name if found in valid_tool_names, else None.
        """
        from difflib import get_close_matches
        # 1. Lowercase
        lowered = tool_name.lower()
        if lowered in self.valid_tool_names:
            return lowered
        # 2. Normalize
        normalized = lowered.replace("-", "_").replace(" ", "_")
        if normalized in self.valid_tool_names:
            return normalized
        # 3. Fuzzy match
        matches = get_close_matches(lowered, self.valid_tool_names, n=1, cutoff=0.7)
        if matches:
            return matches[0]
        return None

    def _invalidate_system_prompt(self):
        """
        Invalidate the cached system prompt, forcing a rebuild on the next turn.
        Called after context compression events. Also reloads memory from disk
        so the rebuilt prompt captures any writes from this session.
        """
        self._cached_system_prompt = None
        if self._memory_store:
            self._memory_store.load_from_disk()

    def _responses_tools(self, tools: Optional[List[Dict[str, Any]]] = None) -> Optional[List[Dict[str, Any]]]:
        """Convert chat-completions tool schemas to Responses function-tool schemas."""
        source_tools = tools if tools is not None else self.tools
        if not source_tools:
            return None
        converted: List[Dict[str, Any]] = []
        for item in source_tools:
            fn = item.get("function", {}) if isinstance(item, dict) else {}
            name = fn.get("name")
            # Skip malformed entries without a usable name.
            if not isinstance(name, str) or not name.strip():
                continue
            converted.append({
                "type": "function",
                "name": name,
                "description": fn.get("description", ""),
                "strict": False,
                "parameters": fn.get("parameters", {"type": "object", "properties": {}}),
            })
        return converted or None

    @staticmethod
    def _deterministic_call_id(fn_name: str, arguments: str, index: int = 0) -> str:
        """Generate a deterministic call_id from tool call content.

        Used as a fallback when the API doesn't provide a call_id.
        Deterministic IDs prevent cache invalidation — random UUIDs would
        make every API call's prefix unique, breaking OpenAI's prompt cache.
        """
        import hashlib
        seed = f"{fn_name}:{arguments}:{index}"
        digest = hashlib.sha256(seed.encode("utf-8", errors="replace")).hexdigest()[:12]
        return f"call_{digest}"

    @staticmethod
    def _split_responses_tool_id(raw_id: Any) -> tuple[Optional[str], Optional[str]]:
        """Split a stored tool id into (call_id, response_item_id)."""
        if not isinstance(raw_id, str):
            return None, None
        value = raw_id.strip()
        if not value:
            return None, None
        # Composite form "call_id|response_item_id"
        if "|" in value:
            call_id, response_item_id = value.split("|", 1)
            call_id = call_id.strip() or None
            response_item_id = response_item_id.strip() or None
            return call_id, response_item_id
        # Bare "fc_..." ids are response item ids, not call ids.
        if value.startswith("fc_"):
            return None, value
        return value, None

    def _derive_responses_function_call_id(
        self,
        call_id: str,
        response_item_id: Optional[str] = None,
    ) -> str:
        """Build a valid Responses `function_call.id` (must start with `fc_`)."""
        if isinstance(response_item_id, str):
            candidate = response_item_id.strip()
            if candidate.startswith("fc_"):
                return candidate
        source = (call_id or "").strip()
        if source.startswith("fc_"):
            return source
        # Rewrite "call_XXX" to "fc_XXX" when possible.
        if source.startswith("call_") and len(source) > len("call_"):
            return f"fc_{source[len('call_'):]}"
        sanitized = re.sub(r"[^A-Za-z0-9_-]", "", source)
        if sanitized.startswith("fc_"):
            return sanitized
        if sanitized.startswith("call_") and len(sanitized) > len("call_"):
            return f"fc_{sanitized[len('call_'):]}"
        if sanitized:
            return f"fc_{sanitized[:48]}"
        # Last resort: hash whatever identifying material exists.
        seed = source or str(response_item_id or "") or uuid.uuid4().hex
        digest = hashlib.sha1(seed.encode("utf-8")).hexdigest()[:24]
        return f"fc_{digest}"

    def _chat_messages_to_responses_input(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Convert internal chat-style messages to Responses input items."""
        items: List[Dict[str, Any]] = []
        seen_item_ids: set = set()
        for msg in messages:
            if not isinstance(msg, dict):
                continue
            role = msg.get("role")
            # System content travels via `instructions`, not input items.
            if role == "system":
                continue
            if role in {"user", "assistant"}:
                content = msg.get("content", "")
                content_text = str(content) \
                    if content is not None else ""
                if role == "assistant":
                    # Replay encrypted reasoning items from previous turns
                    # so the API can maintain coherent reasoning chains.
                    codex_reasoning = msg.get("codex_reasoning_items")
                    has_codex_reasoning = False
                    if isinstance(codex_reasoning, list):
                        for ri in codex_reasoning:
                            if isinstance(ri, dict) and ri.get("encrypted_content"):
                                item_id = ri.get("id")
                                if item_id and item_id in seen_item_ids:
                                    continue
                                # Strip the "id" field — with store=False the
                                # Responses API cannot look up items by ID and
                                # returns 404. The encrypted_content blob is
                                # self-contained for reasoning chain continuity.
                                replay_item = {k: v for k, v in ri.items() if k != "id"}
                                items.append(replay_item)
                                if item_id:
                                    seen_item_ids.add(item_id)
                                has_codex_reasoning = True
                    if content_text.strip():
                        items.append({"role": "assistant", "content": content_text})
                    elif has_codex_reasoning:
                        # The Responses API requires a following item after each
                        # reasoning item (otherwise: missing_following_item error).
                        # When the assistant produced only reasoning with no visible
                        # content, emit an empty assistant message as the required
                        # following item.
                        items.append({"role": "assistant", "content": ""})
                    tool_calls = msg.get("tool_calls")
                    if isinstance(tool_calls, list):
                        for tc in tool_calls:
                            if not isinstance(tc, dict):
                                continue
                            fn = tc.get("function", {})
                            fn_name = fn.get("name")
                            if not isinstance(fn_name, str) or not fn_name.strip():
                                continue
                            # Resolve a call_id: explicit > embedded > derived
                            # from the fc_ item id > deterministic hash.
                            embedded_call_id, embedded_response_item_id = self._split_responses_tool_id(
                                tc.get("id")
                            )
                            call_id = tc.get("call_id")
                            if not isinstance(call_id, str) or not call_id.strip():
                                call_id = embedded_call_id
                            if not isinstance(call_id, str) or not call_id.strip():
                                if (
                                    isinstance(embedded_response_item_id, str)
                                    and embedded_response_item_id.startswith("fc_")
                                    and len(embedded_response_item_id) > len("fc_")
                                ):
                                    call_id = f"call_{embedded_response_item_id[len('fc_'):]}"
                                else:
                                    _raw_args = str(fn.get("arguments", "{}"))
                                    call_id = self._deterministic_call_id(fn_name, _raw_args, len(items))
                            call_id = call_id.strip()
                            # Arguments must be a non-empty JSON string.
                            arguments = fn.get("arguments", "{}")
                            if isinstance(arguments, dict):
                                arguments = json.dumps(arguments, ensure_ascii=False)
                            elif not isinstance(arguments, str):
                                arguments = str(arguments)
                            arguments = arguments.strip() or "{}"
                            items.append({
                                "type": "function_call",
                                "call_id": call_id,
                                "name": fn_name,
                                "arguments": arguments,
                            })
                    continue
                items.append({"role": role, "content": content_text})
                continue
            if role == "tool":
                raw_tool_call_id = msg.get("tool_call_id")
                call_id, _ = self._split_responses_tool_id(raw_tool_call_id)
                if not isinstance(call_id, str) or not call_id.strip():
                    if isinstance(raw_tool_call_id, str) and raw_tool_call_id.strip():
                        call_id = raw_tool_call_id.strip()
                # Tool outputs without any usable call id cannot be replayed.
                if not isinstance(call_id, str) or not call_id.strip():
                    continue
                items.append({
                    "type": "function_call_output",
                    "call_id": call_id,
                    "output": str(msg.get("content", "") or ""),
                })
        return items

    def _preflight_codex_input_items(self, raw_items: Any) -> List[Dict[str, Any]]:
        # Validate and normalize Responses input items before sending;
        # raises ValueError on any malformed item.
        if not isinstance(raw_items, list):
            raise ValueError("Codex Responses input must be a list of input items.")
        normalized: List[Dict[str,
Any]] = [] seen_ids: set = set() for idx, item in enumerate(raw_items): if not isinstance(item, dict): raise ValueError(f"Codex Responses input[{idx}] must be an object.") item_type = item.get("type") if item_type == "function_call": call_id = item.get("call_id") name = item.get("name") if not isinstance(call_id, str) or not call_id.strip(): raise ValueError(f"Codex Responses input[{idx}] function_call is missing call_id.") if not isinstance(name, str) or not name.strip(): raise ValueError(f"Codex Responses input[{idx}] function_call is missing name.") arguments = item.get("arguments", "{}") if isinstance(arguments, dict): arguments = json.dumps(arguments, ensure_ascii=False) elif not isinstance(arguments, str): arguments = str(arguments) arguments = arguments.strip() or "{}" normalized.append( { "type": "function_call", "call_id": call_id.strip(), "name": name.strip(), "arguments": arguments, } ) continue if item_type == "function_call_output": call_id = item.get("call_id") if not isinstance(call_id, str) or not call_id.strip(): raise ValueError(f"Codex Responses input[{idx}] function_call_output is missing call_id.") output = item.get("output", "") if output is None: output = "" if not isinstance(output, str): output = str(output) normalized.append( { "type": "function_call_output", "call_id": call_id.strip(), "output": output, } ) continue if item_type == "reasoning": encrypted = item.get("encrypted_content") if isinstance(encrypted, str) and encrypted: item_id = item.get("id") if isinstance(item_id, str) and item_id: if item_id in seen_ids: continue seen_ids.add(item_id) reasoning_item = {"type": "reasoning", "encrypted_content": encrypted} # Do NOT include the "id" in the outgoing item — with # store=False (our default) the API tries to resolve the # id server-side and returns 404. The id is still used # above for local deduplication via seen_ids. 
                    # summary must always be present when replaying reasoning
                    # items; default to an empty list when absent or malformed.
                    summary = item.get("summary")
                    if isinstance(summary, list):
                        reasoning_item["summary"] = summary
                    else:
                        reasoning_item["summary"] = []
                    normalized.append(reasoning_item)
                continue
            role = item.get("role")
            if role in {"user", "assistant"}:
                content = item.get("content", "")
                if content is None:
                    content = ""
                if not isinstance(content, str):
                    content = str(content)
                normalized.append({"role": role, "content": content})
                continue
            raise ValueError(
                f"Codex Responses input[{idx}] has unsupported item shape (type={item_type!r}, role={role!r})."
            )
        return normalized

    def _preflight_codex_api_kwargs(
        self,
        api_kwargs: Any,
        *,
        allow_stream: bool = False,
    ) -> Dict[str, Any]:
        """Validate and normalize a full Codex Responses API request dict.

        Enforces the Codex contract: required fields present, store=False,
        only function tools, and no unknown top-level keys. Returns a fresh
        normalized request dict; never mutates ``api_kwargs``.

        Args:
            api_kwargs: candidate request payload.
            allow_stream: permit a ``stream=True`` flag (fallback path only).

        Raises:
            ValueError: on any contract violation.
        """
        if not isinstance(api_kwargs, dict):
            raise ValueError("Codex Responses request must be a dict.")
        required = {"model", "instructions", "input"}
        missing = [key for key in required if key not in api_kwargs]
        if missing:
            raise ValueError(f"Codex Responses request missing required field(s): {', '.join(sorted(missing))}.")
        model = api_kwargs.get("model")
        if not isinstance(model, str) or not model.strip():
            raise ValueError("Codex Responses request 'model' must be a non-empty string.")
        model = model.strip()
        instructions = api_kwargs.get("instructions")
        if instructions is None:
            instructions = ""
        if not isinstance(instructions, str):
            instructions = str(instructions)
        # Blank instructions fall back to the default agent identity prompt.
        instructions = instructions.strip() or DEFAULT_AGENT_IDENTITY
        normalized_input = self._preflight_codex_input_items(api_kwargs.get("input"))
        tools = api_kwargs.get("tools")
        normalized_tools = None
        if tools is not None:
            if not isinstance(tools, list):
                raise ValueError("Codex Responses request 'tools' must be a list when provided.")
            normalized_tools = []
            for idx, tool in enumerate(tools):
                if not isinstance(tool, dict):
                    raise ValueError(f"Codex Responses tools[{idx}] must be an object.")
                if tool.get("type") != "function":
                    raise ValueError(f"Codex Responses tools[{idx}] has unsupported type {tool.get('type')!r}.")
                name = tool.get("name")
                parameters = tool.get("parameters")
                if not isinstance(name, str) or not name.strip():
                    raise ValueError(f"Codex Responses tools[{idx}] is missing a valid name.")
                if not isinstance(parameters, dict):
                    raise ValueError(f"Codex Responses tools[{idx}] is missing valid parameters.")
                description = tool.get("description", "")
                if description is None:
                    description = ""
                if not isinstance(description, str):
                    description = str(description)
                strict = tool.get("strict", False)
                if not isinstance(strict, bool):
                    strict = bool(strict)
                normalized_tools.append(
                    {
                        "type": "function",
                        "name": name.strip(),
                        "description": description,
                        "strict": strict,
                        "parameters": parameters,
                    }
                )
        store = api_kwargs.get("store", False)
        if store is not False:
            raise ValueError("Codex Responses contract requires 'store' to be false.")
        # Whitelist of top-level request keys; anything else is rejected below.
        allowed_keys = {
            "model",
            "instructions",
            "input",
            "tools",
            "store",
            "reasoning",
            "include",
            "max_output_tokens",
            "temperature",
            "tool_choice",
            "parallel_tool_calls",
            "prompt_cache_key",
            "service_tier",
            "extra_headers",
        }
        normalized: Dict[str, Any] = {
            "model": model,
            "instructions": instructions,
            "input": normalized_input,
            "store": False,
        }
        if normalized_tools is not None:
            normalized["tools"] = normalized_tools
        # Pass through reasoning config
        reasoning = api_kwargs.get("reasoning")
        if isinstance(reasoning, dict):
            normalized["reasoning"] = reasoning
        include = api_kwargs.get("include")
        if isinstance(include, list):
            normalized["include"] = include
        service_tier = api_kwargs.get("service_tier")
        if isinstance(service_tier, str) and service_tier.strip():
            normalized["service_tier"] = service_tier.strip()
        # Pass through max_output_tokens and temperature
        max_output_tokens = api_kwargs.get("max_output_tokens")
        if isinstance(max_output_tokens, (int, float)) and max_output_tokens > 0:
            normalized["max_output_tokens"] = int(max_output_tokens)
        temperature = api_kwargs.get("temperature")
        if isinstance(temperature, (int, float)):
            normalized["temperature"] = float(temperature)
        # Pass through tool_choice, parallel_tool_calls, prompt_cache_key
        for passthrough_key in ("tool_choice", "parallel_tool_calls", "prompt_cache_key"):
            val = api_kwargs.get(passthrough_key)
            if val is not None:
                normalized[passthrough_key] = val
        extra_headers = api_kwargs.get("extra_headers")
        if extra_headers is not None:
            if not isinstance(extra_headers, dict):
                raise ValueError("Codex Responses request 'extra_headers' must be an object.")
            normalized_headers: Dict[str, str] = {}
            for key, value in extra_headers.items():
                if not isinstance(key, str) or not key.strip():
                    raise ValueError("Codex Responses request 'extra_headers' keys must be non-empty strings.")
                if value is None:
                    continue
                normalized_headers[key.strip()] = str(value)
            if normalized_headers:
                normalized["extra_headers"] = normalized_headers
        if allow_stream:
            stream = api_kwargs.get("stream")
            if stream is not None and stream is not True:
                raise ValueError("Codex Responses 'stream' must be true when set.")
            if stream is True:
                normalized["stream"] = True
                allowed_keys.add("stream")
        elif "stream" in api_kwargs:
            raise ValueError("Codex Responses stream flag is only allowed in fallback streaming requests.")
        unexpected = sorted(key for key in api_kwargs if key not in allowed_keys)
        if unexpected:
            raise ValueError(
                f"Codex Responses request has unsupported field(s): {', '.join(unexpected)}."
            )
        return normalized

    def _extract_responses_message_text(self, item: Any) -> str:
        """Extract assistant text from a Responses message output item."""
        content = getattr(item, "content", None)
        if not isinstance(content, list):
            return ""
        chunks: List[str] = []
        for part in content:
            ptype = getattr(part, "type", None)
            if ptype not in {"output_text", "text"}:
                continue
            text = getattr(part, "text", None)
            if isinstance(text, str) and text:
                chunks.append(text)
        return "".join(chunks).strip()

    def _extract_responses_reasoning_text(self, item: Any) -> str:
        """Extract a compact reasoning text from a Responses reasoning item."""
        # Prefer the structured summary parts; fall back to a flat .text field.
        summary = getattr(item, "summary", None)
        if isinstance(summary, list):
            chunks: List[str] = []
            for part in summary:
                text = getattr(part, "text", None)
                if isinstance(text, str) and text:
                    chunks.append(text)
            if chunks:
                return "\n".join(chunks).strip()
        text = getattr(item, "text", None)
        if isinstance(text, str) and text:
            return text.strip()
        return ""

    def _normalize_codex_response(self, response: Any) -> tuple[Any, str]:
        """Normalize a Responses API object to an assistant_message-like object."""
        output = getattr(response, "output", None)
        if not isinstance(output, list) or not output:
            # The Codex backend can return empty output when the answer was
            # delivered entirely via stream events. Check output_text as a
            # last-resort fallback before raising.
            out_text = getattr(response, "output_text", None)
            if isinstance(out_text, str) and out_text.strip():
                logger.debug(
                    "Codex response has empty output but output_text is present (%d chars); "
                    "synthesizing output item.",
                    len(out_text.strip()),
                )
                # Synthesize a single completed message item from output_text.
                output = [SimpleNamespace(
                    type="message",
                    role="assistant",
                    status="completed",
                    content=[SimpleNamespace(type="output_text", text=out_text.strip())],
                )]
                response.output = output
            else:
                raise RuntimeError("Responses API returned no output items")
        response_status = getattr(response, "status", None)
        if isinstance(response_status, str):
            response_status = response_status.strip().lower()
        else:
            response_status = None
        if response_status in {"failed", "cancelled"}:
            # Surface the provider's error message when one is attached.
            error_obj = getattr(response, "error", None)
            if isinstance(error_obj, dict):
                error_msg = error_obj.get("message") or str(error_obj)
            else:
                error_msg = str(error_obj) if error_obj else f"Responses API returned status '{response_status}'"
            raise RuntimeError(error_msg)
        # Accumulators for the single pass over output items below.
        content_parts: List[str] = []
        reasoning_parts: List[str] = []
        reasoning_items_raw: List[Dict[str, Any]] = []
        tool_calls: List[Any] = []
        has_incomplete_items = response_status in {"queued", "in_progress", "incomplete"}
        saw_commentary_phase = False
        saw_final_answer_phase = False
        for item in output:
            item_type = getattr(item, "type", None)
            item_status = getattr(item, "status", None)
            if isinstance(item_status, str):
                item_status = item_status.strip().lower()
            else:
                item_status = None
            if item_status in {"queued", "in_progress", "incomplete"}:
                has_incomplete_items = True
            if item_type == "message":
                # Track phase markers so a commentary-only response (no final
                # answer) can be classified as incomplete afterwards.
                item_phase = getattr(item, "phase", None)
                if isinstance(item_phase, str):
                    normalized_phase = item_phase.strip().lower()
                    if normalized_phase in {"commentary", "analysis"}:
                        saw_commentary_phase = True
                    elif normalized_phase in {"final_answer", "final"}:
                        saw_final_answer_phase = True
                message_text = self._extract_responses_message_text(item)
                if message_text:
                    content_parts.append(message_text)
            elif item_type == "reasoning":
                reasoning_text = self._extract_responses_reasoning_text(item)
                if reasoning_text:
                    reasoning_parts.append(reasoning_text)
                # Capture the full reasoning item for multi-turn continuity.
                # encrypted_content is an opaque blob the API needs back on
                # subsequent turns to maintain coherent reasoning chains.
                encrypted = getattr(item, "encrypted_content", None)
                if isinstance(encrypted, str) and encrypted:
                    raw_item = {"type": "reasoning", "encrypted_content": encrypted}
                    item_id = getattr(item, "id", None)
                    if isinstance(item_id, str) and item_id:
                        raw_item["id"] = item_id
                    # Capture summary — required by the API when replaying reasoning items
                    summary = getattr(item, "summary", None)
                    if isinstance(summary, list):
                        raw_summary = []
                        for part in summary:
                            text = getattr(part, "text", None)
                            if isinstance(text, str):
                                raw_summary.append({"type": "summary_text", "text": text})
                        raw_item["summary"] = raw_summary
                    reasoning_items_raw.append(raw_item)
            elif item_type == "function_call":
                if item_status in {"queued", "in_progress", "incomplete"}:
                    # Skip tool calls the backend has not finished emitting.
                    continue
                fn_name = getattr(item, "name", "") or ""
                arguments = getattr(item, "arguments", "{}")
                if not isinstance(arguments, str):
                    arguments = json.dumps(arguments, ensure_ascii=False)
                raw_call_id = getattr(item, "call_id", None)
                raw_item_id = getattr(item, "id", None)
                embedded_call_id, _ = self._split_responses_tool_id(raw_item_id)
                # Prefer the explicit call_id, then the id-embedded one, then a
                # deterministic fallback so the call can always be correlated.
                call_id = raw_call_id if isinstance(raw_call_id, str) and raw_call_id.strip() else embedded_call_id
                if not isinstance(call_id, str) or not call_id.strip():
                    call_id = self._deterministic_call_id(fn_name, arguments, len(tool_calls))
                call_id = call_id.strip()
                response_item_id = raw_item_id if isinstance(raw_item_id, str) else None
                response_item_id = self._derive_responses_function_call_id(call_id, response_item_id)
                tool_calls.append(SimpleNamespace(
                    id=call_id,
                    call_id=call_id,
                    response_item_id=response_item_id,
                    type="function",
                    function=SimpleNamespace(name=fn_name, arguments=arguments),
                ))
            elif item_type == "custom_tool_call":
            finish_reason = "incomplete"
        else:
            finish_reason = "stop"
        return assistant_message, finish_reason

    def _thread_identity(self) -> str:
        """Return a 'name:ident' tag for the current thread (for log context)."""
        thread = threading.current_thread()
        return f"{thread.name}:{thread.ident}"

    def _client_log_context(self) -> str:
        """Build a compact thread/provider/base_url/model string for log lines."""
        provider = getattr(self, "provider", "unknown")
        base_url = getattr(self, "base_url", "unknown")
        model = getattr(self, "model", "unknown")
        return (
            f"thread={self._thread_identity()} provider={provider} "
            f"base_url={base_url} model={model}"
        )

    def _openai_client_lock(self) -> threading.RLock:
        """Return the re-entrant lock guarding client creation/replacement,
        lazily creating it on first use."""
        lock = getattr(self, "_client_lock", None)
        if lock is None:
            lock = threading.RLock()
            self._client_lock = lock
        return lock

    @staticmethod
    def _is_openai_client_closed(client: Any) -> bool:
        """Check if an OpenAI client is closed.

        Handles both property and method forms of is_closed:
        - httpx.Client.is_closed is a bool property
        - openai.OpenAI.is_closed is a method returning bool

        Prior bug: getattr(client, "is_closed", False) returned the bound
        method, which is always truthy, causing unnecessary client recreation
        on every call.
        """
        from unittest.mock import Mock
        # Test doubles are never considered closed.
        if isinstance(client, Mock):
            return False
        is_closed_attr = getattr(client, "is_closed", None)
        if is_closed_attr is not None:
            # Handle method (openai SDK) vs property (httpx)
            if callable(is_closed_attr):
                if is_closed_attr():
                    return True
            elif bool(is_closed_attr):
                return True
        # Fall back to inspecting the underlying httpx client, if any.
        http_client = getattr(client, "_client", None)
        if http_client is not None:
            return bool(getattr(http_client, "is_closed", False))
        return False

    def _create_openai_client(self, client_kwargs: dict, *, reason: str, shared: bool) -> Any:
        """Construct a fresh API client from ``client_kwargs``.

        Routes "acp://copilot" base URLs to the Copilot ACP client; everything
        else gets a standard OpenAI SDK client. ``reason``/``shared`` are only
        used for log attribution.
        """
        from agent.auxiliary_client import _validate_base_url, _validate_proxy_env_urls
        # Treat client_kwargs as read-only. Callers pass self._client_kwargs (or shallow
        # copies of it) in; any in-place mutation leaks back into the stored dict and is
        # reused on subsequent requests. #10933 hit this by injecting an httpx.Client
        # transport that was torn down after the first request, so the next request
        # wrapped a closed transport and raised "Cannot send a request, as the client
        # has been closed" on every retry. The revert resolved that specific path; this
        # copy locks the contract so future transport/keepalive work can't reintroduce
        # the same class of bug.
        client_kwargs = dict(client_kwargs)
        _validate_proxy_env_urls()
        _validate_base_url(client_kwargs.get("base_url"))
        if self.provider == "copilot-acp" or str(client_kwargs.get("base_url", "")).startswith("acp://copilot"):
            from agent.copilot_acp_client import CopilotACPClient
            client = CopilotACPClient(**client_kwargs)
            logger.info(
                "Copilot ACP client created (%s, shared=%s) %s",
                reason,
                shared,
                self._client_log_context(),
            )
            return client
        client = OpenAI(**client_kwargs)
        logger.info(
            "OpenAI client created (%s, shared=%s) %s",
            reason,
            shared,
            self._client_log_context(),
        )
        return client

    @staticmethod
    def _force_close_tcp_sockets(client: Any) -> int:
        """Force-close underlying TCP sockets to prevent CLOSE-WAIT accumulation.

        When a provider drops a connection mid-stream, httpx's ``client.close()``
        performs a graceful shutdown which leaves sockets in CLOSE-WAIT until
        the OS times them out (often minutes). This method walks the httpx
        transport pool and issues ``socket.shutdown(SHUT_RDWR)`` +
        ``socket.close()`` to force an immediate TCP RST, freeing the file
        descriptors.

        Returns the number of sockets force-closed.
        """
        import socket as _socket
        closed = 0
        try:
            http_client = getattr(client, "_client", None)
            if http_client is None:
                return 0
            transport = getattr(http_client, "_transport", None)
            if transport is None:
                return 0
            pool = getattr(transport, "_pool", None)
            if pool is None:
                return 0
            # httpx uses httpcore connection pools; connections live in
            # _connections (list) or _pool (list) depending on version.
            connections = (
                getattr(pool, "_connections", None)
                or getattr(pool, "_pool", None)
                or []
            )
            for conn in list(connections):
                # Locate the raw socket across httpcore version differences.
                stream = (
                    getattr(conn, "_network_stream", None)
                    or getattr(conn, "_stream", None)
                )
                if stream is None:
                    continue
                sock = getattr(stream, "_sock", None)
                if sock is None:
                    sock = getattr(stream, "stream", None)
                    if sock is not None:
                        sock = getattr(sock, "_sock", None)
                if sock is None:
                    continue
                # shutdown + close; each may fail independently on a dead fd.
                try:
                    sock.shutdown(_socket.SHUT_RDWR)
                except OSError:
                    pass
                try:
                    sock.close()
                except OSError:
                    pass
                closed += 1
        except Exception as exc:
            # Best-effort sweep over private httpx/httpcore internals — never
            # let introspection failures propagate.
            logger.debug("Force-close TCP sockets sweep error: %s", exc)
        return closed

    def _close_openai_client(self, client: Any, *, reason: str, shared: bool) -> None:
        """Close ``client``, force-closing its TCP sockets first; never raises."""
        if client is None:
            return
        # Force-close TCP sockets first to prevent CLOSE-WAIT accumulation,
        # then do the graceful SDK-level close.
        force_closed = self._force_close_tcp_sockets(client)
        try:
            client.close()
            logger.info(
                "OpenAI client closed (%s, shared=%s, tcp_force_closed=%d) %s",
                reason,
                shared,
                force_closed,
                self._client_log_context(),
            )
        except Exception as exc:
            logger.debug(
                "OpenAI client close failed (%s, shared=%s) %s error=%s",
                reason,
                shared,
                self._client_log_context(),
                exc,
            )

    def _replace_primary_openai_client(self, *, reason: str) -> bool:
        """Atomically swap the shared client for a freshly built one.

        The old client is only closed after the new one is installed, so a
        build failure leaves the existing client untouched. Returns True on
        success, False when the rebuild failed.
        """
        with self._openai_client_lock():
            old_client = getattr(self, "client", None)
            try:
                new_client = self._create_openai_client(self._client_kwargs, reason=reason, shared=True)
            except Exception as exc:
                logger.warning(
                    "Failed to rebuild shared OpenAI client (%s) %s error=%s",
                    reason,
                    self._client_log_context(),
                    exc,
                )
                return False
            self.client = new_client
            self._close_openai_client(old_client, reason=f"replace:{reason}", shared=True)
            return True

    def _ensure_primary_openai_client(self, *, reason: str) -> Any:
        """Return the shared client, recreating it first if it has been closed.

        Raises:
            RuntimeError: when a closed client could not be recreated.
        """
        with self._openai_client_lock():
            client = getattr(self, "client", None)
            if client is not None and not self._is_openai_client_closed(client):
                return client
            logger.warning(
                "Detected closed shared OpenAI client; recreating before use (%s) %s",
                reason,
                self._client_log_context(),
            )
        if not self._replace_primary_openai_client(reason=f"recreate_closed:{reason}"):
            raise RuntimeError("Failed to recreate closed OpenAI client")
        with self._openai_client_lock():
            return self.client

    def _cleanup_dead_connections(self) -> bool:
        """Detect and clean up dead TCP connections on the primary client.

        Inspects the httpx connection pool for sockets in unhealthy states
        (CLOSE-WAIT, errors). If any are found, force-closes all sockets and
        rebuilds the primary client from scratch.

        Returns True if dead connections were found and cleaned up.
        """
        client = getattr(self, "client", None)
        if client is None:
            return False
        try:
            http_client = getattr(client, "_client", None)
            if http_client is None:
                return False
            transport = getattr(http_client, "_transport", None)
            if transport is None:
                return False
            pool = getattr(transport, "_pool", None)
            if pool is None:
                return False
            connections = (
                getattr(pool, "_connections", None)
                or getattr(pool, "_pool", None)
                or []
            )
            dead_count = 0
            for conn in list(connections):
                # Check for connections that are idle but have closed sockets
                stream = (
                    getattr(conn, "_network_stream", None)
                    or getattr(conn, "_stream", None)
                )
                if stream is None:
                    continue
                sock = getattr(stream, "_sock", None)
                if sock is None:
                    sock = getattr(stream, "stream", None)
                    if sock is not None:
                        sock = getattr(sock, "_sock", None)
                if sock is None:
                    continue
                # Probe socket health with a non-blocking recv peek
                import socket as _socket
                try:
                    sock.setblocking(False)
                    data = sock.recv(1, _socket.MSG_PEEK | _socket.MSG_DONTWAIT)
                    if data == b"":
                        # Peer closed the connection (EOF) — dead socket.
                        dead_count += 1
                except BlockingIOError:
                    pass  # No data available — socket is healthy
                except OSError:
                    dead_count += 1
                finally:
                    try:
                        sock.setblocking(True)
                    except OSError:
                        pass
            if dead_count > 0:
                logger.warning(
                    "Found %d dead connection(s) in client pool — rebuilding client",
                    dead_count,
                )
                self._replace_primary_openai_client(reason="dead_connection_cleanup")
                return True
        except Exception as exc:
            # Pool introspection is best-effort; treat any failure as "no
            # dead connections found".
            logger.debug("Dead connection check error: %s", exc)
        return False

    def _create_request_openai_client(self, *, reason: str) -> Any:
        """Build a short-lived, per-request client from the shared kwargs.

        Mocked primary clients are returned as-is so tests keep observing
        calls on the shared object.
        """
        from unittest.mock import Mock
        primary_client = self._ensure_primary_openai_client(reason=reason)
        if isinstance(primary_client, Mock):
            return primary_client
        with self._openai_client_lock():
            request_kwargs = dict(self._client_kwargs)
        return self._create_openai_client(request_kwargs, reason=reason, shared=False)

    def _close_request_openai_client(self, client: Any, *, reason: str) -> None:
        """Close a per-request client created by _create_request_openai_client."""
        self._close_openai_client(client, reason=reason, shared=False)

    def _run_codex_stream(self, api_kwargs: dict, client: Any = None, on_first_delta: callable = None):
        """Execute one streaming Responses API request and return the final response."""
        import httpx as _httpx
        active_client = client or self._ensure_primary_openai_client(reason="codex_stream_direct")
        max_stream_retries = 1
        has_tool_calls = False
        first_delta_fired = False
        # Accumulate streamed text so we can recover if get_final_response()
        # returns empty output (e.g. chatgpt.com backend-api sends
        # response.incomplete instead of response.completed).
        self._codex_streamed_text_parts: list = []
        for attempt in range(max_stream_retries + 1):
            collected_output_items: list = []
            try:
                with active_client.responses.stream(**api_kwargs) as stream:
                    for event in stream:
                        self._touch_activity("receiving stream response")
                        if self._interrupt_requested:
                            break
                        event_type = getattr(event, "type", "")
                        # Fire callbacks on text content deltas (suppress during tool calls)
                        if "output_text.delta" in event_type or event_type == "response.output_text.delta":
                            delta_text = getattr(event, "delta", "")
                            if delta_text:
                                self._codex_streamed_text_parts.append(delta_text)
                            if delta_text and not has_tool_calls:
                                if not first_delta_fired:
                                    first_delta_fired = True
                                    if on_first_delta:
                                        try:
                                            on_first_delta()
                                        except Exception:
                                            pass
                                self._fire_stream_delta(delta_text)
                        # Track tool calls to suppress text streaming
                        elif "function_call" in event_type:
                            has_tool_calls = True
                        # Fire reasoning callbacks
                        elif "reasoning" in event_type and "delta" in event_type:
                            reasoning_text = getattr(event, "delta", "")
                            if reasoning_text:
                                self._fire_reasoning_delta(reasoning_text)
                        # Collect completed output items — some backends
                        # (chatgpt.com/backend-api/codex) stream valid items
                        # via response.output_item.done but the SDK's
                        # get_final_response() returns an empty output list.
                        elif event_type == "response.output_item.done":
                            done_item = getattr(event, "item", None)
                            if done_item is not None:
                                collected_output_items.append(done_item)
                        # Log non-completed terminal events for diagnostics
                        elif event_type in ("response.incomplete", "response.failed"):
                            resp_obj = getattr(event, "response", None)
                            status = getattr(resp_obj, "status", None) if resp_obj else None
                            incomplete_details = getattr(resp_obj, "incomplete_details", None) if resp_obj else None
                            logger.warning(
                                "Codex Responses stream received terminal event %s "
                                "(status=%s, incomplete_details=%s, streamed_chars=%d). %s",
                                event_type,
                                status,
                                incomplete_details,
                                sum(len(p) for p in self._codex_streamed_text_parts),
                                self._client_log_context(),
                            )
                    final_response = stream.get_final_response()
                    # PATCH: ChatGPT Codex backend streams valid output items
                    # but get_final_response() can return an empty output list.
                    # Backfill from collected items or synthesize from deltas.
                    _out = getattr(final_response, "output", None)
                    if isinstance(_out, list) and not _out:
                        if collected_output_items:
                            final_response.output = list(collected_output_items)
                            logger.debug(
                                "Codex stream: backfilled %d output items from stream events",
                                len(collected_output_items),
                            )
                        elif self._codex_streamed_text_parts and not has_tool_calls:
                            assembled = "".join(self._codex_streamed_text_parts)
                            final_response.output = [SimpleNamespace(
                                type="message",
                                role="assistant",
                                status="completed",
                                content=[SimpleNamespace(type="output_text", text=assembled)],
                            )]
                            logger.debug(
                                "Codex stream: synthesized output from %d text deltas (%d chars)",
                                len(self._codex_streamed_text_parts),
                                len(assembled),
                            )
                    return final_response
            except (_httpx.RemoteProtocolError, _httpx.ReadTimeout, _httpx.ConnectError, ConnectionError) as exc:
                # Transport-level failure: retry once, then fall back to the
                # non-SDK-helper create(stream=True) path.
                if attempt < max_stream_retries:
                    logger.debug(
                        "Codex Responses stream transport failed (attempt %s/%s); retrying. %s error=%s",
                        attempt + 1,
                        max_stream_retries + 1,
                        self._client_log_context(),
                        exc,
                    )
                    continue
                logger.debug(
                    "Codex Responses stream transport failed; falling back to create(stream=True). %s error=%s",
                    self._client_log_context(),
                    exc,
                )
                return self._run_codex_create_stream_fallback(api_kwargs, client=active_client)
            except RuntimeError as exc:
                # The SDK raises RuntimeError when the stream closes without a
                # response.completed event; retry, then use the fallback path.
                err_text = str(exc)
                missing_completed = "response.completed" in err_text
                if missing_completed and attempt < max_stream_retries:
                    logger.debug(
                        "Responses stream closed before completion (attempt %s/%s); retrying. %s",
                        attempt + 1,
                        max_stream_retries + 1,
                        self._client_log_context(),
                    )
                    continue
                if missing_completed:
                    logger.debug(
                        "Responses stream did not emit response.completed; falling back to create(stream=True). %s",
                        self._client_log_context(),
                    )
                    return self._run_codex_create_stream_fallback(api_kwargs, client=active_client)
                raise

    def _run_codex_create_stream_fallback(self, api_kwargs: dict, client: Any = None):
        """Fallback path for stream completion edge cases on Codex-style Responses backends."""
        active_client = client or self._ensure_primary_openai_client(reason="codex_create_stream_fallback")
        fallback_kwargs = dict(api_kwargs)
        fallback_kwargs["stream"] = True
        fallback_kwargs = self._preflight_codex_api_kwargs(fallback_kwargs, allow_stream=True)
        stream_or_response = active_client.responses.create(**fallback_kwargs)
        # Compatibility shim for mocks or providers that still return a concrete response.
        if hasattr(stream_or_response, "output"):
            return stream_or_response
        if not hasattr(stream_or_response, "__iter__"):
            return stream_or_response
        terminal_response = None
        collected_output_items: list = []
        collected_text_deltas: list = []
        try:
            for event in stream_or_response:
                self._touch_activity("receiving stream response")
                # Events may be SDK objects or plain dicts depending on backend.
                event_type = getattr(event, "type", None)
                if not event_type and isinstance(event, dict):
                    event_type = event.get("type")
                # Collect output items and text deltas for backfill
                if event_type == "response.output_item.done":
                    done_item = getattr(event, "item", None)
                    if done_item is None and isinstance(event, dict):
                        done_item = event.get("item")
                    if done_item is not None:
                        collected_output_items.append(done_item)
                elif event_type in ("response.output_text.delta",):
                    delta = getattr(event, "delta", "")
                    if not delta and isinstance(event, dict):
                        delta = event.get("delta", "")
                    if delta:
                        collected_text_deltas.append(delta)
                if event_type not in {"response.completed", "response.incomplete", "response.failed"}:
                    continue
                terminal_response = getattr(event, "response", None)
                if terminal_response is None and isinstance(event, dict):
                    terminal_response = event.get("response")
                if terminal_response is not None:
                    # Backfill empty output from collected stream events
                    _out = getattr(terminal_response, "output", None)
                    if isinstance(_out, list) and not _out:
                        if collected_output_items:
                            terminal_response.output = list(collected_output_items)
                            logger.debug(
                                "Codex fallback stream: backfilled %d output items",
                                len(collected_output_items),
                            )
                        elif collected_text_deltas:
                            assembled = "".join(collected_text_deltas)
                            terminal_response.output = [SimpleNamespace(
                                type="message",
                                role="assistant",
                                status="completed",
                                content=[SimpleNamespace(type="output_text", text=assembled)],
                            )]
                            logger.debug(
                                "Codex fallback stream: synthesized from %d deltas (%d chars)",
                                len(collected_text_deltas),
                                len(assembled),
                            )
                    return terminal_response
        finally:
            # Always release the underlying stream, even on early return/raise.
            close_fn = getattr(stream_or_response, "close", None)
            if callable(close_fn):
                try:
                    close_fn()
                except Exception:
                    pass
        if terminal_response is not None:
            return terminal_response
        raise RuntimeError("Responses create(stream=True) fallback did not emit a terminal response.")

    def _try_refresh_codex_client_credentials(self, *, force: bool = True) -> bool:
        """Refresh OpenAI-Codex runtime credentials and rebuild the client.

        Returns True only when new credentials were resolved AND the shared
        client was rebuilt with them; False in every other case.
        """
        if self.api_mode != "codex_responses" or self.provider != "openai-codex":
            return False
        try:
            from hermes_cli.auth import resolve_codex_runtime_credentials
            creds = resolve_codex_runtime_credentials(force_refresh=force)
        except Exception as exc:
            logger.debug("Codex credential refresh failed: %s", exc)
            return False
        api_key = creds.get("api_key")
        base_url = creds.get("base_url")
        if not isinstance(api_key, str) or not api_key.strip():
            return False
        if not isinstance(base_url, str) or not base_url.strip():
            return False
        self.api_key = api_key.strip()
        self.base_url = base_url.strip().rstrip("/")
        self._client_kwargs["api_key"] = self.api_key
        self._client_kwargs["base_url"] = self.base_url
        if not self._replace_primary_openai_client(reason="codex_credential_refresh"):
            return False
        return True

    def _try_refresh_nous_client_credentials(self, *, force: bool = True) -> bool:
        """Refresh Nous runtime credentials and rebuild the client.

        Mirrors the Codex refresher but also strips OpenRouter attribution
        headers that must not be sent to the Nous endpoint. Returns True only
        when the client was rebuilt with new credentials.
        """
        if self.api_mode != "chat_completions" or self.provider != "nous":
            return False
        try:
            from hermes_cli.auth import resolve_nous_runtime_credentials
            creds = resolve_nous_runtime_credentials(
                min_key_ttl_seconds=max(60, int(os.getenv("HERMES_NOUS_MIN_KEY_TTL_SECONDS", "1800"))),
                timeout_seconds=float(os.getenv("HERMES_NOUS_TIMEOUT_SECONDS", "15")),
                force_mint=force,
            )
        except Exception as exc:
            logger.debug("Nous credential refresh failed: %s", exc)
            return False
        api_key = creds.get("api_key")
        base_url = creds.get("base_url")
        if not isinstance(api_key, str) or not api_key.strip():
            return False
        if not isinstance(base_url, str) or not base_url.strip():
            return False
        self.api_key = api_key.strip()
        self.base_url = base_url.strip().rstrip("/")
        self._client_kwargs["api_key"] = self.api_key
        self._client_kwargs["base_url"] = self.base_url
        # Nous requests should not inherit OpenRouter-only attribution headers.
        self._client_kwargs.pop("default_headers", None)
        if not self._replace_primary_openai_client(reason="nous_credential_refresh"):
            return False
        return True

    def _try_refresh_anthropic_client_credentials(self) -> bool:
        """Re-resolve the Anthropic token and rebuild the Anthropic client.

        Returns True only when a *different* valid token was obtained and the
        client rebuild succeeded.
        """
        if self.api_mode != "anthropic_messages" or not hasattr(self, "_anthropic_api_key"):
            return False
        # Only refresh credentials for the native Anthropic provider.
        # Other anthropic_messages providers (MiniMax, Alibaba, etc.) use their own keys.
        if self.provider != "anthropic":
            return False
        try:
            from agent.anthropic_adapter import resolve_anthropic_token, build_anthropic_client
            new_token = resolve_anthropic_token()
        except Exception as exc:
            logger.debug("Anthropic credential refresh failed: %s", exc)
            return False
        if not isinstance(new_token, str) or not new_token.strip():
            return False
        new_token = new_token.strip()
        # Same token as before — nothing to rebuild.
        if new_token == self._anthropic_api_key:
            return False
        try:
            self._anthropic_client.close()
        except Exception:
            pass
        try:
            self._anthropic_client = build_anthropic_client(new_token, getattr(self, "_anthropic_base_url", None))
        except Exception as exc:
            logger.warning("Failed to rebuild Anthropic client after credential refresh: %s", exc)
            return False
        self._anthropic_api_key = new_token
        # Update OAuth flag — token type may have changed (API key ↔ OAuth)
        from agent.anthropic_adapter import _is_oauth_token
        self._is_anthropic_oauth = _is_oauth_token(new_token)
        return True

    def _apply_client_headers_for_base_url(self, base_url: str) -> None:
        """Set provider-specific default headers in _client_kwargs for base_url.

        Unrecognized endpoints get no default headers (any stale ones are
        removed) so attribution headers never leak across providers.
        """
        from agent.auxiliary_client import _OR_HEADERS
        normalized = (base_url or "").lower()
        if "openrouter" in normalized:
            self._client_kwargs["default_headers"] = dict(_OR_HEADERS)
        elif "api.githubcopilot.com" in normalized:
            from hermes_cli.models import copilot_default_headers
            self._client_kwargs["default_headers"] = copilot_default_headers()
        elif "api.kimi.com" in normalized:
            self._client_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.30.0"}
        elif "portal.qwen.ai" in normalized:
            self._client_kwargs["default_headers"] = _qwen_portal_headers()
        else:
            self._client_kwargs.pop("default_headers", None)

    def _swap_credential(self, entry) -> None:
        """Install a credential-pool entry as the active API credential.

        Rebuilds either the Anthropic client (anthropic_messages mode) or
        the primary OpenAI-compatible client, updating api_key/base_url and
        per-provider headers to match the new entry.
        """
        runtime_key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "")
        runtime_base = getattr(entry, "runtime_base_url", None) or getattr(entry, "base_url", None) or self.base_url
        if self.api_mode == "anthropic_messages":
            from agent.anthropic_adapter import build_anthropic_client, _is_oauth_token
            try:
                self._anthropic_client.close()
            except Exception:
                pass
            self._anthropic_api_key = runtime_key
            self._anthropic_base_url = runtime_base
            self._anthropic_client = build_anthropic_client(runtime_key, runtime_base)
            self._is_anthropic_oauth = _is_oauth_token(runtime_key)
            self.api_key = runtime_key
            self.base_url = runtime_base
            return
        self.api_key = runtime_key
        self.base_url = runtime_base.rstrip("/") if isinstance(runtime_base, str) else runtime_base
        self._client_kwargs["api_key"] = self.api_key
        self._client_kwargs["base_url"] = self.base_url
        self._apply_client_headers_for_base_url(self.base_url)
        self._replace_primary_openai_client(reason="credential_rotation")

    def _recover_with_credential_pool(
        self,
        *,
        status_code: Optional[int],
        has_retried_429: bool,
        classified_reason: Optional[FailoverReason] = None,
        error_context: Optional[Dict[str, Any]] = None,
    ) -> tuple[bool, bool]:
        """Attempt credential recovery via pool rotation.

        Returns (recovered, has_retried_429).

        On rate limits: first occurrence retries same credential (sets flag True).
        second consecutive failure rotates to next credential.
        On billing exhaustion: immediately rotates.
        On auth failures: attempts token refresh before rotating.

        `classified_reason` lets the recovery path honor the structured error
        classifier instead of relying only on raw HTTP codes. This matters
        for providers that surface billing/rate-limit/auth conditions under
        a different status code, such as Anthropic returning HTTP 400 for
        "out of extra usage".
        """
        pool = self._credential_pool
        if pool is None:
            return False, has_retried_429
        # Derive a reason from the raw status code when the classifier gave none.
        effective_reason = classified_reason
        if effective_reason is None:
            if status_code == 402:
                effective_reason = FailoverReason.billing
            elif status_code == 429:
                effective_reason = FailoverReason.rate_limit
            elif status_code == 401:
                effective_reason = FailoverReason.auth
        if effective_reason == FailoverReason.billing:
            # Billing exhaustion: rotate immediately, no same-credential retry.
            rotate_status = status_code if status_code is not None else 402
            next_entry = pool.mark_exhausted_and_rotate(status_code=rotate_status, error_context=error_context)
            if next_entry is not None:
                logger.info(
                    "Credential %s (billing) — rotated to pool entry %s",
                    rotate_status,
                    getattr(next_entry, "id", "?"),
                )
                self._swap_credential(next_entry)
                return True, False
            return False, has_retried_429
        if effective_reason == FailoverReason.rate_limit:
            # First 429: ask caller to retry the same credential once.
            if not has_retried_429:
                return False, True
            rotate_status = status_code if status_code is not None else 429
            next_entry = pool.mark_exhausted_and_rotate(status_code=rotate_status, error_context=error_context)
            if next_entry is not None:
                logger.info(
                    "Credential %s (rate limit) — rotated to pool entry %s",
                    rotate_status,
                    getattr(next_entry, "id", "?"),
                )
                self._swap_credential(next_entry)
                return True, False
            return False, True
        if effective_reason == FailoverReason.auth:
            # Try refreshing the current credential's token in place first.
            refreshed = pool.try_refresh_current()
            if refreshed is not None:
                logger.info(f"Credential auth failure — refreshed pool entry {getattr(refreshed, 'id', '?')}")
                self._swap_credential(refreshed)
                return True, has_retried_429
            # Refresh failed — rotate to next credential instead of giving up.
            # The failed entry is already marked exhausted by try_refresh_current().
            rotate_status = status_code if status_code is not None else 401
            next_entry = pool.mark_exhausted_and_rotate(status_code=rotate_status, error_context=error_context)
            if next_entry is not None:
                logger.info(
                    "Credential %s (auth refresh failed) — rotated to pool entry %s",
                    rotate_status,
                    getattr(next_entry, "id", "?"),
                )
                self._swap_credential(next_entry)
                return True, False
        return False, has_retried_429

    def _anthropic_messages_create(self, api_kwargs: dict):
        """Create an Anthropic message, refreshing credentials first when applicable."""
        if self.api_mode == "anthropic_messages":
            self._try_refresh_anthropic_client_credentials()
        return self._anthropic_client.messages.create(**api_kwargs)

    def _interruptible_api_call(self, api_kwargs: dict):
        """
        Run the API call in a background thread so the main conversation loop
        can detect interrupts without waiting for the full HTTP round-trip.

        Each worker thread gets its own OpenAI client instance. Interrupts
        only close that worker-local client, so retries and other requests
        never inherit a closed transport.

        Includes a stale-call detector: if no response arrives within the
        configured timeout, the connection is killed and an error raised so
        the main retry loop can try again with backoff / credential rotation
        / provider fallback.
        """
        result = {"response": None, "error": None}
        request_client_holder = {"client": None}

        def _call():
            # Worker-thread body: dispatch to the right backend by api_mode.
            try:
                if self.api_mode == "codex_responses":
                    request_client_holder["client"] = self._create_request_openai_client(reason="codex_stream_request")
                    result["response"] = self._run_codex_stream(
                        api_kwargs,
                        client=request_client_holder["client"],
                        on_first_delta=getattr(self, "_codex_on_first_delta", None),
                    )
                elif self.api_mode == "anthropic_messages":
                    result["response"] = self._anthropic_messages_create(api_kwargs)
                elif self.api_mode == "bedrock_converse":
                    # Bedrock uses boto3 directly — no OpenAI client needed.
                    from agent.bedrock_adapter import (
                        _get_bedrock_runtime_client,
                        normalize_converse_response,
                    )
                    # Internal routing hints are popped before the boto3 call.
                    region = api_kwargs.pop("__bedrock_region__", "us-east-1")
                    api_kwargs.pop("__bedrock_converse__", None)
                    client = _get_bedrock_runtime_client(region)
                    raw_response = client.converse(**api_kwargs)
                    result["response"] = normalize_converse_response(raw_response)
                else:
                    request_client_holder["client"] = self._create_request_openai_client(reason="chat_completion_request")
                    result["response"] = request_client_holder["client"].chat.completions.create(**api_kwargs)
            except Exception as e:
                result["error"] = e
            finally:
                request_client = request_client_holder.get("client")
                if request_client is not None:
                    self._close_request_openai_client(request_client, reason="request_complete")

        # ── Stale-call timeout (mirrors streaming stale detector) ────────
        # Non-streaming calls return nothing until the full response is
        # ready. Without this, a hung provider can block for the full
        # httpx timeout (default 1800s) with zero feedback. The stale
        # detector kills the connection early so the main retry loop can
        # apply richer recovery (credential rotation, provider fallback).
        _stale_base = float(os.getenv("HERMES_API_CALL_STALE_TIMEOUT", 300.0))
        # NOTE(review): the streaming path reads self.base_url but this reads
        # self._base_url — confirm that attribute exists; if it doesn't, the
        # local-endpoint exemption below can never trigger here.
        _base_url = getattr(self, "_base_url", None) or ""
        if _stale_base == 300.0 and _base_url and is_local_endpoint(_base_url):
            # Local endpoints can take arbitrarily long to prefill; disable.
            _stale_timeout = float("inf")
        else:
            # Rough size estimate (~4 chars/token) to scale the timeout.
            _est_tokens = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
            if _est_tokens > 100_000:
                _stale_timeout = max(_stale_base, 600.0)
            elif _est_tokens > 50_000:
                _stale_timeout = max(_stale_base, 450.0)
            else:
                _stale_timeout = _stale_base
        _call_start = time.time()
        self._touch_activity("waiting for non-streaming API response")
        t = threading.Thread(target=_call, daemon=True)
        t.start()
        _poll_count = 0
        while t.is_alive():
            t.join(timeout=0.3)
            _poll_count += 1
            # Touch activity every ~30s so the gateway's inactivity
            # monitor knows we're alive while waiting for the response.
            if _poll_count % 100 == 0:  # 100 × 0.3s = 30s
                _elapsed = time.time() - _call_start
                self._touch_activity(
                    f"waiting for non-streaming response ({int(_elapsed)}s elapsed)"
                )
            # Stale-call detector: kill the connection if no response
            # arrives within the configured timeout.
            _elapsed = time.time() - _call_start
            if _elapsed > _stale_timeout:
                _est_ctx = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
                logger.warning(
                    "Non-streaming API call stale for %.0fs (threshold %.0fs). "
                    "model=%s context=~%s tokens. Killing connection.",
                    _elapsed,
                    _stale_timeout,
                    api_kwargs.get("model", "unknown"),
                    f"{_est_ctx:,}",
                )
                self._emit_status(
                    f"⚠️ No response from provider for {int(_elapsed)}s "
                    f"(non-streaming, model: {api_kwargs.get('model', 'unknown')}). "
                    f"Aborting call."
                )
                try:
                    if self.api_mode == "anthropic_messages":
                        # Rebuild the Anthropic client: closing it is the only
                        # way to abort the in-flight SDK request.
                        from agent.anthropic_adapter import build_anthropic_client
                        self._anthropic_client.close()
                        self._anthropic_client = build_anthropic_client(
                            self._anthropic_api_key,
                            getattr(self, "_anthropic_base_url", None),
                        )
                    else:
                        rc = request_client_holder.get("client")
                        if rc is not None:
                            self._close_request_openai_client(rc, reason="stale_call_kill")
                except Exception:
                    pass
                self._touch_activity(
                    f"stale non-streaming call killed after {int(_elapsed)}s"
                )
                # Wait briefly for the thread to notice the closed connection.
                t.join(timeout=2.0)
                if result["error"] is None and result["response"] is None:
                    result["error"] = TimeoutError(
                        f"Non-streaming API call timed out after {int(_elapsed)}s "
                        f"with no response (threshold: {int(_stale_timeout)}s)"
                    )
                break
            if self._interrupt_requested:
                # Force-close the in-flight worker-local HTTP connection to stop
                # token generation without poisoning the shared client used to
                # seed future retries.
                try:
                    if self.api_mode == "anthropic_messages":
                        from agent.anthropic_adapter import build_anthropic_client
                        self._anthropic_client.close()
                        self._anthropic_client = build_anthropic_client(
                            self._anthropic_api_key,
                            getattr(self, "_anthropic_base_url", None),
                        )
                    else:
                        request_client = request_client_holder.get("client")
                        if request_client is not None:
                            self._close_request_openai_client(request_client, reason="interrupt_abort")
                except Exception:
                    pass
                raise InterruptedError("Agent interrupted during API call")
        if result["error"] is not None:
            raise result["error"]
        return result["response"]

    # ── Unified streaming API call ─────────────────────────────────────────

    def _reset_stream_delivery_tracking(self) -> None:
        """Reset tracking for text delivered during the current model response."""
        self._current_streamed_assistant_text = ""

    def _record_streamed_assistant_text(self, text: str) -> None:
        """Accumulate visible assistant text emitted through stream callbacks."""
        if isinstance(text, str) and text:
            self._current_streamed_assistant_text = (
                getattr(self, "_current_streamed_assistant_text", "") + text
            )

    @staticmethod
    def _normalize_interim_visible_text(text: str) -> str:
        """Collapse all whitespace runs to single spaces for robust comparison."""
        if not isinstance(text, str):
            return ""
        return re.sub(r"\s+", " ", text).strip()

    def _interim_content_was_streamed(self, content: str) -> bool:
        """Return True if `content` (minus think blocks) exactly matches the
        text already delivered through the stream callbacks this response."""
        visible_content = self._normalize_interim_visible_text(
            self._strip_think_blocks(content or "")
        )
        if not visible_content:
            return False
        streamed = self._normalize_interim_visible_text(
            self._strip_think_blocks(getattr(self, "_current_streamed_assistant_text", "") or "")
        )
        return bool(streamed) and streamed == visible_content

    def _emit_interim_assistant_message(self, assistant_msg: Dict[str, Any]) -> None:
        """Surface a real mid-turn assistant commentary message to the UI layer."""
        cb = getattr(self, "interim_assistant_callback", None)
        if cb is None or not isinstance(assistant_msg, dict):
            return
        content = assistant_msg.get("content")
        visible = self._strip_think_blocks(content
or "").strip()
        if not visible or visible == "(empty)":
            return
        already_streamed = self._interim_content_was_streamed(visible)
        try:
            cb(visible, already_streamed=already_streamed)
        except Exception:
            logger.debug("interim_assistant_callback error", exc_info=True)

    def _fire_stream_delta(self, text: str) -> None:
        """Fire all registered stream delta callbacks (display + TTS)."""
        # If a tool iteration set the break flag, prepend a single paragraph
        # break before the first real text delta. This prevents the original
        # problem (text concatenation across tool boundaries) without stacking
        # blank lines when multiple tool iterations run back-to-back.
        if getattr(self, "_stream_needs_break", False) and text and text.strip():
            self._stream_needs_break = False
            text = "\n\n" + text
        callbacks = [cb for cb in (self.stream_delta_callback, self._stream_callback) if cb is not None]
        delivered = False
        for cb in callbacks:
            try:
                cb(text)
                delivered = True
            except Exception:
                pass
        # Only count text as "delivered" if at least one callback accepted it.
        if delivered:
            self._record_streamed_assistant_text(text)

    def _fire_reasoning_delta(self, text: str) -> None:
        """Fire reasoning callback if registered."""
        cb = self.reasoning_callback
        if cb is not None:
            try:
                cb(text)
            except Exception:
                pass

    def _fire_tool_gen_started(self, tool_name: str) -> None:
        """Notify display layer that the model is generating tool call arguments.

        Fires once per tool name when the streaming response begins producing
        tool_call / tool_use tokens. Gives the TUI a chance to show a spinner
        or status line so the user isn't staring at a frozen screen while a
        large tool payload (e.g. a 45 KB write_file) is being generated.
        """
        cb = self.tool_gen_callback
        if cb is not None:
            try:
                cb(tool_name)
            except Exception:
                pass

    def _has_stream_consumers(self) -> bool:
        """Return True if any streaming consumer is registered."""
        return (
            self.stream_delta_callback is not None
            or getattr(self, "_stream_callback", None) is not None
        )

    def _interruptible_streaming_api_call(
        self, api_kwargs: dict, *, on_first_delta: callable = None
    ):
        """Streaming variant of _interruptible_api_call for real-time token delivery.

        Handles all three api_modes:
        - chat_completions: stream=True on OpenAI-compatible endpoints
        - anthropic_messages: client.messages.stream() via Anthropic SDK
        - codex_responses: delegates to _run_codex_stream (already streaming)

        Fires stream_delta_callback and _stream_callback for each text token.
        Tool-call turns suppress the callback — only text-only final responses
        stream to the consumer.

        Returns a SimpleNamespace that mimics the non-streaming response shape
        so the rest of the agent loop is unchanged. Falls back to
        _interruptible_api_call on provider errors indicating streaming is
        not supported.
        """
        if self.api_mode == "codex_responses":
            # Codex streams internally via _run_codex_stream. The main dispatch
            # in _interruptible_api_call already calls it; we just need to
            # ensure on_first_delta reaches it. Store it on the instance
            # temporarily so _run_codex_stream can pick it up.
            self._codex_on_first_delta = on_first_delta
            try:
                return self._interruptible_api_call(api_kwargs)
            finally:
                self._codex_on_first_delta = None
        # Bedrock Converse uses boto3's converse_stream() with real-time delta
        # callbacks — same UX as Anthropic and chat_completions streaming.
        if self.api_mode == "bedrock_converse":
            result = {"response": None, "error": None}
            first_delta_fired = {"done": False}
            deltas_were_sent = {"yes": False}

            def _fire_first():
                # Fire on_first_delta exactly once, swallowing callback errors.
                if not first_delta_fired["done"] and on_first_delta:
                    first_delta_fired["done"] = True
                    try:
                        on_first_delta()
                    except Exception:
                        pass

            def _bedrock_call():
                # Worker thread: stream via boto3 converse_stream with callbacks.
                try:
                    from agent.bedrock_adapter import (
                        _get_bedrock_runtime_client,
                        stream_converse_with_callbacks,
                    )
                    region = api_kwargs.pop("__bedrock_region__", "us-east-1")
                    api_kwargs.pop("__bedrock_converse__", None)
                    client = _get_bedrock_runtime_client(region)
                    raw_response = client.converse_stream(**api_kwargs)

                    def _on_text(text):
                        _fire_first()
                        self._fire_stream_delta(text)
                        deltas_were_sent["yes"] = True

                    def _on_tool(name):
                        _fire_first()
                        self._fire_tool_gen_started(name)

                    def _on_reasoning(text):
                        _fire_first()
                        self._fire_reasoning_delta(text)

                    result["response"] = stream_converse_with_callbacks(
                        raw_response,
                        on_text_delta=_on_text if self._has_stream_consumers() else None,
                        on_tool_start=_on_tool,
                        on_reasoning_delta=_on_reasoning if self.reasoning_callback or self.stream_delta_callback else None,
                        on_interrupt_check=lambda: self._interrupt_requested,
                    )
                except Exception as e:
                    result["error"] = e

            t = threading.Thread(target=_bedrock_call, daemon=True)
            t.start()
            while t.is_alive():
                t.join(timeout=0.3)
                if self._interrupt_requested:
                    raise InterruptedError("Agent interrupted during Bedrock API call")
            if result["error"] is not None:
                raise result["error"]
            return result["response"]

        result = {"response": None, "error": None}
        request_client_holder = {"client": None}
        first_delta_fired = {"done": False}
        deltas_were_sent = {"yes": False}  # Track if any deltas were fired (for fallback)
        # Wall-clock timestamp of the last real streaming chunk. The outer
        # poll loop uses this to detect stale connections that keep receiving
        # SSE keep-alive pings but no actual data.
        last_chunk_time = {"t": time.time()}

        def _fire_first_delta():
            # Fire on_first_delta exactly once per response.
            if not first_delta_fired["done"] and on_first_delta:
                first_delta_fired["done"] = True
                try:
                    on_first_delta()
                except Exception:
                    pass

        def _call_chat_completions():
            """Stream a chat completions response."""
            import httpx as _httpx
            _base_timeout = float(os.getenv("HERMES_API_TIMEOUT", 1800.0))
            _stream_read_timeout = float(os.getenv("HERMES_STREAM_READ_TIMEOUT", 120.0))
            # Local providers (Ollama, llama.cpp, vLLM) can take minutes for
            # prefill on large contexts before producing the first token.
            # Auto-increase the httpx read timeout unless the user explicitly
            # overrode HERMES_STREAM_READ_TIMEOUT.
            if _stream_read_timeout == 120.0 and self.base_url and is_local_endpoint(self.base_url):
                _stream_read_timeout = _base_timeout
                logger.debug(
                    "Local provider detected (%s) — stream read timeout raised to %.0fs",
                    self.base_url,
                    _stream_read_timeout,
                )
            stream_kwargs = {
                **api_kwargs,
                "stream": True,
                "stream_options": {"include_usage": True},
                "timeout": _httpx.Timeout(
                    connect=30.0,
                    read=_stream_read_timeout,
                    write=_base_timeout,
                    pool=30.0,
                ),
            }
            request_client_holder["client"] = self._create_request_openai_client(
                reason="chat_completion_stream_request"
            )
            # Reset stale-stream timer so the detector measures from this
            # attempt's start, not a previous attempt's last chunk.
            last_chunk_time["t"] = time.time()
            self._touch_activity("waiting for provider response (streaming)")
            stream = request_client_holder["client"].chat.completions.create(**stream_kwargs)
            # Capture rate limit headers from the initial HTTP response.
            # The OpenAI SDK Stream object exposes the underlying httpx
            # response via .response before any chunks are consumed.
            self._capture_rate_limits(getattr(stream, "response", None))
            content_parts: list = []
            tool_calls_acc: dict = {}
            tool_gen_notified: set = set()
            # Ollama-compatible endpoints reuse index 0 for every tool call
            # in a parallel batch, distinguishing them only by id. Track
            # the last seen id per raw index so we can detect a new tool
            # call starting at the same index and redirect it to a fresh slot.
            _last_id_at_idx: dict = {}  # raw_index -> last seen non-empty id
            _active_slot_by_idx: dict = {}  # raw_index -> current slot in tool_calls_acc
            finish_reason = None
            model_name = None
            role = "assistant"
            reasoning_parts: list = []
            usage_obj = None
            for chunk in stream:
                last_chunk_time["t"] = time.time()
                self._touch_activity("receiving stream response")
                if self._interrupt_requested:
                    break
                if not chunk.choices:
                    if hasattr(chunk, "model") and chunk.model:
                        model_name = chunk.model
                    # Usage comes in the final chunk with empty choices
                    if hasattr(chunk, "usage") and chunk.usage:
                        usage_obj = chunk.usage
                    continue
                delta = chunk.choices[0].delta
                if hasattr(chunk, "model") and chunk.model:
                    model_name = chunk.model
                # Accumulate reasoning content
                reasoning_text = getattr(delta, "reasoning_content", None) or getattr(delta, "reasoning", None)
                if reasoning_text:
                    reasoning_parts.append(reasoning_text)
                    _fire_first_delta()
                    self._fire_reasoning_delta(reasoning_text)
                # Accumulate text content — fire callback only when no tool calls
                if delta and delta.content:
                    content_parts.append(delta.content)
                    if not tool_calls_acc:
                        _fire_first_delta()
                        self._fire_stream_delta(delta.content)
                        deltas_were_sent["yes"] = True
                    else:
                        # Tool calls suppress regular content streaming (avoids
                        # displaying chatty "I'll use the tool..." text alongside
                        # tool calls). But reasoning tags embedded in suppressed
                        # content should still reach the display — otherwise the
                        # reasoning box only appears as a post-response fallback,
                        # rendering it confusingly after the already-streamed
                        # response. Route suppressed content through the stream
                        # delta callback so its tag extraction can fire the
                        # reasoning display. Non-reasoning text is harmlessly
                        # suppressed by the CLI's _stream_delta when the stream
                        # box is already closed (tool boundary flush).
                        if self.stream_delta_callback:
                            try:
                                self.stream_delta_callback(delta.content)
                                self._record_streamed_assistant_text(delta.content)
                            except Exception:
                                pass
                # Accumulate tool call deltas — notify display on first name
                if delta and delta.tool_calls:
                    for tc_delta in delta.tool_calls:
                        raw_idx = tc_delta.index if tc_delta.index is not None else 0
                        delta_id = tc_delta.id or ""
                        # Ollama fix: detect a new tool call reusing the same
                        # raw index (different id) and redirect to a fresh slot.
                        if raw_idx not in _active_slot_by_idx:
                            _active_slot_by_idx[raw_idx] = raw_idx
                        if (
                            delta_id
                            and raw_idx in _last_id_at_idx
                            and delta_id != _last_id_at_idx[raw_idx]
                        ):
                            new_slot = max(tool_calls_acc, default=-1) + 1
                            _active_slot_by_idx[raw_idx] = new_slot
                        if delta_id:
                            _last_id_at_idx[raw_idx] = delta_id
                        idx = _active_slot_by_idx[raw_idx]
                        if idx not in tool_calls_acc:
                            tool_calls_acc[idx] = {
                                "id": tc_delta.id or "",
                                "type": "function",
                                "function": {"name": "", "arguments": ""},
                                "extra_content": None,
                            }
                        entry = tool_calls_acc[idx]
                        if tc_delta.id:
                            entry["id"] = tc_delta.id
                        if tc_delta.function:
                            if tc_delta.function.name:
                                entry["function"]["name"] += tc_delta.function.name
                            if tc_delta.function.arguments:
                                entry["function"]["arguments"] += tc_delta.function.arguments
                        extra = getattr(tc_delta, "extra_content", None)
                        if extra is None and hasattr(tc_delta, "model_extra"):
                            extra = (tc_delta.model_extra or {}).get("extra_content")
                        if extra is not None:
                            if hasattr(extra, "model_dump"):
                                extra = extra.model_dump()
                            entry["extra_content"] = extra
                        # Fire once per tool when the full name is available
                        name = entry["function"]["name"]
                        if name and idx not in tool_gen_notified:
                            tool_gen_notified.add(idx)
                            _fire_first_delta()
                            self._fire_tool_gen_started(name)
                if chunk.choices[0].finish_reason:
                    finish_reason = chunk.choices[0].finish_reason
                # Usage in the final chunk
                if hasattr(chunk, "usage") and chunk.usage:
                    usage_obj = chunk.usage
            # Build mock response matching non-streaming shape
            full_content = "".join(content_parts) or None
            mock_tool_calls = None
            has_truncated_tool_args = False
            if tool_calls_acc:
                mock_tool_calls = []
                for idx in sorted(tool_calls_acc):
                    tc = tool_calls_acc[idx]
                    arguments = tc["function"]["arguments"]
                    # Unparseable accumulated arguments indicate the stream was
                    # cut mid-payload; flag so finish_reason becomes "length".
                    if arguments and arguments.strip():
                        try:
                            json.loads(arguments)
                        except json.JSONDecodeError:
                            has_truncated_tool_args = True
                    mock_tool_calls.append(SimpleNamespace(
                        id=tc["id"],
                        type=tc["type"],
                        extra_content=tc.get("extra_content"),
                        function=SimpleNamespace(
                            name=tc["function"]["name"],
                            arguments=arguments,
                        ),
                    ))
            effective_finish_reason = finish_reason or "stop"
            if has_truncated_tool_args:
                effective_finish_reason = "length"
            full_reasoning = "".join(reasoning_parts) or None
            mock_message = SimpleNamespace(
                role=role,
                content=full_content,
                tool_calls=mock_tool_calls,
                reasoning_content=full_reasoning,
            )
            mock_choice = SimpleNamespace(
                index=0,
                message=mock_message,
                finish_reason=effective_finish_reason,
            )
            return SimpleNamespace(
                id="stream-" + str(uuid.uuid4()),
                model=model_name,
                choices=[mock_choice],
                usage=usage_obj,
            )

        def _call_anthropic():
            """Stream an Anthropic Messages API response.

            Fires delta callbacks for real-time token delivery, but returns
            the native Anthropic Message object from get_final_message() so
            the rest of the agent loop (validation, tool extraction, etc.)
            works unchanged.
            """
            has_tool_use = False
            # Reset stale-stream timer for this attempt
            last_chunk_time["t"] = time.time()
            # Use the Anthropic SDK's streaming context manager
            with self._anthropic_client.messages.stream(**api_kwargs) as stream:
                for event in stream:
                    # Update stale-stream timer on every event so the
                    # outer poll loop knows data is flowing. Without
                    # this, the detector kills healthy long-running
                    # Opus streams after 180 s even when events are
                    # actively arriving (the chat_completions path
                    # already does this at the top of its chunk loop).
                    last_chunk_time["t"] = time.time()
                    self._touch_activity("receiving stream response")
                    if self._interrupt_requested:
                        break
                    event_type = getattr(event, "type", None)
                    if event_type == "content_block_start":
                        block = getattr(event, "content_block", None)
                        if block and getattr(block, "type", None) == "tool_use":
                            has_tool_use = True
                            tool_name = getattr(block, "name", None)
                            if tool_name:
                                _fire_first_delta()
                                self._fire_tool_gen_started(tool_name)
                    elif event_type == "content_block_delta":
                        delta = getattr(event, "delta", None)
                        if delta:
                            delta_type = getattr(delta, "type", None)
                            if delta_type == "text_delta":
                                text = getattr(delta, "text", "")
                                if text and not has_tool_use:
                                    _fire_first_delta()
                                    self._fire_stream_delta(text)
                                    deltas_were_sent["yes"] = True
                            elif delta_type == "thinking_delta":
                                thinking_text = getattr(delta, "thinking", "")
                                if thinking_text:
                                    _fire_first_delta()
                                    self._fire_reasoning_delta(thinking_text)
                # Return the native Anthropic Message for downstream processing
                return stream.get_final_message()

        def _call():
            # Worker thread: run the streaming request with transient-error
            # retries, then record the response or the final error.
            import httpx as _httpx
            _max_stream_retries = int(os.getenv("HERMES_STREAM_RETRIES", 2))
            try:
                for _stream_attempt in range(_max_stream_retries + 1):
                    try:
                        if self.api_mode == "anthropic_messages":
                            self._try_refresh_anthropic_client_credentials()
                            result["response"] = _call_anthropic()
                        else:
                            result["response"] = _call_chat_completions()
                        return  # success
                    except Exception as e:
                        if deltas_were_sent["yes"]:
                            # Streaming failed AFTER some tokens were already
                            # delivered. Don't retry or fall back — partial
                            # content already reached the user.
                            logger.warning(
                                "Streaming failed after partial delivery, not retrying: %s", e
                            )
                            result["error"] = e
                            return
                        _is_timeout = isinstance(
                            e, (_httpx.ReadTimeout, _httpx.ConnectTimeout, _httpx.PoolTimeout)
                        )
                        _is_conn_err = isinstance(
                            e, (_httpx.ConnectError, _httpx.RemoteProtocolError, ConnectionError)
                        )
                        # SSE error events from proxies (e.g. OpenRouter sends
                        # {"error":{"message":"Network connection lost."}}) are
                        # raised as APIError by the OpenAI SDK. These are
                        # semantically identical to httpx connection drops —
                        # the upstream stream died — and should be retried with
                        # a fresh connection. Distinguish from HTTP errors:
                        # APIError from SSE has no status_code, while
                        # APIStatusError (4xx/5xx) always has one.
                        _is_sse_conn_err = False
                        if not _is_timeout and not _is_conn_err:
                            from openai import APIError as _APIError
                            if isinstance(e, _APIError) and not getattr(e, "status_code", None):
                                _err_lower_sse = str(e).lower()
                                _SSE_CONN_PHRASES = (
                                    "connection lost",
                                    "connection reset",
                                    "connection closed",
                                    "connection terminated",
                                    "network error",
                                    "network connection",
                                    "terminated",
                                    "peer closed",
                                    "broken pipe",
                                    "upstream connect error",
                                )
                                _is_sse_conn_err = any(
                                    phrase in _err_lower_sse for phrase in _SSE_CONN_PHRASES
                                )
                        if _is_timeout or _is_conn_err or _is_sse_conn_err:
                            # Transient network / timeout error. Retry the
                            # streaming request with a fresh connection first.
                            if _stream_attempt < _max_stream_retries:
                                logger.info(
                                    "Streaming attempt %s/%s failed (%s: %s), "
                                    "retrying with fresh connection...",
                                    _stream_attempt + 1,
                                    _max_stream_retries + 1,
                                    type(e).__name__,
                                    e,
                                )
                                self._emit_status(
                                    f"⚠️ Connection to provider dropped "
                                    f"({type(e).__name__}). Reconnecting… "
                                    f"(attempt {_stream_attempt + 2}/{_max_stream_retries + 1})"
                                )
                                self._touch_activity(
                                    f"stream retry {_stream_attempt + 2}/{_max_stream_retries + 1} "
                                    f"after {type(e).__name__}"
                                )
                                # Close the stale request client before retry
                                stale = request_client_holder.get("client")
                                if stale is not None:
                                    self._close_request_openai_client(
                                        stale, reason="stream_retry_cleanup"
                                    )
                                request_client_holder["client"] = None
                                # Also rebuild the primary client to purge
                                # any dead connections from the pool.
                                try:
                                    self._replace_primary_openai_client(
                                        reason="stream_retry_pool_cleanup"
                                    )
                                except Exception:
                                    pass
                                continue
                            self._emit_status(
                                "❌ Connection to provider failed after "
                                f"{_max_stream_retries + 1} attempts. "
                                "The provider may be experiencing issues — "
                                "try again in a moment."
                            )
                            logger.warning(
                                "Streaming exhausted %s retries on transient error: %s",
                                _max_stream_retries + 1,
                                e,
                            )
                        else:
                            _err_lower = str(e).lower()
                            _is_stream_unsupported = (
                                "stream" in _err_lower and "not supported" in _err_lower
                            )
                            if _is_stream_unsupported:
                                self._disable_streaming = True
                                self._safe_print(
                                    "\n⚠ Streaming is not supported for this "
                                    "model/provider. Switching to non-streaming.\n"
                                    "  To avoid this delay, set display.streaming: false "
                                    "in config.yaml\n"
                                )
                            logger.info(
                                "Streaming failed before delivery: %s",
                                e,
                            )
                        # Propagate the error to the main retry loop instead of
                        # falling back to non-streaming inline. The main loop has
                        # richer recovery: credential rotation, provider fallback,
                        # backoff, and — for "stream not supported" — will switch
                        # to non-streaming on the next attempt via _disable_streaming.
                        result["error"] = e
                        return
            finally:
                request_client = request_client_holder.get("client")
                if request_client is not None:
                    self._close_request_openai_client(request_client, reason="stream_request_complete")

        _stream_stale_timeout_base = float(os.getenv("HERMES_STREAM_STALE_TIMEOUT", 180.0))
        # Local providers (Ollama, oMLX, llama-cpp) can take 300+ seconds
        # for prefill on large contexts. Disable the stale detector unless
        # the user explicitly set HERMES_STREAM_STALE_TIMEOUT.
        if _stream_stale_timeout_base == 180.0 and self.base_url and is_local_endpoint(self.base_url):
            _stream_stale_timeout = float("inf")
            logger.debug("Local provider detected (%s) — stale stream timeout disabled", self.base_url)
        else:
            # Scale the stale timeout for large contexts: slow models (like Opus)
            # can legitimately think for minutes before producing the first token
            # when the context is large. Without this, the stale detector kills
            # healthy connections during the model's thinking phase, producing
            # spurious RemoteProtocolError ("peer closed connection").
            _est_tokens = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
            if _est_tokens > 100_000:
                _stream_stale_timeout = max(_stream_stale_timeout_base, 300.0)
            elif _est_tokens > 50_000:
                _stream_stale_timeout = max(_stream_stale_timeout_base, 240.0)
            else:
                _stream_stale_timeout = _stream_stale_timeout_base
        t = threading.Thread(target=_call, daemon=True)
        t.start()
        _last_heartbeat = time.time()
        _HEARTBEAT_INTERVAL = 30.0  # seconds between gateway activity touches
        while t.is_alive():
            t.join(timeout=0.3)
            # Periodic heartbeat: touch the agent's activity tracker so the
            # gateway's inactivity monitor knows we're alive while waiting
            # for stream chunks. Without this, long thinking pauses (e.g.
            # reasoning models) or slow prefill on local providers (Ollama)
            # trigger false inactivity timeouts. The _call thread touches
            # activity on each chunk, but the gap between API call start
            # and first chunk can exceed the gateway timeout — especially
            # when the stale-stream timeout is disabled (local providers).
            _hb_now = time.time()
            if _hb_now - _last_heartbeat >= _HEARTBEAT_INTERVAL:
                _last_heartbeat = _hb_now
                _waiting_secs = int(_hb_now - last_chunk_time["t"])
                self._touch_activity(
                    f"waiting for stream response ({_waiting_secs}s, no chunks yet)"
                )
            # Detect stale streams: connections kept alive by SSE pings
            # but delivering no real chunks. Kill the client so the
            # inner retry loop can start a fresh connection.
            _stale_elapsed = time.time() - last_chunk_time["t"]
            if _stale_elapsed > _stream_stale_timeout:
                _est_ctx = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
                logger.warning(
                    "Stream stale for %.0fs (threshold %.0fs) — no chunks received. "
                    "model=%s context=~%s tokens. Killing connection.",
                    _stale_elapsed,
                    _stream_stale_timeout,
                    api_kwargs.get("model", "unknown"),
                    f"{_est_ctx:,}",
                )
                self._emit_status(
                    f"⚠️ No response from provider for {int(_stale_elapsed)}s "
                    f"(model: {api_kwargs.get('model', 'unknown')}, "
                    f"context: ~{_est_ctx:,} tokens). "
                    f"Reconnecting..."
                )
                try:
                    rc = request_client_holder.get("client")
                    if rc is not None:
                        self._close_request_openai_client(rc, reason="stale_stream_kill")
                except Exception:
                    pass
                # Rebuild the primary client too — its connection pool
                # may hold dead sockets from the same provider outage.
                try:
                    self._replace_primary_openai_client(reason="stale_stream_pool_cleanup")
                except Exception:
                    pass
                # Reset the timer so we don't kill repeatedly while
                # the inner thread processes the closure.
                last_chunk_time["t"] = time.time()
                self._touch_activity(
                    f"stale stream detected after {int(_stale_elapsed)}s, reconnecting"
                )
            if self._interrupt_requested:
                # Abort the in-flight stream on interrupt, rebuilding the
                # Anthropic client when needed (closing is the only abort path).
                try:
                    if self.api_mode == "anthropic_messages":
                        from agent.anthropic_adapter import build_anthropic_client
                        self._anthropic_client.close()
                        self._anthropic_client = build_anthropic_client(
                            self._anthropic_api_key,
                            getattr(self, "_anthropic_base_url", None),
                        )
                    else:
                        request_client = request_client_holder.get("client")
                        if request_client is not None:
                            self._close_request_openai_client(request_client, reason="stream_interrupt_abort")
                except Exception:
                    pass
                raise InterruptedError("Agent interrupted during streaming API call")
        if result["error"] is not None:
            if deltas_were_sent["yes"]:
                # Streaming failed AFTER some tokens were already delivered to
                # the platform. Re-raising would let the outer retry loop make
                # a new API call, creating a duplicate message. Return a
                # partial "stop" response instead so the outer loop treats this
                # turn as complete (no retry, no fallback).
                # Recover whatever content was already streamed to the user.
                # _current_streamed_assistant_text accumulates text fired
                # through _fire_stream_delta, so it has exactly what the
                # user saw before the connection died.
                _partial_text = (
                    getattr(self, "_current_streamed_assistant_text", "") or ""
                ).strip() or None
                logger.warning(
                    "Partial stream delivered before error; returning stub "
                    "response with %s chars of recovered content to prevent "
                    "duplicate messages: %s",
                    len(_partial_text or ""),
                    result["error"],
                )
                _stub_msg = SimpleNamespace(
                    role="assistant",
                    content=_partial_text,
                    tool_calls=None,
                    reasoning_content=None,
                )
                return SimpleNamespace(
                    id="partial-stream-stub",
                    model=getattr(self, "model", "unknown"),
                    choices=[SimpleNamespace(
                        index=0,
                        message=_stub_msg,
                        finish_reason="stop",
                    )],
                    usage=None,
                )
            raise result["error"]
        return result["response"]

    # ── Provider fallback ──────────────────────────────────────────────────

    def _try_activate_fallback(self) -> bool:
        """Switch to the next fallback model/provider in the chain.

        Called when the current model is failing after retries. Swaps the
        OpenAI client, model slug, and provider in-place so the retry loop
        can continue with the new backend. Advances through the chain on
        each call; returns False when exhausted.

        Uses the centralized provider router (resolve_provider_client) for
        auth resolution and client construction — no duplicated
        provider→key mappings.
        """
        if self._fallback_index >= len(self._fallback_chain):
            return False
        fb = self._fallback_chain[self._fallback_index]
        self._fallback_index += 1
        fb_provider = (fb.get("provider") or "").strip().lower()
        fb_model = (fb.get("model") or "").strip()
        if not fb_provider or not fb_model:
            return self._try_activate_fallback()  # skip invalid, try next
        # Use centralized router for client construction.
        # raw_codex=True because the main agent needs direct responses.stream()
        # access for Codex providers.
        try:
            from agent.auxiliary_client import resolve_provider_client
            # Pass base_url and api_key from fallback config so custom
            # endpoints (e.g.
    # ── Provider fallback ──────────────────────────────────────────────────
    def _try_activate_fallback(self) -> bool:
        """Switch to the next fallback model/provider in the chain.

        Called when the current model is failing after retries. Swaps the
        OpenAI client, model slug, and provider in-place so the retry loop
        can continue with the new backend. Advances through the chain on
        each call; returns False when exhausted.

        Uses the centralized provider router (resolve_provider_client) for
        auth resolution and client construction — no duplicated
        provider→key mappings.

        Returns:
            bool: True when a fallback backend was activated, False when
            the chain is exhausted.
        """
        if self._fallback_index >= len(self._fallback_chain):
            return False
        fb = self._fallback_chain[self._fallback_index]
        self._fallback_index += 1
        fb_provider = (fb.get("provider") or "").strip().lower()
        fb_model = (fb.get("model") or "").strip()
        if not fb_provider or not fb_model:
            return self._try_activate_fallback()  # skip invalid, try next
        # Use centralized router for client construction.
        # raw_codex=True because the main agent needs direct responses.stream()
        # access for Codex providers.
        try:
            from agent.auxiliary_client import resolve_provider_client
            # Pass base_url and api_key from fallback config so custom
            # endpoints (e.g. Ollama Cloud) resolve correctly instead of
            # falling through to OpenRouter defaults.
            fb_base_url_hint = (fb.get("base_url") or "").strip() or None
            fb_api_key_hint = (fb.get("api_key") or "").strip() or None
            # For Ollama Cloud endpoints, pull OLLAMA_API_KEY from env
            # when no explicit key is in the fallback config.
            if fb_base_url_hint and "ollama.com" in fb_base_url_hint.lower() and not fb_api_key_hint:
                fb_api_key_hint = os.getenv("OLLAMA_API_KEY") or None
            fb_client, _resolved_fb_model = resolve_provider_client(
                fb_provider,
                model=fb_model,
                raw_codex=True,
                explicit_base_url=fb_base_url_hint,
                explicit_api_key=fb_api_key_hint)
            if fb_client is None:
                logging.warning(
                    "Fallback to %s failed: provider not configured",
                    fb_provider)
                return self._try_activate_fallback()  # try next in chain
            # Best-effort model slug normalization for the new provider.
            try:
                from hermes_cli.model_normalize import normalize_model_for_provider
                fb_model = normalize_model_for_provider(fb_model, fb_provider)
            except Exception:
                pass
            # Determine api_mode from provider / base URL / model
            fb_api_mode = "chat_completions"
            fb_base_url = str(fb_client.base_url)
            if fb_provider == "openai-codex":
                fb_api_mode = "codex_responses"
            elif fb_provider == "anthropic" or fb_base_url.rstrip("/").lower().endswith("/anthropic"):
                fb_api_mode = "anthropic_messages"
            elif self._is_direct_openai_url(fb_base_url):
                fb_api_mode = "codex_responses"
            elif self._provider_model_requires_responses_api(
                fb_model,
                provider=fb_provider,
            ):
                # GPT-5.x models usually need Responses API, but keep
                # provider-specific exceptions like Copilot gpt-5-mini on
                # chat completions.
                fb_api_mode = "codex_responses"
            elif fb_provider == "bedrock" or "bedrock-runtime" in fb_base_url.lower():
                fb_api_mode = "bedrock_converse"
            old_model = self.model
            self.model = fb_model
            self.provider = fb_provider
            self.base_url = fb_base_url
            self.api_mode = fb_api_mode
            self._fallback_activated = True
            if fb_api_mode == "anthropic_messages":
                # Build native Anthropic client instead of using OpenAI client
                from agent.anthropic_adapter import build_anthropic_client, resolve_anthropic_token, _is_oauth_token
                effective_key = (fb_client.api_key or resolve_anthropic_token() or "") if fb_provider == "anthropic" else (fb_client.api_key or "")
                self.api_key = effective_key
                self._anthropic_api_key = effective_key
                self._anthropic_base_url = fb_base_url
                self._anthropic_client = build_anthropic_client(effective_key, self._anthropic_base_url)
                self._is_anthropic_oauth = _is_oauth_token(effective_key)
                self.client = None
                self._client_kwargs = {}
            else:
                # Swap OpenAI client and config in-place
                self.api_key = fb_client.api_key
                self.client = fb_client
                # Preserve provider-specific headers that
                # resolve_provider_client() may have baked into
                # fb_client via the default_headers kwarg. The OpenAI
                # SDK stores these in _custom_headers. Without this,
                # subsequent request-client rebuilds (via
                # _create_request_openai_client) drop the headers,
                # causing 403s from providers like Kimi Coding that
                # require a User-Agent sentinel.
                fb_headers = getattr(fb_client, "_custom_headers", None)
                if not fb_headers:
                    fb_headers = getattr(fb_client, "default_headers", None)
                self._client_kwargs = {
                    "api_key": fb_client.api_key,
                    "base_url": fb_base_url,
                    **({"default_headers": dict(fb_headers)} if fb_headers else {}),
                }
            # Re-evaluate prompt caching for the new provider/model
            is_native_anthropic = fb_api_mode == "anthropic_messages" and fb_provider == "anthropic"
            self._use_prompt_caching = (
                ("openrouter" in fb_base_url.lower() and "claude" in fb_model.lower())
                or is_native_anthropic
            )
            # Update context compressor limits for the fallback model.
            # Without this, compression decisions use the primary model's
            # context window (e.g. 200K) instead of the fallback's (e.g. 32K),
            # causing oversized sessions to overflow the fallback.
            if hasattr(self, 'context_compressor') and self.context_compressor:
                from agent.model_metadata import get_model_context_length
                fb_context_length = get_model_context_length(
                    self.model,
                    base_url=self.base_url,
                    api_key=self.api_key,
                    provider=self.provider,
                )
                self.context_compressor.update_model(
                    model=self.model,
                    context_length=fb_context_length,
                    base_url=self.base_url,
                    api_key=getattr(self, "api_key", ""),
                    provider=self.provider,
                )
            self._emit_status(
                f"🔄 Primary model failed — switching to fallback: "
                f"{fb_model} via {fb_provider}"
            )
            logging.info(
                "Fallback activated: %s → %s (%s)",
                old_model, fb_model, fb_provider,
            )
            return True
        except Exception as e:
            logging.error("Failed to activate fallback %s: %s", fb_model, e)
            return self._try_activate_fallback()  # try next in chain
Calling this at the top of ``run_conversation()`` makes fallback turn-scoped. The gateway caches agents across messages (``_agent_cache`` in ``gateway/run.py``), so this restoration IS needed there too. """ if not self._fallback_activated: return False rt = self._primary_runtime try: # ── Core runtime state ── self.model = rt["model"] self.provider = rt["provider"] self.base_url = rt["base_url"] # setter updates _base_url_lower self.api_mode = rt["api_mode"] self.api_key = rt["api_key"] self._client_kwargs = dict(rt["client_kwargs"]) self._use_prompt_caching = rt["use_prompt_caching"] # ── Rebuild client for the primary provider ── if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_client self._anthropic_api_key = rt["anthropic_api_key"] self._anthropic_base_url = rt["anthropic_base_url"] self._anthropic_client = build_anthropic_client( rt["anthropic_api_key"], rt["anthropic_base_url"], ) self._is_anthropic_oauth = rt["is_anthropic_oauth"] self.client = None else: self.client = self._create_openai_client( dict(rt["client_kwargs"]), reason="restore_primary", shared=True, ) # ── Restore context engine state ── cc = self.context_compressor cc.update_model( model=rt["compressor_model"], context_length=rt["compressor_context_length"], base_url=rt["compressor_base_url"], api_key=rt["compressor_api_key"], provider=rt["compressor_provider"], ) # ── Reset fallback chain for the new turn ── self._fallback_activated = False self._fallback_index = 0 logging.info( "Primary runtime restored for new turn: %s (%s)", self.model, self.provider, ) return True except Exception as e: logging.warning("Failed to restore primary runtime: %s", e) return False # Which error types indicate a transient transport failure worth # one more attempt with a rebuilt client / connection pool. 
_TRANSIENT_TRANSPORT_ERRORS = frozenset({ "ReadTimeout", "ConnectTimeout", "PoolTimeout", "ConnectError", "RemoteProtocolError", "APIConnectionError", "APITimeoutError", }) def _try_recover_primary_transport( self, api_error: Exception, *, retry_count: int, max_retries: int, ) -> bool: """Attempt one extra primary-provider recovery cycle for transient transport failures. After ``max_retries`` exhaust, rebuild the primary client (clearing stale connection pools) and give it one more attempt before falling back. This is most useful for direct endpoints (custom, Z.AI, Anthropic, OpenAI, local models) where a TCP-level hiccup does not mean the provider is down. Skipped for proxy/aggregator providers (OpenRouter, Nous) which already manage connection pools and retries server-side — if our retries through them are exhausted, one more rebuilt client won't help. """ if self._fallback_activated: return False # Only for transient transport errors error_type = type(api_error).__name__ if error_type not in self._TRANSIENT_TRANSPORT_ERRORS: return False # Skip for aggregator providers — they manage their own retry infra if self._is_openrouter_url(): return False provider_lower = (self.provider or "").strip().lower() if provider_lower in ("nous", "nous-research"): return False try: # Close existing client to release stale connections if getattr(self, "client", None) is not None: try: self._close_openai_client( self.client, reason="primary_recovery", shared=True, ) except Exception: pass # Rebuild from primary snapshot rt = self._primary_runtime self._client_kwargs = dict(rt["client_kwargs"]) self.model = rt["model"] self.provider = rt["provider"] self.base_url = rt["base_url"] self.api_mode = rt["api_mode"] self.api_key = rt["api_key"] if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_client self._anthropic_api_key = rt["anthropic_api_key"] self._anthropic_base_url = rt["anthropic_base_url"] self._anthropic_client = 
build_anthropic_client( rt["anthropic_api_key"], rt["anthropic_base_url"], ) self._is_anthropic_oauth = rt["is_anthropic_oauth"] self.client = None else: self.client = self._create_openai_client( dict(rt["client_kwargs"]), reason="primary_recovery", shared=True, ) wait_time = min(3 + retry_count, 8) self._vprint( f"{self.log_prefix}🔁 Transient {error_type} on {self.provider} — " f"rebuilt client, waiting {wait_time}s before one last primary attempt.", force=True, ) time.sleep(wait_time) return True except Exception as e: logging.warning("Primary transport recovery failed: %s", e) return False # ── End provider fallback ────────────────────────────────────────────── @staticmethod def _content_has_image_parts(content: Any) -> bool: if not isinstance(content, list): return False for part in content: if isinstance(part, dict) and part.get("type") in {"image_url", "input_image"}: return True return False @staticmethod def _materialize_data_url_for_vision(image_url: str) -> tuple[str, Optional[Path]]: header, _, data = str(image_url or "").partition(",") mime = "image/jpeg" if header.startswith("data:"): mime_part = header[len("data:"):].split(";", 1)[0].strip() if mime_part.startswith("image/"): mime = mime_part suffix = { "image/png": ".png", "image/gif": ".gif", "image/webp": ".webp", "image/jpeg": ".jpg", "image/jpg": ".jpg", }.get(mime, ".jpg") tmp = tempfile.NamedTemporaryFile(prefix="anthropic_image_", suffix=suffix, delete=False) with tmp: tmp.write(base64.b64decode(data)) path = Path(tmp.name) return str(path), path def _describe_image_for_anthropic_fallback(self, image_url: str, role: str) -> str: cache_key = hashlib.sha256(str(image_url or "").encode("utf-8")).hexdigest() cached = self._anthropic_image_fallback_cache.get(cache_key) if cached: return cached role_label = { "assistant": "assistant", "tool": "tool result", }.get(role, "user") analysis_prompt = ( "Describe everything visible in this image in thorough detail. 
" "Include any text, code, UI, data, objects, people, layout, colors, " "and any other notable visual information." ) vision_source = str(image_url or "") cleanup_path: Optional[Path] = None if vision_source.startswith("data:"): vision_source, cleanup_path = self._materialize_data_url_for_vision(vision_source) description = "" try: from tools.vision_tools import vision_analyze_tool result_json = asyncio.run( vision_analyze_tool(image_url=vision_source, user_prompt=analysis_prompt) ) result = json.loads(result_json) if isinstance(result_json, str) else {} description = (result.get("analysis") or "").strip() except Exception as e: description = f"Image analysis failed: {e}" finally: if cleanup_path and cleanup_path.exists(): try: cleanup_path.unlink() except OSError: pass if not description: description = "Image analysis failed." note = f"[The {role_label} attached an image. Here's what it contains:\n{description}]" if vision_source and not str(image_url or "").startswith("data:"): note += ( f"\n[If you need a closer look, use vision_analyze with image_url: {vision_source}]" ) self._anthropic_image_fallback_cache[cache_key] = note return note def _preprocess_anthropic_content(self, content: Any, role: str) -> Any: if not self._content_has_image_parts(content): return content text_parts: List[str] = [] image_notes: List[str] = [] for part in content: if isinstance(part, str): if part.strip(): text_parts.append(part.strip()) continue if not isinstance(part, dict): continue ptype = part.get("type") if ptype in {"text", "input_text"}: text = str(part.get("text", "") or "").strip() if text: text_parts.append(text) continue if ptype in {"image_url", "input_image"}: image_data = part.get("image_url", {}) image_url = image_data.get("url", "") if isinstance(image_data, dict) else str(image_data or "") if image_url: image_notes.append(self._describe_image_for_anthropic_fallback(image_url, role)) else: image_notes.append("[An image was attached but no image source was 
available.]") continue text = str(part.get("text", "") or "").strip() if text: text_parts.append(text) prefix = "\n\n".join(note for note in image_notes if note).strip() suffix = "\n".join(text for text in text_parts if text).strip() if prefix and suffix: return f"{prefix}\n\n{suffix}" if prefix: return prefix if suffix: return suffix return "[A multimodal message was converted to text for Anthropic compatibility.]" def _prepare_anthropic_messages_for_api(self, api_messages: list) -> list: if not any( isinstance(msg, dict) and self._content_has_image_parts(msg.get("content")) for msg in api_messages ): return api_messages transformed = copy.deepcopy(api_messages) for msg in transformed: if not isinstance(msg, dict): continue msg["content"] = self._preprocess_anthropic_content( msg.get("content"), str(msg.get("role", "user") or "user"), ) return transformed def _anthropic_preserve_dots(self) -> bool: """True when using an anthropic-compatible endpoint that preserves dots in model names. Alibaba/DashScope keeps dots (e.g. qwen3.5-plus). MiniMax keeps dots (e.g. MiniMax-M2.7). OpenCode Go/Zen keeps dots for non-Claude models (e.g. minimax-m2.5-free). ZAI/Zhipu keeps dots (e.g. 
glm-4.7, glm-5.1).""" if (getattr(self, "provider", "") or "").lower() in {"alibaba", "minimax", "minimax-cn", "opencode-go", "opencode-zen", "zai"}: return True base = (getattr(self, "base_url", "") or "").lower() return "dashscope" in base or "aliyuncs" in base or "minimax" in base or "opencode.ai/zen/" in base or "bigmodel.cn" in base def _is_qwen_portal(self) -> bool: """Return True when the base URL targets Qwen Portal.""" return "portal.qwen.ai" in self._base_url_lower def _qwen_prepare_chat_messages(self, api_messages: list) -> list: prepared = copy.deepcopy(api_messages) if not prepared: return prepared for msg in prepared: if not isinstance(msg, dict): continue content = msg.get("content") if isinstance(content, str): msg["content"] = [{"type": "text", "text": content}] elif isinstance(content, list): # Normalize: convert bare strings to text dicts, keep dicts as-is. # deepcopy already created independent copies, no need for dict(). normalized_parts = [] for part in content: if isinstance(part, str): normalized_parts.append({"type": "text", "text": part}) elif isinstance(part, dict): normalized_parts.append(part) if normalized_parts: msg["content"] = normalized_parts # Inject cache_control on the last part of the system message. 
for msg in prepared: if isinstance(msg, dict) and msg.get("role") == "system": content = msg.get("content") if isinstance(content, list) and content and isinstance(content[-1], dict): content[-1]["cache_control"] = {"type": "ephemeral"} break return prepared def _qwen_prepare_chat_messages_inplace(self, messages: list) -> None: """In-place variant — mutates an already-copied message list.""" if not messages: return for msg in messages: if not isinstance(msg, dict): continue content = msg.get("content") if isinstance(content, str): msg["content"] = [{"type": "text", "text": content}] elif isinstance(content, list): normalized_parts = [] for part in content: if isinstance(part, str): normalized_parts.append({"type": "text", "text": part}) elif isinstance(part, dict): normalized_parts.append(part) if normalized_parts: msg["content"] = normalized_parts for msg in messages: if isinstance(msg, dict) and msg.get("role") == "system": content = msg.get("content") if isinstance(content, list) and content and isinstance(content[-1], dict): content[-1]["cache_control"] = {"type": "ephemeral"} break def _build_api_kwargs(self, api_messages: list) -> dict: """Build the keyword arguments dict for the active API mode.""" if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_kwargs anthropic_messages = self._prepare_anthropic_messages_for_api(api_messages) # Pass context_length (total input+output window) so the adapter can # clamp max_tokens (output cap) when the user configured a smaller # context window than the model's native output limit. ctx_len = getattr(self, "context_compressor", None) ctx_len = ctx_len.context_length if ctx_len else None # _ephemeral_max_output_tokens is set for one call when the API # returns "max_tokens too large given prompt" — it caps output to # the available window space without touching context_length. 
    def _build_api_kwargs(self, api_messages: list) -> dict:
        """Build the keyword arguments dict for the active API mode.

        Dispatches on ``self.api_mode``:
        - ``anthropic_messages``: native Anthropic Messages kwargs.
        - ``bedrock_converse``: AWS Bedrock Converse kwargs (boto3 path).
        - ``codex_responses``: OpenAI Responses API kwargs.
        - otherwise: OpenAI chat-completions kwargs with provider-specific
          extras (OpenRouter preferences, Qwen Portal, Nous, Ollama).
        """
        if self.api_mode == "anthropic_messages":
            from agent.anthropic_adapter import build_anthropic_kwargs
            anthropic_messages = self._prepare_anthropic_messages_for_api(api_messages)
            # Pass context_length (total input+output window) so the adapter can
            # clamp max_tokens (output cap) when the user configured a smaller
            # context window than the model's native output limit.
            ctx_len = getattr(self, "context_compressor", None)
            ctx_len = ctx_len.context_length if ctx_len else None
            # _ephemeral_max_output_tokens is set for one call when the API
            # returns "max_tokens too large given prompt" — it caps output to
            # the available window space without touching context_length.
            ephemeral_out = getattr(self, "_ephemeral_max_output_tokens", None)
            if ephemeral_out is not None:
                self._ephemeral_max_output_tokens = None  # consume immediately
            return build_anthropic_kwargs(
                model=self.model,
                messages=anthropic_messages,
                tools=self.tools,
                max_tokens=ephemeral_out if ephemeral_out is not None else self.max_tokens,
                reasoning_config=self.reasoning_config,
                is_oauth=self._is_anthropic_oauth,
                preserve_dots=self._anthropic_preserve_dots(),
                context_length=ctx_len,
                base_url=getattr(self, "_anthropic_base_url", None),
                fast_mode=(self.request_overrides or {}).get("speed") == "fast",
            )
        # AWS Bedrock native Converse API — bypasses the OpenAI client entirely.
        # The adapter handles message/tool conversion and boto3 calls directly.
        if self.api_mode == "bedrock_converse":
            from agent.bedrock_adapter import build_converse_kwargs
            region = getattr(self, "_bedrock_region", None) or "us-east-1"
            guardrail = getattr(self, "_bedrock_guardrail_config", None)
            return {
                "__bedrock_converse__": True,
                "__bedrock_region__": region,
                **build_converse_kwargs(
                    model=self.model,
                    messages=api_messages,
                    tools=self.tools,
                    max_tokens=self.max_tokens or 4096,
                    temperature=None,  # Let the model use its default
                    guardrail_config=guardrail,
                ),
            }
        if self.api_mode == "codex_responses":
            # Responses API: system message becomes `instructions`, the
            # rest of the history becomes `input` items.
            instructions = ""
            payload_messages = api_messages
            if api_messages and api_messages[0].get("role") == "system":
                instructions = str(api_messages[0].get("content") or "").strip()
                payload_messages = api_messages[1:]
            if not instructions:
                instructions = DEFAULT_AGENT_IDENTITY
            is_github_responses = (
                "models.github.ai" in self.base_url.lower()
                or "api.githubcopilot.com" in self.base_url.lower()
            )
            is_codex_backend = (
                self.provider == "openai-codex"
                or "chatgpt.com/backend-api/codex" in self.base_url.lower()
            )
            # Resolve reasoning effort: config > default (medium)
            reasoning_effort = "medium"
            reasoning_enabled = True
            if self.reasoning_config and isinstance(self.reasoning_config, dict):
                if self.reasoning_config.get("enabled") is False:
                    reasoning_enabled = False
                elif self.reasoning_config.get("effort"):
                    reasoning_effort = self.reasoning_config["effort"]
            # Clamp effort levels not supported by the Responses API model.
            # GPT-5.4 supports none/low/medium/high/xhigh but not "minimal".
            # "minimal" is valid on OpenRouter and GPT-5 but fails on 5.2/5.4.
            _effort_clamp = {"minimal": "low"}
            reasoning_effort = _effort_clamp.get(reasoning_effort, reasoning_effort)
            kwargs = {
                "model": self.model,
                "instructions": instructions,
                "input": self._chat_messages_to_responses_input(payload_messages),
                "tools": self._responses_tools(),
                "tool_choice": "auto",
                "parallel_tool_calls": True,
                "store": False,
            }
            if not is_github_responses:
                kwargs["prompt_cache_key"] = self.session_id
            is_xai_responses = self.provider == "xai" or "api.x.ai" in (self.base_url or "").lower()
            if reasoning_enabled and is_xai_responses:
                # xAI reasons automatically — no effort param, just include encrypted content
                kwargs["include"] = ["reasoning.encrypted_content"]
            elif reasoning_enabled:
                if is_github_responses:
                    # Copilot's Responses route advertises reasoning-effort support,
                    # but not OpenAI-specific prompt cache or encrypted reasoning
                    # fields. Keep the payload to the documented subset.
                    github_reasoning = self._github_models_reasoning_extra_body()
                    if github_reasoning is not None:
                        kwargs["reasoning"] = github_reasoning
                else:
                    kwargs["reasoning"] = {"effort": reasoning_effort, "summary": "auto"}
                    kwargs["include"] = ["reasoning.encrypted_content"]
            elif not is_github_responses and not is_xai_responses:
                kwargs["include"] = []
            if self.request_overrides:
                kwargs.update(self.request_overrides)
            if self.max_tokens is not None and not is_codex_backend:
                kwargs["max_output_tokens"] = self.max_tokens
            if is_xai_responses and getattr(self, "session_id", None):
                kwargs["extra_headers"] = {"x-grok-conv-id": self.session_id}
            return kwargs
        # ── Chat-completions path ──
        # Detect Codex-only replay state that must be stripped before
        # sending to strict chat-completions APIs.
        sanitized_messages = api_messages
        needs_sanitization = False
        for msg in api_messages:
            if not isinstance(msg, dict):
                continue
            if "codex_reasoning_items" in msg:
                needs_sanitization = True
                break
            tool_calls = msg.get("tool_calls")
            if isinstance(tool_calls, list):
                for tool_call in tool_calls:
                    if not isinstance(tool_call, dict):
                        continue
                    if "call_id" in tool_call or "response_item_id" in tool_call:
                        needs_sanitization = True
                        break
            if needs_sanitization:
                break
        if needs_sanitization:
            sanitized_messages = copy.deepcopy(api_messages)
            for msg in sanitized_messages:
                if not isinstance(msg, dict):
                    continue
                # Codex-only replay state must not leak into strict chat-completions APIs.
                msg.pop("codex_reasoning_items", None)
                tool_calls = msg.get("tool_calls")
                if isinstance(tool_calls, list):
                    for tool_call in tool_calls:
                        if isinstance(tool_call, dict):
                            tool_call.pop("call_id", None)
                            tool_call.pop("response_item_id", None)
        # Qwen portal: normalize content to list-of-dicts, inject cache_control.
        # Must run AFTER codex sanitization so we transform the final messages.
        # If sanitization already deepcopied, reuse that copy (in-place).
        if self._is_qwen_portal():
            if sanitized_messages is api_messages:
                # No sanitization was done — we need our own copy.
                sanitized_messages = self._qwen_prepare_chat_messages(sanitized_messages)
            else:
                # Already a deepcopy — transform in place to avoid a second deepcopy.
                self._qwen_prepare_chat_messages_inplace(sanitized_messages)
        # GPT-5 and Codex models respond better to 'developer' than 'system'
        # for instruction-following. Swap the role at the API boundary so
        # internal message representation stays uniform ("system").
        _model_lower = (self.model or "").lower()
        if (
            sanitized_messages
            and sanitized_messages[0].get("role") == "system"
            and any(p in _model_lower for p in DEVELOPER_ROLE_MODELS)
        ):
            # Shallow-copy the list + first message only — rest stays shared.
            sanitized_messages = list(sanitized_messages)
            sanitized_messages[0] = {**sanitized_messages[0], "role": "developer"}
        provider_preferences = {}
        if self.providers_allowed:
            provider_preferences["only"] = self.providers_allowed
        if self.providers_ignored:
            provider_preferences["ignore"] = self.providers_ignored
        if self.providers_order:
            provider_preferences["order"] = self.providers_order
        if self.provider_sort:
            provider_preferences["sort"] = self.provider_sort
        if self.provider_require_parameters:
            provider_preferences["require_parameters"] = True
        if self.provider_data_collection:
            provider_preferences["data_collection"] = self.provider_data_collection
        api_kwargs = {
            "model": self.model,
            "messages": sanitized_messages,
            "timeout": float(os.getenv("HERMES_API_TIMEOUT", 1800.0)),
        }
        if self._is_qwen_portal():
            api_kwargs["metadata"] = {
                "sessionId": self.session_id or "hermes",
                "promptId": str(uuid.uuid4()),
            }
        if self.tools:
            api_kwargs["tools"] = self.tools
        if self.max_tokens is not None:
            api_kwargs.update(self._max_tokens_param(self.max_tokens))
        elif self._is_qwen_portal():
            # Qwen Portal defaults to a very low max_tokens when omitted.
            # Reasoning models (qwen3-coder-plus) exhaust that budget on
            # thinking tokens alone, causing the portal to return
            # finish_reason="stop" with truncated output — the agent sees
            # this as an intentional stop and exits the loop. Send 65536
            # (the documented max output for qwen3-coder models) so the
            # model has adequate output budget for tool calls.
            api_kwargs.update(self._max_tokens_param(65536))
        elif (self._is_openrouter_url() or "nousresearch" in self._base_url_lower) and "claude" in (self.model or "").lower():
            # OpenRouter and Nous Portal translate requests to Anthropic's
            # Messages API, which requires max_tokens as a mandatory field.
            # When we omit it, the proxy picks a default that can be too
            # low — the model spends its output budget on thinking and has
            # almost nothing left for the actual response (especially large
            # tool calls like write_file). Sending the model's real output
            # limit ensures full capacity.
            try:
                from agent.anthropic_adapter import _get_anthropic_max_output
                _model_output_limit = _get_anthropic_max_output(self.model)
                api_kwargs["max_tokens"] = _model_output_limit
            except Exception:
                pass  # fail open — let the proxy pick its default
        extra_body = {}
        _is_openrouter = self._is_openrouter_url()
        _is_github_models = (
            "models.github.ai" in self._base_url_lower
            or "api.githubcopilot.com" in self._base_url_lower
        )
        # Provider preferences (only, ignore, order, sort) are OpenRouter-
        # specific. Only send to OpenRouter-compatible endpoints.
        # TODO: Nous Portal will add transparent proxy support — re-enable
        # for _is_nous when their backend is updated.
        if provider_preferences and _is_openrouter:
            extra_body["provider"] = provider_preferences
        _is_nous = "nousresearch" in self._base_url_lower
        if self._supports_reasoning_extra_body():
            if _is_github_models:
                github_reasoning = self._github_models_reasoning_extra_body()
                if github_reasoning is not None:
                    extra_body["reasoning"] = github_reasoning
            else:
                if self.reasoning_config is not None:
                    rc = dict(self.reasoning_config)
                    # Nous Portal requires reasoning enabled — don't send
                    # enabled=false to it (would cause 400).
                    if _is_nous and rc.get("enabled") is False:
                        pass  # omit reasoning entirely for Nous when disabled
                    else:
                        extra_body["reasoning"] = rc
                else:
                    extra_body["reasoning"] = {
                        "enabled": True,
                        "effort": "medium"
                    }
        # Nous Portal product attribution
        if _is_nous:
            extra_body["tags"] = ["product=hermes-agent"]
        # Ollama num_ctx: override the 2048 default so the model actually
        # uses the context window it was trained for. Passed via the OpenAI
        # SDK's extra_body → options.num_ctx, which Ollama's OpenAI-compat
        # endpoint forwards to the runner as --ctx-size.
        if self._ollama_num_ctx:
            options = extra_body.get("options", {})
            options["num_ctx"] = self._ollama_num_ctx
            extra_body["options"] = options
        # Ollama / custom provider: pass think=false when reasoning is disabled.
        # Ollama does not recognise the OpenRouter-style `reasoning` extra_body
        # field, so we use its native `think` parameter instead.
        # This prevents thinking-capable models (Qwen3, etc.) from generating
        # <think> blocks and producing empty-response errors when the user has
        # set reasoning_effort: none.
        if self.provider == "custom" and self.reasoning_config and isinstance(self.reasoning_config, dict):
            _effort = (self.reasoning_config.get("effort") or "").strip().lower()
            _enabled = self.reasoning_config.get("enabled", True)
            if _effort == "none" or _enabled is False:
                extra_body["think"] = False
        if self._is_qwen_portal():
            extra_body["vl_high_resolution_images"] = True
        if extra_body:
            api_kwargs["extra_body"] = extra_body
        # Priority Processing / generic request overrides (e.g. service_tier).
        # Applied last so overrides win over any defaults set above.
        if self.request_overrides:
            api_kwargs.update(self.request_overrides)
        return api_kwargs
supported_efforts: return None if self.reasoning_config and isinstance(self.reasoning_config, dict): if self.reasoning_config.get("enabled") is False: return None requested_effort = str( self.reasoning_config.get("effort", "medium") ).strip().lower() else: requested_effort = "medium" if requested_effort == "xhigh" and "high" in supported_efforts: requested_effort = "high" elif requested_effort not in supported_efforts: if requested_effort == "minimal" and "low" in supported_efforts: requested_effort = "low" elif "medium" in supported_efforts: requested_effort = "medium" else: requested_effort = supported_efforts[0] return {"effort": requested_effort} def _build_assistant_message(self, assistant_message, finish_reason: str) -> dict: """Build a normalized assistant message dict from an API response message. Handles reasoning extraction, reasoning_details, and optional tool_calls so both the tool-call path and the final-response path share one builder. """ reasoning_text = self._extract_reasoning(assistant_message) _from_structured = bool(reasoning_text) # Fallback: extract inline blocks from content when no structured # reasoning fields are present (some models/providers embed thinking # directly in the content rather than returning separate API fields). 
if not reasoning_text: content = assistant_message.content or "" think_blocks = re.findall(r'(.*?)', content, flags=re.DOTALL) if think_blocks: combined = "\n\n".join(b.strip() for b in think_blocks if b.strip()) reasoning_text = combined or None if reasoning_text and self.verbose_logging: logging.debug(f"Captured reasoning ({len(reasoning_text)} chars): {reasoning_text}") if reasoning_text and self.reasoning_callback: # Skip callback when streaming is active — reasoning was already # displayed during the stream via one of two paths: # (a) _fire_reasoning_delta (structured reasoning_content deltas) # (b) _stream_delta tag extraction (/) # When streaming is NOT active, always fire so non-streaming modes # (gateway, batch, quiet) still get reasoning. # Any reasoning that wasn't shown during streaming is caught by the # CLI post-response display fallback (cli.py _reasoning_shown_this_turn). if not self.stream_delta_callback: try: self.reasoning_callback(reasoning_text) except Exception: pass # Sanitize surrogates from API response — some models (e.g. Kimi/GLM via Ollama) # can return invalid surrogate code points that crash json.dumps() on persist. _raw_content = assistant_message.content or "" _san_content = _sanitize_surrogates(_raw_content) if reasoning_text: reasoning_text = _sanitize_surrogates(reasoning_text) msg = { "role": "assistant", "content": _san_content, "reasoning": reasoning_text, "finish_reason": finish_reason, } if hasattr(assistant_message, 'reasoning_details') and assistant_message.reasoning_details: # Pass reasoning_details back unmodified so providers (OpenRouter, # Anthropic, OpenAI) can maintain reasoning continuity across turns. # Each provider may include opaque fields (signature, encrypted_content) # that must be preserved exactly. 
raw_details = assistant_message.reasoning_details preserved = [] for d in raw_details: if isinstance(d, dict): preserved.append(d) elif hasattr(d, "__dict__"): preserved.append(d.__dict__) elif hasattr(d, "model_dump"): preserved.append(d.model_dump()) if preserved: msg["reasoning_details"] = preserved # Codex Responses API: preserve encrypted reasoning items for # multi-turn continuity. These get replayed as input on the next turn. codex_items = getattr(assistant_message, "codex_reasoning_items", None) if codex_items: msg["codex_reasoning_items"] = codex_items if assistant_message.tool_calls: tool_calls = [] for tool_call in assistant_message.tool_calls: raw_id = getattr(tool_call, "id", None) call_id = getattr(tool_call, "call_id", None) if not isinstance(call_id, str) or not call_id.strip(): embedded_call_id, _ = self._split_responses_tool_id(raw_id) call_id = embedded_call_id if not isinstance(call_id, str) or not call_id.strip(): if isinstance(raw_id, str) and raw_id.strip(): call_id = raw_id.strip() else: _fn = getattr(tool_call, "function", None) _fn_name = getattr(_fn, "name", "") if _fn else "" _fn_args = getattr(_fn, "arguments", "{}") if _fn else "{}" call_id = self._deterministic_call_id(_fn_name, _fn_args, len(tool_calls)) call_id = call_id.strip() response_item_id = getattr(tool_call, "response_item_id", None) if not isinstance(response_item_id, str) or not response_item_id.strip(): _, embedded_response_item_id = self._split_responses_tool_id(raw_id) response_item_id = embedded_response_item_id response_item_id = self._derive_responses_function_call_id( call_id, response_item_id if isinstance(response_item_id, str) else None, ) tc_dict = { "id": call_id, "call_id": call_id, "response_item_id": response_item_id, "type": tool_call.type, "function": { "name": tool_call.function.name, "arguments": tool_call.function.arguments }, } # Preserve extra_content (e.g. Gemini thought_signature) so it # is sent back on subsequent API calls. 
Without this, Gemini 3 # thinking models reject the request with a 400 error. extra = getattr(tool_call, "extra_content", None) if extra is not None: if hasattr(extra, "model_dump"): extra = extra.model_dump() tc_dict["extra_content"] = extra tool_calls.append(tc_dict) msg["tool_calls"] = tool_calls return msg @staticmethod def _sanitize_tool_calls_for_strict_api(api_msg: dict) -> dict: """Strip Codex Responses API fields from tool_calls for strict providers. Providers like Mistral, Fireworks, and other strict OpenAI-compatible APIs validate the Chat Completions schema and reject unknown fields (call_id, response_item_id) with 400 or 422 errors. These fields are preserved in the internal message history — this method only modifies the outgoing API copy. Creates new tool_call dicts rather than mutating in-place, so the original messages list retains call_id/response_item_id for Codex Responses API compatibility (e.g. if the session falls back to a Codex provider later). Fields stripped: call_id, response_item_id """ tool_calls = api_msg.get("tool_calls") if not isinstance(tool_calls, list): return api_msg _STRIP_KEYS = {"call_id", "response_item_id"} api_msg["tool_calls"] = [ {k: v for k, v in tc.items() if k not in _STRIP_KEYS} if isinstance(tc, dict) else tc for tc in tool_calls ] return api_msg def _should_sanitize_tool_calls(self) -> bool: """Determine if tool_calls need sanitization for strict APIs. Codex Responses API uses fields like call_id and response_item_id that are not part of the standard Chat Completions schema. These fields must be stripped when calling any other API to avoid validation errors (400 Bad Request). Returns: bool: True if sanitization is needed (non-Codex API), False otherwise. """ return self.api_mode != "codex_responses" def flush_memories(self, messages: list = None, min_turns: int = None): """Give the model one turn to persist memories before context is lost. Called before compression, session reset, or CLI exit. 
    Injects a flush message, makes one API call, executes any memory tool
    calls, then strips all flush artifacts from the message list.

    Args:
        messages: The current conversation messages. If None, uses
            self._session_messages (last run_conversation state).
        min_turns: Minimum user turns required to trigger the flush.
            None = use config value (flush_min_turns).
            0 = always flush (used for compression).
    """
    # flush_min_turns == 0 in config means the feature is disabled for the
    # implicit (min_turns=None) call sites; explicit min_turns still works.
    if self._memory_flush_min_turns == 0 and min_turns is None:
        return
    # Nothing to do without the memory tool or a backing store.
    if "memory" not in self.valid_tool_names or not self._memory_store:
        return
    effective_min = min_turns if min_turns is not None else self._memory_flush_min_turns
    if self._user_turn_count < effective_min:
        return
    if messages is None:
        messages = getattr(self, '_session_messages', None)
    # Too little conversation to be worth a flush round-trip.
    if not messages or len(messages) < 3:
        return
    flush_content = (
        "[System: The session is being compressed. "
        "Save anything worth remembering — prioritize user preferences, "
        "corrections, and recurring patterns over task-specific details.]"
    )
    # Unique per-call sentinel so the finally-block can strip exactly the
    # artifacts added by THIS flush, even under re-entrancy.
    _sentinel = f"__flush_{id(self)}_{time.monotonic()}"
    flush_msg = {"role": "user", "content": flush_content, "_flush_sentinel": _sentinel}
    messages.append(flush_msg)
    try:
        # Build API messages for the flush call
        _needs_sanitize = self._should_sanitize_tool_calls()
        api_messages = []
        for msg in messages:
            api_msg = msg.copy()
            if msg.get("role") == "assistant":
                reasoning = msg.get("reasoning")
                if reasoning:
                    api_msg["reasoning_content"] = reasoning
                api_msg.pop("reasoning", None)
                api_msg.pop("finish_reason", None)
            # Internal bookkeeping keys must never reach the API.
            api_msg.pop("_flush_sentinel", None)
            api_msg.pop("_thinking_prefill", None)
            if _needs_sanitize:
                self._sanitize_tool_calls_for_strict_api(api_msg)
            api_messages.append(api_msg)
        if self._cached_system_prompt:
            api_messages = [{"role": "system", "content": self._cached_system_prompt}] + api_messages
        # Make one API call with only the memory tool available
        memory_tool_def = None
        for t in (self.tools or []):
            if t.get("function", {}).get("name") == "memory":
                memory_tool_def = t
                break
        if not memory_tool_def:
            messages.pop()  # remove flush msg
            return
        # Use auxiliary client for the flush call when available --
        # it's cheaper and avoids Codex Responses API incompatibility.
        from agent.auxiliary_client import call_llm as _call_llm
        _aux_available = True
        try:
            response = _call_llm(
                task="flush_memories",
                messages=api_messages,
                tools=[memory_tool_def],
                temperature=0.3,
                max_tokens=5120,
                # timeout resolved from auxiliary.flush_memories.timeout config
            )
        except RuntimeError:
            _aux_available = False
            response = None
        if not _aux_available and self.api_mode == "codex_responses":
            # No auxiliary client -- use the Codex Responses path directly
            codex_kwargs = self._build_api_kwargs(api_messages)
            codex_kwargs["tools"] = self._responses_tools([memory_tool_def])
            codex_kwargs["temperature"] = 0.3
            if "max_output_tokens" in codex_kwargs:
                codex_kwargs["max_output_tokens"] = 5120
            response = self._run_codex_stream(codex_kwargs)
        elif not _aux_available and self.api_mode == "anthropic_messages":
            # Native Anthropic — use the Anthropic client directly
            from agent.anthropic_adapter import build_anthropic_kwargs as _build_ant_kwargs
            ant_kwargs = _build_ant_kwargs(
                model=self.model,
                messages=api_messages,
                tools=[memory_tool_def],
                max_tokens=5120,
                reasoning_config=None,
                preserve_dots=self._anthropic_preserve_dots(),
            )
            response = self._anthropic_messages_create(ant_kwargs)
        elif not _aux_available:
            # Plain OpenAI-compatible Chat Completions fallback.
            api_kwargs = {
                "model": self.model,
                "messages": api_messages,
                "tools": [memory_tool_def],
                "temperature": 0.3,
                **self._max_tokens_param(5120),
            }
            from agent.auxiliary_client import _get_task_timeout
            response = self._ensure_primary_openai_client(reason="flush_memories").chat.completions.create(
                **api_kwargs, timeout=_get_task_timeout("flush_memories")
            )
        # Extract tool calls from the response, handling all API formats
        tool_calls = []
        if self.api_mode == "codex_responses" and not _aux_available:
            assistant_msg, _ = self._normalize_codex_response(response)
            if assistant_msg and assistant_msg.tool_calls:
                tool_calls = assistant_msg.tool_calls
        elif self.api_mode == "anthropic_messages" and not _aux_available:
            from agent.anthropic_adapter import normalize_anthropic_response as _nar_flush
            _flush_msg, _ = _nar_flush(response, strip_tool_prefix=self._is_anthropic_oauth)
            if _flush_msg and _flush_msg.tool_calls:
                tool_calls = _flush_msg.tool_calls
        elif hasattr(response, "choices") and response.choices:
            assistant_message = response.choices[0].message
            if assistant_message.tool_calls:
                tool_calls = assistant_message.tool_calls
        # Execute only memory tool calls; everything else is ignored.
        for tc in tool_calls:
            if tc.function.name == "memory":
                try:
                    args = json.loads(tc.function.arguments)
                    flush_target = args.get("target", "memory")
                    from tools.memory_tool import memory_tool as _memory_tool
                    _memory_tool(
                        action=args.get("action"),
                        target=flush_target,
                        content=args.get("content"),
                        old_text=args.get("old_text"),
                        store=self._memory_store,
                    )
                    if not self.quiet_mode:
                        print(f" 🧠 Memory flush: saved to {args.get('target', 'memory')}")
                except Exception as e:
                    logger.debug("Memory flush tool call failed: %s", e)
    except Exception as e:
        logger.debug("Memory flush API call failed: %s", e)
    finally:
        # Strip flush artifacts: remove everything from the flush message onward.
        # Use sentinel marker instead of identity check for robustness.
        while messages and messages[-1].get("_flush_sentinel") != _sentinel:
            messages.pop()
            if not messages:
                break
        if messages and messages[-1].get("_flush_sentinel") == _sentinel:
            messages.pop()

def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default", focus_topic: str = None) -> tuple:
    """Compress conversation context and split the session in SQLite.

    Args:
        focus_topic: Optional focus string for guided compression — the
            summariser will prioritise preserving information related to
            this topic. Inspired by Claude Code's ``/compact <topic>``.
Returns: (compressed_messages, new_system_prompt) tuple """ _pre_msg_count = len(messages) logger.info( "context compression started: session=%s messages=%d tokens=~%s model=%s focus=%r", self.session_id or "none", _pre_msg_count, f"{approx_tokens:,}" if approx_tokens else "unknown", self.model, focus_topic, ) # Pre-compression memory flush: let the model save memories before they're lost self.flush_memories(messages, min_turns=0) # Notify external memory provider before compression discards context if self._memory_manager: try: self._memory_manager.on_pre_compress(messages) except Exception: pass compressed = self.context_compressor.compress(messages, current_tokens=approx_tokens, focus_topic=focus_topic) todo_snapshot = self._todo_store.format_for_injection() if todo_snapshot: compressed.append({"role": "user", "content": todo_snapshot}) self._invalidate_system_prompt() new_system_prompt = self._build_system_prompt(system_message) self._cached_system_prompt = new_system_prompt if self._session_db: try: # Propagate title to the new session with auto-numbering old_title = self._session_db.get_session_title(self.session_id) # Trigger memory extraction on the old session before it rotates. 
self.commit_memory_session(messages) self._session_db.end_session(self.session_id, "compression") old_session_id = self.session_id self.session_id = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:6]}" # Update session_log_file to point to the new session's JSON file self.session_log_file = self.logs_dir / f"session_{self.session_id}.json" self._session_db.create_session( session_id=self.session_id, source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"), model=self.model, parent_session_id=old_session_id, ) # Auto-number the title for the continuation session if old_title: try: new_title = self._session_db.get_next_title_in_lineage(old_title) self._session_db.set_session_title(self.session_id, new_title) except (ValueError, Exception) as e: logger.debug("Could not propagate title on compression: %s", e) self._session_db.update_system_prompt(self.session_id, new_system_prompt) # Reset flush cursor — new session starts with no messages written self._last_flushed_db_idx = 0 except Exception as e: logger.warning("Session DB compression split failed — new session will NOT be indexed: %s", e) # Warn on repeated compressions (quality degrades with each pass) _cc = self.context_compressor.compression_count if _cc >= 2: self._vprint( f"{self.log_prefix}⚠️ Session compressed {_cc} times — " f"accuracy may degrade. Consider /new to start fresh.", force=True, ) # Update token estimate after compaction so pressure calculations # use the post-compression count, not the stale pre-compression one. _compressed_est = ( estimate_tokens_rough(new_system_prompt) + estimate_messages_tokens_rough(compressed) ) self.context_compressor.last_prompt_tokens = _compressed_est self.context_compressor.last_completion_tokens = 0 # Clear the file-read dedup cache. After compression the original # read content is summarised away — if the model re-reads the same # file it needs the full content, not a "file unchanged" stub. 
try: from tools.file_tools import reset_file_dedup reset_file_dedup(task_id) except Exception: pass logger.info( "context compression done: session=%s messages=%d->%d tokens=~%s", self.session_id or "none", _pre_msg_count, len(compressed), f"{_compressed_est:,}", ) return compressed, new_system_prompt def _execute_tool_calls(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None: """Execute tool calls from the assistant message and append results to messages. Dispatches to concurrent execution only for batches that look independent: read-only tools may always share the parallel path, while file reads/writes may do so only when their target paths do not overlap. """ tool_calls = assistant_message.tool_calls # Allow _vprint during tool execution even with stream consumers self._executing_tools = True try: if not _should_parallelize_tool_batch(tool_calls): return self._execute_tool_calls_sequential( assistant_message, messages, effective_task_id, api_call_count ) return self._execute_tool_calls_concurrent( assistant_message, messages, effective_task_id, api_call_count ) finally: self._executing_tools = False def _invoke_tool(self, function_name: str, function_args: dict, effective_task_id: str, tool_call_id: Optional[str] = None) -> str: """Invoke a single tool and return the result string. No display logic. Handles both agent-level tools (todo, memory, etc.) and registry-dispatched tools. Used by the concurrent execution path; the sequential path retains its own inline invocation for backward-compatible display handling. """ # Check plugin hooks for a block directive before executing anything. 
    # Plugin policy gate: a non-None block message means the call is vetoed
    # and the message is returned to the model as a JSON error payload.
    block_message: Optional[str] = None
    try:
        from hermes_cli.plugins import get_pre_tool_call_block_message
        block_message = get_pre_tool_call_block_message(
            function_name,
            function_args,
            task_id=effective_task_id or "",
        )
    except Exception:
        pass
    if block_message is not None:
        return json.dumps({"error": block_message}, ensure_ascii=False)
    # NOTE: the elif order below is significant — built-in agent tools are
    # matched by name first, then the external memory provider's tools, and
    # only then the generic registry dispatch.
    if function_name == "todo":
        from tools.todo_tool import todo_tool as _todo_tool
        return _todo_tool(
            todos=function_args.get("todos"),
            merge=function_args.get("merge", False),
            store=self._todo_store,
        )
    elif function_name == "session_search":
        if not self._session_db:
            return json.dumps({"success": False, "error": "Session database not available."})
        from tools.session_search_tool import session_search as _session_search
        return _session_search(
            query=function_args.get("query", ""),
            role_filter=function_args.get("role_filter"),
            limit=function_args.get("limit", 3),
            db=self._session_db,
            current_session_id=self.session_id,
        )
    elif function_name == "memory":
        target = function_args.get("target", "memory")
        from tools.memory_tool import memory_tool as _memory_tool
        result = _memory_tool(
            action=function_args.get("action"),
            target=target,
            content=function_args.get("content"),
            old_text=function_args.get("old_text"),
            store=self._memory_store,
        )
        # Bridge: notify external memory provider of built-in memory writes
        if self._memory_manager and function_args.get("action") in ("add", "replace"):
            try:
                self._memory_manager.on_memory_write(
                    function_args.get("action", ""),
                    target,
                    function_args.get("content", ""),
                )
            except Exception:
                pass
        return result
    elif self._memory_manager and self._memory_manager.has_tool(function_name):
        return self._memory_manager.handle_tool_call(function_name, function_args)
    elif function_name == "clarify":
        from tools.clarify_tool import clarify_tool as _clarify_tool
        return _clarify_tool(
            question=function_args.get("question", ""),
            choices=function_args.get("choices"),
            callback=self.clarify_callback,
        )
    elif function_name == "delegate_task":
        from tools.delegate_tool import delegate_task as _delegate_task
        return _delegate_task(
            goal=function_args.get("goal"),
            context=function_args.get("context"),
            toolsets=function_args.get("toolsets"),
            tasks=function_args.get("tasks"),
            max_iterations=function_args.get("max_iterations"),
            parent_agent=self,
        )
    else:
        # Generic registry dispatch for everything else. The plugin hook was
        # already consulted above, hence skip_pre_tool_call_hook=True.
        return handle_function_call(
            function_name,
            function_args,
            effective_task_id,
            tool_call_id=tool_call_id,
            session_id=self.session_id or "",
            enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None,
            skip_pre_tool_call_hook=True,
        )

@staticmethod
def _wrap_verbose(label: str, text: str, indent: str = " ") -> str:
    """Word-wrap verbose tool output to fit the terminal width.

    Splits *text* on existing newlines and wraps each line individually,
    preserving intentional line breaks (e.g. pretty-printed JSON).
    Returns a ready-to-print string with *label* on the first line and
    continuation lines indented.
    """
    import shutil as _shutil
    import textwrap as _tw
    # Fallback to 120 columns when the terminal size cannot be detected.
    cols = _shutil.get_terminal_size((120, 24)).columns
    wrap_width = max(40, cols - len(indent))
    out_lines: list[str] = []
    for raw_line in text.split("\n"):
        if len(raw_line) <= wrap_width:
            out_lines.append(raw_line)
        else:
            wrapped = _tw.wrap(raw_line, width=wrap_width, break_long_words=True, break_on_hyphens=False)
            # _tw.wrap returns [] for whitespace-only lines; keep the original.
            out_lines.extend(wrapped or [raw_line])
    body = ("\n" + indent).join(out_lines)
    return f"{indent}{label}{body}"

def _execute_tool_calls_concurrent(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None:
    """Execute multiple tool calls concurrently using a thread pool.

    Results are collected in the original tool-call order and appended
    to messages so the API sees them in the expected sequence.
""" tool_calls = assistant_message.tool_calls num_tools = len(tool_calls) # ── Pre-flight: interrupt check ────────────────────────────────── if self._interrupt_requested: print(f"{self.log_prefix}⚡ Interrupt: skipping {num_tools} tool call(s)") for tc in tool_calls: messages.append({ "role": "tool", "content": f"[Tool execution cancelled — {tc.function.name} was skipped due to user interrupt]", "tool_call_id": tc.id, }) return # ── Parse args + pre-execution bookkeeping ─────────────────────── parsed_calls = [] # list of (tool_call, function_name, function_args) for tool_call in tool_calls: function_name = tool_call.function.name # Reset nudge counters if function_name == "memory": self._turns_since_memory = 0 elif function_name == "skill_manage": self._iters_since_skill = 0 try: function_args = json.loads(tool_call.function.arguments) except json.JSONDecodeError: function_args = {} if not isinstance(function_args, dict): function_args = {} # Checkpoint for file-mutating tools if function_name in ("write_file", "patch") and self._checkpoint_mgr.enabled: try: file_path = function_args.get("path", "") if file_path: work_dir = self._checkpoint_mgr.get_working_dir_for_path(file_path) self._checkpoint_mgr.ensure_checkpoint(work_dir, f"before {function_name}") except Exception: pass # Checkpoint before destructive terminal commands if function_name == "terminal" and self._checkpoint_mgr.enabled: try: cmd = function_args.get("command", "") if _is_destructive_command(cmd): cwd = function_args.get("workdir") or os.getenv("TERMINAL_CWD", os.getcwd()) self._checkpoint_mgr.ensure_checkpoint( cwd, f"before terminal: {cmd[:60]}" ) except Exception: pass parsed_calls.append((tool_call, function_name, function_args)) # ── Logging / callbacks ────────────────────────────────────────── tool_names_str = ", ".join(name for _, name, _ in parsed_calls) if not self.quiet_mode: print(f" ⚡ Concurrent: {num_tools} tool calls — {tool_names_str}") for i, (tc, name, args) in 
enumerate(parsed_calls, 1): args_str = json.dumps(args, ensure_ascii=False) if self.verbose_logging: print(f" 📞 Tool {i}: {name}({list(args.keys())})") print(self._wrap_verbose("Args: ", json.dumps(args, indent=2, ensure_ascii=False))) else: args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str print(f" 📞 Tool {i}: {name}({list(args.keys())}) - {args_preview}") for tc, name, args in parsed_calls: if self.tool_progress_callback: try: preview = _build_tool_preview(name, args) self.tool_progress_callback("tool.started", name, preview, args) except Exception as cb_err: logging.debug(f"Tool progress callback error: {cb_err}") for tc, name, args in parsed_calls: if self.tool_start_callback: try: self.tool_start_callback(tc.id, name, args) except Exception as cb_err: logging.debug(f"Tool start callback error: {cb_err}") # ── Concurrent execution ───────────────────────────────────────── # Each slot holds (function_name, function_args, function_result, duration, error_flag) results = [None] * num_tools # Touch activity before launching workers so the gateway knows # we're executing tools (not stuck). self._current_tool = tool_names_str self._touch_activity(f"executing {num_tools} tools concurrently: {tool_names_str}") def _run_tool(index, tool_call, function_name, function_args): """Worker function executed in a thread.""" # Set the activity callback on THIS worker thread so # _wait_for_process (terminal commands) can fire heartbeats. # The callback is thread-local; the main thread's callback # is invisible to worker threads. 
try: from tools.environments.base import set_activity_callback set_activity_callback(self._touch_activity) except Exception: pass start = time.time() try: result = self._invoke_tool(function_name, function_args, effective_task_id, tool_call.id) except Exception as tool_error: result = f"Error executing tool '{function_name}': {tool_error}" logger.error("_invoke_tool raised for %s: %s", function_name, tool_error, exc_info=True) duration = time.time() - start is_error, _ = _detect_tool_failure(function_name, result) if is_error: logger.info("tool %s failed (%.2fs): %s", function_name, duration, result[:200]) else: logger.info("tool %s completed (%.2fs, %d chars)", function_name, duration, len(result)) results[index] = (function_name, function_args, result, duration, is_error) # Start spinner for CLI mode (skip when TUI handles tool progress) spinner = None if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.get_waiting_faces()) spinner = KawaiiSpinner(f"{face} ⚡ running {num_tools} tools concurrently", spinner_type='dots', print_fn=self._print_fn) spinner.start() try: max_workers = min(num_tools, _MAX_TOOL_WORKERS) with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: futures = [] for i, (tc, name, args) in enumerate(parsed_calls): f = executor.submit(_run_tool, i, tc, name, args) futures.append(f) # Wait for all to complete with periodic heartbeats so the # gateway's inactivity monitor doesn't kill us during long # concurrent tool batches. Also check for user interrupts # so we don't block indefinitely when the user sends /stop # or a new message during concurrent tool execution. 
_conc_start = time.time() _interrupt_logged = False while True: done, not_done = concurrent.futures.wait( futures, timeout=5.0, ) if not not_done: break # Check for interrupt — the per-thread interrupt signal # already causes individual tools (terminal, execute_code) # to abort, but tools without interrupt checks (web_search, # read_file) will run to completion. Cancel any futures # that haven't started yet so we don't block on them. if self._interrupt_requested: if not _interrupt_logged: _interrupt_logged = True self._vprint( f"{self.log_prefix}⚡ Interrupt: cancelling " f"{len(not_done)} pending concurrent tool(s)", force=True, ) for f in not_done: f.cancel() # Give already-running tools a moment to notice the # per-thread interrupt signal and exit gracefully. concurrent.futures.wait(not_done, timeout=3.0) break _conc_elapsed = int(time.time() - _conc_start) # Heartbeat every ~30s (6 × 5s poll intervals) if _conc_elapsed > 0 and _conc_elapsed % 30 < 6: _still_running = [ parsed_calls[futures.index(f)][1] for f in not_done if f in futures ] self._touch_activity( f"concurrent tools running ({_conc_elapsed}s, " f"{len(not_done)} remaining: {', '.join(_still_running[:3])})" ) finally: if spinner: # Build a summary message for the spinner stop completed = sum(1 for r in results if r is not None) total_dur = sum(r[3] for r in results if r is not None) spinner.stop(f"⚡ {completed}/{num_tools} tools completed in {total_dur:.1f}s total") # ── Post-execution: display per-tool results ───────────────────── for i, (tc, name, args) in enumerate(parsed_calls): r = results[i] if r is None: # Tool was cancelled (interrupt) or thread didn't return if self._interrupt_requested: function_result = f"[Tool execution cancelled — {name} was skipped due to user interrupt]" else: function_result = f"Error executing tool '{name}': thread did not return a result" tool_duration = 0.0 else: function_name, function_args, function_result, tool_duration, is_error = r if is_error: result_preview 
= function_result[:200] if len(function_result) > 200 else function_result logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview) if self.tool_progress_callback: try: self.tool_progress_callback( "tool.completed", function_name, None, None, duration=tool_duration, is_error=is_error, ) except Exception as cb_err: logging.debug(f"Tool progress callback error: {cb_err}") if self.verbose_logging: logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s") logging.debug(f"Tool result ({len(function_result)} chars): {function_result}") # Print cute message per tool if self._should_emit_quiet_tool_messages(): cute_msg = _get_cute_tool_message_impl(name, args, tool_duration, result=function_result) self._safe_print(f" {cute_msg}") elif not self.quiet_mode: if self.verbose_logging: print(f" ✅ Tool {i+1} completed in {tool_duration:.2f}s") print(self._wrap_verbose("Result: ", function_result)) else: response_preview = function_result[:self.log_prefix_chars] + "..." 
if len(function_result) > self.log_prefix_chars else function_result print(f" ✅ Tool {i+1} completed in {tool_duration:.2f}s - {response_preview}") self._current_tool = None self._touch_activity(f"tool completed: {name} ({tool_duration:.1f}s)") if self.tool_complete_callback: try: self.tool_complete_callback(tc.id, name, args, function_result) except Exception as cb_err: logging.debug(f"Tool complete callback error: {cb_err}") function_result = maybe_persist_tool_result( content=function_result, tool_name=name, tool_use_id=tc.id, env=get_active_env(effective_task_id), ) subdir_hints = self._subdirectory_hints.check_tool_call(name, args) if subdir_hints: function_result += subdir_hints tool_msg = { "role": "tool", "content": function_result, "tool_call_id": tc.id, } messages.append(tool_msg) # ── Per-turn aggregate budget enforcement ───────────────────────── num_tools = len(parsed_calls) if num_tools > 0: turn_tool_msgs = messages[-num_tools:] enforce_turn_budget(turn_tool_msgs, env=get_active_env(effective_task_id)) def _execute_tool_calls_sequential(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None: """Execute tool calls sequentially (original behavior). Used for single calls or interactive tools.""" for i, tool_call in enumerate(assistant_message.tool_calls, 1): # SAFETY: check interrupt BEFORE starting each tool. # If the user sent "stop" during a previous tool's execution, # do NOT start any more tools -- skip them all immediately. 
        if self._interrupt_requested:
            remaining_calls = assistant_message.tool_calls[i-1:]
            if remaining_calls:
                self._vprint(f"{self.log_prefix}⚡ Interrupt: skipping {len(remaining_calls)} tool call(s)", force=True)
                for skipped_tc in remaining_calls:
                    skipped_name = skipped_tc.function.name
                    skip_msg = {
                        "role": "tool",
                        "content": f"[Tool execution cancelled — {skipped_name} was skipped due to user interrupt]",
                        "tool_call_id": skipped_tc.id,
                    }
                    messages.append(skip_msg)
            break
        function_name = tool_call.function.name
        try:
            function_args = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError as e:
            logging.warning(f"Unexpected JSON error after validation: {e}")
            function_args = {}
        if not isinstance(function_args, dict):
            function_args = {}
        # Check plugin hooks for a block directive before executing.
        _block_msg: Optional[str] = None
        try:
            from hermes_cli.plugins import get_pre_tool_call_block_message
            _block_msg = get_pre_tool_call_block_message(
                function_name,
                function_args,
                task_id=effective_task_id or "",
            )
        except Exception:
            pass
        if _block_msg is not None:
            # Tool blocked by plugin policy — skip counter resets.
            # Execution is handled below in the tool dispatch chain.
            pass
        else:
            # Reset nudge counters when the relevant tool is actually used
            if function_name == "memory":
                self._turns_since_memory = 0
            elif function_name == "skill_manage":
                self._iters_since_skill = 0
        if not self.quiet_mode:
            args_str = json.dumps(function_args, ensure_ascii=False)
            if self.verbose_logging:
                print(f" 📞 Tool {i}: {function_name}({list(function_args.keys())})")
                print(self._wrap_verbose("Args: ", json.dumps(function_args, indent=2, ensure_ascii=False)))
            else:
                args_preview = args_str[:self.log_prefix_chars] + "..." if len(args_str) > self.log_prefix_chars else args_str
                print(f" 📞 Tool {i}: {function_name}({list(function_args.keys())}) - {args_preview}")
        if _block_msg is None:
            self._current_tool = function_name
            self._touch_activity(f"executing tool: {function_name}")
        # Set activity callback for long-running tool execution (terminal
        # commands, etc.) so the gateway's inactivity monitor doesn't kill
        # the agent while a command is running.
        if _block_msg is None:
            try:
                from tools.environments.base import set_activity_callback
                set_activity_callback(self._touch_activity)
            except Exception:
                pass
        if _block_msg is None and self.tool_progress_callback:
            try:
                preview = _build_tool_preview(function_name, function_args)
                self.tool_progress_callback("tool.started", function_name, preview, function_args)
            except Exception as cb_err:
                logging.debug(f"Tool progress callback error: {cb_err}")
        if _block_msg is None and self.tool_start_callback:
            try:
                self.tool_start_callback(tool_call.id, function_name, function_args)
            except Exception as cb_err:
                logging.debug(f"Tool start callback error: {cb_err}")
        # Checkpoint: snapshot working dir before file-mutating tools
        if _block_msg is None and function_name in ("write_file", "patch") and self._checkpoint_mgr.enabled:
            try:
                file_path = function_args.get("path", "")
                if file_path:
                    work_dir = self._checkpoint_mgr.get_working_dir_for_path(file_path)
                    self._checkpoint_mgr.ensure_checkpoint(
                        work_dir, f"before {function_name}"
                    )
            except Exception:
                pass  # never block tool execution
        # Checkpoint before destructive terminal commands
        if _block_msg is None and function_name == "terminal" and self._checkpoint_mgr.enabled:
            try:
                cmd = function_args.get("command", "")
                if _is_destructive_command(cmd):
                    cwd = function_args.get("workdir") or os.getenv("TERMINAL_CWD", os.getcwd())
                    self._checkpoint_mgr.ensure_checkpoint(
                        cwd, f"before terminal: {cmd[:60]}"
                    )
            except Exception:
                pass  # never block tool execution
        tool_start_time = time.time()
        if _block_msg is not None:
            # Tool blocked by plugin policy — return error without executing.
            function_result = json.dumps({"error": _block_msg}, ensure_ascii=False)
            tool_duration = 0.0
        elif function_name == "todo":
            from tools.todo_tool import todo_tool as _todo_tool
            function_result = _todo_tool(
                todos=function_args.get("todos"),
                merge=function_args.get("merge", False),
                store=self._todo_store,
            )
            tool_duration = time.time() - tool_start_time
            if self._should_emit_quiet_tool_messages():
                self._vprint(f" {_get_cute_tool_message_impl('todo', function_args, tool_duration, result=function_result)}")
        elif function_name == "session_search":
            if not self._session_db:
                function_result = json.dumps({"success": False, "error": "Session database not available."})
            else:
                from tools.session_search_tool import session_search as _session_search
                function_result = _session_search(
                    query=function_args.get("query", ""),
                    role_filter=function_args.get("role_filter"),
                    limit=function_args.get("limit", 3),
                    db=self._session_db,
                    current_session_id=self.session_id,
                )
            tool_duration = time.time() - tool_start_time
            if self._should_emit_quiet_tool_messages():
                self._vprint(f" {_get_cute_tool_message_impl('session_search', function_args, tool_duration, result=function_result)}")
        elif function_name == "memory":
            target = function_args.get("target", "memory")
            from tools.memory_tool import memory_tool as _memory_tool
            function_result = _memory_tool(
                action=function_args.get("action"),
                target=target,
                content=function_args.get("content"),
                old_text=function_args.get("old_text"),
                store=self._memory_store,
            )
            # Bridge: notify external memory provider of built-in memory writes
            if self._memory_manager and function_args.get("action") in ("add", "replace"):
                try:
                    self._memory_manager.on_memory_write(
                        function_args.get("action", ""),
                        target,
                        function_args.get("content", ""),
                    )
                except Exception:
                    pass
            tool_duration = time.time() - tool_start_time
            if self._should_emit_quiet_tool_messages():
                self._vprint(f" {_get_cute_tool_message_impl('memory', function_args,
tool_duration, result=function_result)}") elif function_name == "clarify": from tools.clarify_tool import clarify_tool as _clarify_tool function_result = _clarify_tool( question=function_args.get("question", ""), choices=function_args.get("choices"), callback=self.clarify_callback, ) tool_duration = time.time() - tool_start_time if self._should_emit_quiet_tool_messages(): self._vprint(f" {_get_cute_tool_message_impl('clarify', function_args, tool_duration, result=function_result)}") elif function_name == "delegate_task": from tools.delegate_tool import delegate_task as _delegate_task tasks_arg = function_args.get("tasks") if tasks_arg and isinstance(tasks_arg, list): spinner_label = f"🔀 delegating {len(tasks_arg)} tasks" else: goal_preview = (function_args.get("goal") or "")[:30] spinner_label = f"🔀 {goal_preview}" if goal_preview else "🔀 delegating" spinner = None if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.get_waiting_faces()) spinner = KawaiiSpinner(f"{face} {spinner_label}", spinner_type='dots', print_fn=self._print_fn) spinner.start() self._delegate_spinner = spinner _delegate_result = None try: function_result = _delegate_task( goal=function_args.get("goal"), context=function_args.get("context"), toolsets=function_args.get("toolsets"), tasks=tasks_arg, max_iterations=function_args.get("max_iterations"), parent_agent=self, ) _delegate_result = function_result finally: self._delegate_spinner = None tool_duration = time.time() - tool_start_time cute_msg = _get_cute_tool_message_impl('delegate_task', function_args, tool_duration, result=_delegate_result) if spinner: spinner.stop(cute_msg) elif self._should_emit_quiet_tool_messages(): self._vprint(f" {cute_msg}") elif self._context_engine_tool_names and function_name in self._context_engine_tool_names: # Context engine tools (lcm_grep, lcm_describe, lcm_expand, etc.) 
spinner = None if self.quiet_mode and not self.tool_progress_callback: face = random.choice(KawaiiSpinner.get_waiting_faces()) emoji = _get_tool_emoji(function_name) preview = _build_tool_preview(function_name, function_args) or function_name spinner = KawaiiSpinner(f"{face} {emoji} {preview}", spinner_type='dots', print_fn=self._print_fn) spinner.start() _ce_result = None try: function_result = self.context_compressor.handle_tool_call(function_name, function_args, messages=messages) _ce_result = function_result except Exception as tool_error: function_result = json.dumps({"error": f"Context engine tool '{function_name}' failed: {tool_error}"}) logger.error("context_engine.handle_tool_call raised for %s: %s", function_name, tool_error, exc_info=True) finally: tool_duration = time.time() - tool_start_time cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_ce_result) if spinner: spinner.stop(cute_msg) elif self.quiet_mode: self._vprint(f" {cute_msg}") elif self._memory_manager and self._memory_manager.has_tool(function_name): # Memory provider tools (hindsight_retain, honcho_search, etc.) # These are not in the tool registry — route through MemoryManager. 
spinner = None if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.get_waiting_faces()) emoji = _get_tool_emoji(function_name) preview = _build_tool_preview(function_name, function_args) or function_name spinner = KawaiiSpinner(f"{face} {emoji} {preview}", spinner_type='dots', print_fn=self._print_fn) spinner.start() _mem_result = None try: function_result = self._memory_manager.handle_tool_call(function_name, function_args) _mem_result = function_result except Exception as tool_error: function_result = json.dumps({"error": f"Memory tool '{function_name}' failed: {tool_error}"}) logger.error("memory_manager.handle_tool_call raised for %s: %s", function_name, tool_error, exc_info=True) finally: tool_duration = time.time() - tool_start_time cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_mem_result) if spinner: spinner.stop(cute_msg) elif self._should_emit_quiet_tool_messages(): self._vprint(f" {cute_msg}") elif self.quiet_mode: spinner = None if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.get_waiting_faces()) emoji = _get_tool_emoji(function_name) preview = _build_tool_preview(function_name, function_args) or function_name spinner = KawaiiSpinner(f"{face} {emoji} {preview}", spinner_type='dots', print_fn=self._print_fn) spinner.start() _spinner_result = None try: function_result = handle_function_call( function_name, function_args, effective_task_id, tool_call_id=tool_call.id, session_id=self.session_id or "", enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None, skip_pre_tool_call_hook=True, ) _spinner_result = function_result except Exception as tool_error: function_result = f"Error executing tool '{function_name}': {tool_error}" logger.error("handle_function_call raised for %s: %s", function_name, tool_error, exc_info=True) finally: tool_duration = time.time() - 
tool_start_time cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_spinner_result) if spinner: spinner.stop(cute_msg) elif self._should_emit_quiet_tool_messages(): self._vprint(f" {cute_msg}") else: try: function_result = handle_function_call( function_name, function_args, effective_task_id, tool_call_id=tool_call.id, session_id=self.session_id or "", enabled_tools=list(self.valid_tool_names) if self.valid_tool_names else None, skip_pre_tool_call_hook=True, ) except Exception as tool_error: function_result = f"Error executing tool '{function_name}': {tool_error}" logger.error("handle_function_call raised for %s: %s", function_name, tool_error, exc_info=True) tool_duration = time.time() - tool_start_time result_preview = function_result if self.verbose_logging else ( function_result[:200] if len(function_result) > 200 else function_result ) # Log tool errors to the persistent error log so [error] tags # in the UI always have a corresponding detailed entry on disk. 
_is_error_result, _ = _detect_tool_failure(function_name, function_result) if _is_error_result: logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview) else: logger.info("tool %s completed (%.2fs, %d chars)", function_name, tool_duration, len(function_result)) if self.tool_progress_callback: try: self.tool_progress_callback( "tool.completed", function_name, None, None, duration=tool_duration, is_error=_is_error_result, ) except Exception as cb_err: logging.debug(f"Tool progress callback error: {cb_err}") self._current_tool = None self._touch_activity(f"tool completed: {function_name} ({tool_duration:.1f}s)") if self.verbose_logging: logging.debug(f"Tool {function_name} completed in {tool_duration:.2f}s") logging.debug(f"Tool result ({len(function_result)} chars): {function_result}") if self.tool_complete_callback: try: self.tool_complete_callback(tool_call.id, function_name, function_args, function_result) except Exception as cb_err: logging.debug(f"Tool complete callback error: {cb_err}") function_result = maybe_persist_tool_result( content=function_result, tool_name=function_name, tool_use_id=tool_call.id, env=get_active_env(effective_task_id), ) # Discover subdirectory context files from tool arguments subdir_hints = self._subdirectory_hints.check_tool_call(function_name, function_args) if subdir_hints: function_result += subdir_hints tool_msg = { "role": "tool", "content": function_result, "tool_call_id": tool_call.id } messages.append(tool_msg) if not self.quiet_mode: if self.verbose_logging: print(f" ✅ Tool {i} completed in {tool_duration:.2f}s") print(self._wrap_verbose("Result: ", function_result)) else: response_preview = function_result[:self.log_prefix_chars] + "..." 
if len(function_result) > self.log_prefix_chars else function_result print(f" ✅ Tool {i} completed in {tool_duration:.2f}s - {response_preview}") if self._interrupt_requested and i < len(assistant_message.tool_calls): remaining = len(assistant_message.tool_calls) - i self._vprint(f"{self.log_prefix}⚡ Interrupt: skipping {remaining} remaining tool call(s)", force=True) for skipped_tc in assistant_message.tool_calls[i:]: skipped_name = skipped_tc.function.name skip_msg = { "role": "tool", "content": f"[Tool execution skipped — {skipped_name} was not started. User sent a new message]", "tool_call_id": skipped_tc.id } messages.append(skip_msg) break if self.tool_delay > 0 and i < len(assistant_message.tool_calls): time.sleep(self.tool_delay) # ── Per-turn aggregate budget enforcement ───────────────────────── num_tools_seq = len(assistant_message.tool_calls) if num_tools_seq > 0: enforce_turn_budget(messages[-num_tools_seq:], env=get_active_env(effective_task_id)) def _handle_max_iterations(self, messages: list, api_call_count: int) -> str: """Request a summary when max iterations are reached. Returns the final response text.""" print(f"⚠️ Reached maximum iterations ({self.max_iterations}). Requesting summary...") summary_request = ( "You've reached the maximum number of tool-calling iterations allowed. " "Please provide a final response summarizing what you've found and accomplished so far, " "without calling any more tools." 
) messages.append({"role": "user", "content": summary_request}) try: # Build API messages, stripping internal-only fields # (finish_reason, reasoning) that strict APIs like Mistral reject with 422 _needs_sanitize = self._should_sanitize_tool_calls() api_messages = [] for msg in messages: api_msg = msg.copy() for internal_field in ("reasoning", "finish_reason", "_thinking_prefill"): api_msg.pop(internal_field, None) if _needs_sanitize: self._sanitize_tool_calls_for_strict_api(api_msg) api_messages.append(api_msg) effective_system = self._cached_system_prompt or "" if self.ephemeral_system_prompt: effective_system = (effective_system + "\n\n" + self.ephemeral_system_prompt).strip() if effective_system: api_messages = [{"role": "system", "content": effective_system}] + api_messages if self.prefill_messages: sys_offset = 1 if effective_system else 0 for idx, pfm in enumerate(self.prefill_messages): api_messages.insert(sys_offset + idx, pfm.copy()) summary_extra_body = {} _is_nous = "nousresearch" in self._base_url_lower if self._supports_reasoning_extra_body(): if self.reasoning_config is not None: summary_extra_body["reasoning"] = self.reasoning_config else: summary_extra_body["reasoning"] = { "enabled": True, "effort": "medium" } if _is_nous: summary_extra_body["tags"] = ["product=hermes-agent"] if self.api_mode == "codex_responses": codex_kwargs = self._build_api_kwargs(api_messages) codex_kwargs.pop("tools", None) summary_response = self._run_codex_stream(codex_kwargs) assistant_message, _ = self._normalize_codex_response(summary_response) final_response = (assistant_message.content or "").strip() if assistant_message else "" else: summary_kwargs = { "model": self.model, "messages": api_messages, } if self.max_tokens is not None: summary_kwargs.update(self._max_tokens_param(self.max_tokens)) # Include provider routing preferences provider_preferences = {} if self.providers_allowed: provider_preferences["only"] = self.providers_allowed if self.providers_ignored: 
provider_preferences["ignore"] = self.providers_ignored if self.providers_order: provider_preferences["order"] = self.providers_order if self.provider_sort: provider_preferences["sort"] = self.provider_sort if provider_preferences: summary_extra_body["provider"] = provider_preferences if summary_extra_body: summary_kwargs["extra_body"] = summary_extra_body if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_kwargs as _bak, normalize_anthropic_response as _nar _ant_kw = _bak(model=self.model, messages=api_messages, tools=None, max_tokens=self.max_tokens, reasoning_config=self.reasoning_config, is_oauth=self._is_anthropic_oauth, preserve_dots=self._anthropic_preserve_dots()) summary_response = self._anthropic_messages_create(_ant_kw) _msg, _ = _nar(summary_response, strip_tool_prefix=self._is_anthropic_oauth) final_response = (_msg.content or "").strip() else: summary_response = self._ensure_primary_openai_client(reason="iteration_limit_summary").chat.completions.create(**summary_kwargs) if summary_response.choices and summary_response.choices[0].message.content: final_response = summary_response.choices[0].message.content else: final_response = "" if final_response: if "" in final_response: final_response = re.sub(r'.*?\s*', '', final_response, flags=re.DOTALL).strip() if final_response: messages.append({"role": "assistant", "content": final_response}) else: final_response = "I reached the iteration limit and couldn't generate a summary." 
else: # Retry summary generation if self.api_mode == "codex_responses": codex_kwargs = self._build_api_kwargs(api_messages) codex_kwargs.pop("tools", None) retry_response = self._run_codex_stream(codex_kwargs) retry_msg, _ = self._normalize_codex_response(retry_response) final_response = (retry_msg.content or "").strip() if retry_msg else "" elif self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_kwargs as _bak2, normalize_anthropic_response as _nar2 _ant_kw2 = _bak2(model=self.model, messages=api_messages, tools=None, is_oauth=self._is_anthropic_oauth, max_tokens=self.max_tokens, reasoning_config=self.reasoning_config, preserve_dots=self._anthropic_preserve_dots()) retry_response = self._anthropic_messages_create(_ant_kw2) _retry_msg, _ = _nar2(retry_response, strip_tool_prefix=self._is_anthropic_oauth) final_response = (_retry_msg.content or "").strip() else: summary_kwargs = { "model": self.model, "messages": api_messages, } if self.max_tokens is not None: summary_kwargs.update(self._max_tokens_param(self.max_tokens)) if summary_extra_body: summary_kwargs["extra_body"] = summary_extra_body summary_response = self._ensure_primary_openai_client(reason="iteration_limit_summary_retry").chat.completions.create(**summary_kwargs) if summary_response.choices and summary_response.choices[0].message.content: final_response = summary_response.choices[0].message.content else: final_response = "" if final_response: if "" in final_response: final_response = re.sub(r'.*?\s*', '', final_response, flags=re.DOTALL).strip() if final_response: messages.append({"role": "assistant", "content": final_response}) else: final_response = "I reached the iteration limit and couldn't generate a summary." else: final_response = "I reached the iteration limit and couldn't generate a summary." 
except Exception as e: logging.warning(f"Failed to get summary response: {e}") final_response = f"I reached the maximum iterations ({self.max_iterations}) but couldn't summarize. Error: {str(e)}" return final_response def run_conversation( self, user_message: str, system_message: str = None, conversation_history: List[Dict[str, Any]] = None, task_id: str = None, stream_callback: Optional[callable] = None, persist_user_message: Optional[str] = None, ) -> Dict[str, Any]: """ Run a complete conversation with tool calling until completion. Args: user_message (str): The user's message/question system_message (str): Custom system message (optional, overrides ephemeral_system_prompt if provided) conversation_history (List[Dict]): Previous conversation messages (optional) task_id (str): Unique identifier for this task to isolate VMs between concurrent tasks (optional, auto-generated if not provided) stream_callback: Optional callback invoked with each text delta during streaming. Used by the TTS pipeline to start audio generation before the full response. When None (default), API calls use the standard non-streaming path. persist_user_message: Optional clean user message to store in transcripts/history when user_message contains API-only synthetic prefixes. or queuing follow-up prefetch work. Returns: Dict: Complete conversation result with final response and message history """ # Guard stdio against OSError from broken pipes (systemd/headless/daemon). # Installed once, transparent when streams are healthy, prevents crash on write. _install_safe_stdio() # Tag all log records on this thread with the session ID so # ``hermes logs --session `` can filter a single conversation. from hermes_logging import set_session_context set_session_context(self.session_id) # If the previous turn activated fallback, restore the primary # runtime so this turn gets a fresh attempt with the preferred model. # No-op when _fallback_activated is False (gateway, first turn, etc.). 
self._restore_primary_runtime() # Sanitize surrogate characters from user input. Clipboard paste from # rich-text editors (Google Docs, Word, etc.) can inject lone surrogates # that are invalid UTF-8 and crash JSON serialization in the OpenAI SDK. if isinstance(user_message, str): user_message = _sanitize_surrogates(user_message) if isinstance(persist_user_message, str): persist_user_message = _sanitize_surrogates(persist_user_message) # Strip leaked blocks from user input. When Honcho's # saveMessages persists a turn that included injected context, the block # can reappear in the next turn's user message via message history. # Stripping here prevents stale memory tags from leaking into the # conversation and being visible to the user or the model as user text. if isinstance(user_message, str): user_message = sanitize_context(user_message) if isinstance(persist_user_message, str): persist_user_message = sanitize_context(persist_user_message) # Store stream callback for _interruptible_api_call to pick up self._stream_callback = stream_callback self._persist_user_message_idx = None self._persist_user_message_override = persist_user_message # Generate unique task_id if not provided to isolate VMs between concurrent tasks effective_task_id = task_id or str(uuid.uuid4()) # Reset retry counters and iteration budget at the start of each turn # so subagent usage from a previous turn doesn't eat into the next one. self._invalid_tool_retries = 0 self._invalid_json_retries = 0 self._empty_content_retries = 0 self._incomplete_scratchpad_retries = 0 self._codex_incomplete_retries = 0 self._thinking_prefill_retries = 0 self._post_tool_empty_retried = False self._last_content_with_tools = None self._last_content_tools_all_housekeeping = False self._mute_post_response = False self._unicode_sanitization_passes = 0 # Pre-turn connection health check: detect and clean up dead TCP # connections left over from provider outages or dropped streams. 
# This prevents the next API call from hanging on a zombie socket. if self.api_mode != "anthropic_messages": try: if self._cleanup_dead_connections(): self._emit_status( "🔌 Detected stale connections from a previous provider " "issue — cleaned up automatically. Proceeding with fresh " "connection." ) except Exception: pass # Replay compression warning through status_callback for gateway # platforms (the callback was not wired during __init__). if self._compression_warning: self._replay_compression_warning() self._compression_warning = None # send once # NOTE: _turns_since_memory and _iters_since_skill are NOT reset here. # They are initialized in __init__ and must persist across run_conversation # calls so that nudge logic accumulates correctly in CLI mode. self.iteration_budget = IterationBudget(self.max_iterations) # Log conversation turn start for debugging/observability _msg_preview = (user_message[:80] + "...") if len(user_message) > 80 else user_message _msg_preview = _msg_preview.replace("\n", " ") logger.info( "conversation turn: session=%s model=%s provider=%s platform=%s history=%d msg=%r", self.session_id or "none", self.model, self.provider or "unknown", self.platform or "unknown", len(conversation_history or []), _msg_preview, ) # Initialize conversation (copy to avoid mutating the caller's list) messages = list(conversation_history) if conversation_history else [] # Hydrate todo store from conversation history (gateway creates a fresh # AIAgent per message, so the in-memory store is empty -- we need to # recover the todo state from the most recent todo tool response in history) if conversation_history and not self._todo_store.has_items(): self._hydrate_todo_store(conversation_history) # Prefill messages (few-shot priming) are injected at API-call time only, # never stored in the messages list. 
This keeps them ephemeral: they won't # be saved to session DB, session logs, or batch trajectories, but they're # automatically re-applied on every API call (including session continuations). # Track user turns for memory flush and periodic nudge logic self._user_turn_count += 1 # Preserve the original user message (no nudge injection). original_user_message = persist_user_message if persist_user_message is not None else user_message # Track memory nudge trigger (turn-based, checked here). # Skill trigger is checked AFTER the agent loop completes, based on # how many tool iterations THIS turn used. _should_review_memory = False if (self._memory_nudge_interval > 0 and "memory" in self.valid_tool_names and self._memory_store): self._turns_since_memory += 1 if self._turns_since_memory >= self._memory_nudge_interval: _should_review_memory = True self._turns_since_memory = 0 # Add user message user_msg = {"role": "user", "content": user_message} messages.append(user_msg) current_turn_user_idx = len(messages) - 1 self._persist_user_message_idx = current_turn_user_idx if not self.quiet_mode: self._safe_print(f"💬 Starting conversation: '{user_message[:60]}{'...' if len(user_message) > 60 else ''}'") # ── System prompt (cached per session for prefix caching) ── # Built once on first call, reused for all subsequent calls. # Only rebuilt after context compression events (which invalidate # the cache and reload memory from disk). # # For continuing sessions (gateway creates a fresh AIAgent per # message), we load the stored system prompt from the session DB # instead of rebuilding. Rebuilding would pick up memory changes # from disk that the model already knows about (it wrote them!), # producing a different system prompt and breaking the Anthropic # prefix cache. 
if self._cached_system_prompt is None: stored_prompt = None if conversation_history and self._session_db: try: session_row = self._session_db.get_session(self.session_id) if session_row: stored_prompt = session_row.get("system_prompt") or None except Exception: pass # Fall through to build fresh if stored_prompt: # Continuing session — reuse the exact system prompt from # the previous turn so the Anthropic cache prefix matches. self._cached_system_prompt = stored_prompt else: # First turn of a new session — build from scratch. self._cached_system_prompt = self._build_system_prompt(system_message) # Plugin hook: on_session_start # Fired once when a brand-new session is created (not on # continuation). Plugins can use this to initialise # session-scoped state (e.g. warm a memory cache). try: from hermes_cli.plugins import invoke_hook as _invoke_hook _invoke_hook( "on_session_start", session_id=self.session_id, model=self.model, platform=getattr(self, "platform", None) or "", ) except Exception as exc: logger.warning("on_session_start hook failed: %s", exc) # Store the system prompt snapshot in SQLite if self._session_db: try: self._session_db.update_system_prompt(self.session_id, self._cached_system_prompt) except Exception as e: logger.debug("Session DB update_system_prompt failed: %s", e) active_system_prompt = self._cached_system_prompt # ── Preflight context compression ── # Before entering the main loop, check if the loaded conversation # history already exceeds the model's context threshold. This handles # cases where a user switches to a model with a smaller context window # while having a large existing session — compress proactively rather # than waiting for an API error (which might be caught as a non-retryable # 4xx and abort the request entirely). 
if ( self.compression_enabled and len(messages) > self.context_compressor.protect_first_n + self.context_compressor.protect_last_n + 1 ): # Include tool schema tokens — with many tools these can add # 20-30K+ tokens that the old sys+msg estimate missed entirely. _preflight_tokens = estimate_request_tokens_rough( messages, system_prompt=active_system_prompt or "", tools=self.tools or None, ) if _preflight_tokens >= self.context_compressor.threshold_tokens: logger.info( "Preflight compression: ~%s tokens >= %s threshold (model %s, ctx %s)", f"{_preflight_tokens:,}", f"{self.context_compressor.threshold_tokens:,}", self.model, f"{self.context_compressor.context_length:,}", ) if not self.quiet_mode: self._safe_print( f"📦 Preflight compression: ~{_preflight_tokens:,} tokens " f">= {self.context_compressor.threshold_tokens:,} threshold" ) # May need multiple passes for very large sessions with small # context windows (each pass summarises the middle N turns). for _pass in range(3): _orig_len = len(messages) messages, active_system_prompt = self._compress_context( messages, system_message, approx_tokens=_preflight_tokens, task_id=effective_task_id, ) if len(messages) >= _orig_len: break # Cannot compress further # Compression created a new session — clear the history # reference so _flush_messages_to_session_db writes ALL # compressed messages to the new session's SQLite, not # skipping them because conversation_history is still the # pre-compression length. conversation_history = None # Fix: reset retry counters after compression so the model # gets a fresh budget on the compressed context. Without # this, pre-compression retries carry over and the model # hits "(empty)" immediately after compression-induced # context loss. 
self._empty_content_retries = 0 self._thinking_prefill_retries = 0 self._last_content_with_tools = None self._last_content_tools_all_housekeeping = False self._mute_post_response = False # Re-estimate after compression _preflight_tokens = estimate_request_tokens_rough( messages, system_prompt=active_system_prompt or "", tools=self.tools or None, ) if _preflight_tokens < self.context_compressor.threshold_tokens: break # Under threshold # Plugin hook: pre_llm_call # Fired once per turn before the tool-calling loop. Plugins can # return a dict with a ``context`` key (or a plain string) whose # value is appended to the current turn's user message. # # Context is ALWAYS injected into the user message, never the # system prompt. This preserves the prompt cache prefix — the # system prompt stays identical across turns so cached tokens # are reused. The system prompt is Hermes's territory; plugins # contribute context alongside the user's input. # # All injected context is ephemeral (not persisted to session DB). 
_plugin_user_context = "" try: from hermes_cli.plugins import invoke_hook as _invoke_hook _pre_results = _invoke_hook( "pre_llm_call", session_id=self.session_id, user_message=original_user_message, conversation_history=list(messages), is_first_turn=(not bool(conversation_history)), model=self.model, platform=getattr(self, "platform", None) or "", sender_id=getattr(self, "_user_id", None) or "", ) _ctx_parts: list[str] = [] for r in _pre_results: if isinstance(r, dict) and r.get("context"): _ctx_parts.append(str(r["context"])) elif isinstance(r, str) and r.strip(): _ctx_parts.append(r) if _ctx_parts: _plugin_user_context = "\n\n".join(_ctx_parts) except Exception as exc: logger.warning("pre_llm_call hook failed: %s", exc) # Main conversation loop api_call_count = 0 final_response = None interrupted = False codex_ack_continuations = 0 length_continue_retries = 0 truncated_tool_call_retries = 0 truncated_response_prefix = "" compression_attempts = 0 _turn_exit_reason = "unknown" # Diagnostic: why the loop ended # Record the execution thread so interrupt()/clear_interrupt() can # scope the tool-level interrupt signal to THIS agent's thread only. # Must be set before any thread-scoped interrupt syncing. self._execution_thread_id = threading.current_thread().ident # Always clear stale per-thread state from a previous turn. If an # interrupt arrived before startup finished, preserve it and bind it # to this execution thread now instead of dropping it on the floor. _set_interrupt(False, self._execution_thread_id) if self._interrupt_requested: _set_interrupt(True, self._execution_thread_id) self._interrupt_thread_signal_pending = False else: self._interrupt_message = None self._interrupt_thread_signal_pending = False # Notify memory providers of the new turn so cadence tracking works. # Must happen BEFORE prefetch_all() so providers know which turn it is # and can gate context/dialectic refresh via contextCadence/dialecticCadence. 
if self._memory_manager: try: _turn_msg = original_user_message if isinstance(original_user_message, str) else "" self._memory_manager.on_turn_start(self._user_turn_count, _turn_msg) except Exception: pass # External memory provider: prefetch once before the tool loop. # Reuse the cached result on every iteration to avoid re-calling # prefetch_all() on each tool call (10 tool calls = 10x latency + cost). # Use original_user_message (clean input) — user_message may contain # injected skill content that bloats / breaks provider queries. _ext_prefetch_cache = "" if self._memory_manager: try: _query = original_user_message if isinstance(original_user_message, str) else "" _ext_prefetch_cache = self._memory_manager.prefetch_all(_query) or "" except Exception: pass while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call: # Reset per-turn checkpoint dedup so each iteration can take one snapshot self._checkpoint_mgr.new_turn() # Check for interrupt request (e.g., user sent new message) if self._interrupt_requested: interrupted = True _turn_exit_reason = "interrupted_by_user" if not self.quiet_mode: self._safe_print("\n⚡ Breaking out of tool loop due to interrupt...") break api_call_count += 1 self._api_call_count = api_call_count self._touch_activity(f"starting API call #{api_call_count}") # Grace call: the budget is exhausted but we gave the model one # more chance. Consume the grace flag so the loop exits after # this iteration regardless of outcome. 
if self._budget_grace_call: self._budget_grace_call = False elif not self.iteration_budget.consume(): _turn_exit_reason = "budget_exhausted" if not self.quiet_mode: self._safe_print(f"\n⚠️ Iteration budget exhausted ({self.iteration_budget.used}/{self.iteration_budget.max_total} iterations used)") break # Fire step_callback for gateway hooks (agent:step event) if self.step_callback is not None: try: prev_tools = [] for _idx, _m in enumerate(reversed(messages)): if _m.get("role") == "assistant" and _m.get("tool_calls"): _fwd_start = len(messages) - _idx _results_by_id = {} for _tm in messages[_fwd_start:]: if _tm.get("role") != "tool": break _tcid = _tm.get("tool_call_id") if _tcid: _results_by_id[_tcid] = _tm.get("content", "") prev_tools = [ { "name": tc["function"]["name"], "result": _results_by_id.get(tc.get("id")), } for tc in _m["tool_calls"] if isinstance(tc, dict) ] break self.step_callback(api_call_count, prev_tools) except Exception as _step_err: logger.debug("step_callback error (iteration %s): %s", api_call_count, _step_err) # Track tool-calling iterations for skill nudge. # Counter resets whenever skill_manage is actually used. if (self._skill_nudge_interval > 0 and "skill_manage" in self.valid_tool_names): self._iters_since_skill += 1 # Prepare messages for API call # If we have an ephemeral system prompt, prepend it to the messages # Note: Reasoning is embedded in content via tags for trajectory storage. # However, providers like Moonshot AI require a separate 'reasoning_content' field # on assistant messages with tool_calls. We handle both cases here. api_messages = [] for idx, msg in enumerate(messages): api_msg = msg.copy() # Inject ephemeral context into the current turn's user message. # Sources: memory manager prefetch + plugin pre_llm_call hooks # with target="user_message" (the default). Both are # API-call-time only — the original message in `messages` is # never mutated, so nothing leaks into session persistence. 
if idx == current_turn_user_idx and msg.get("role") == "user": _injections = [] if _ext_prefetch_cache: _fenced = build_memory_context_block(_ext_prefetch_cache) if _fenced: _injections.append(_fenced) if _plugin_user_context: _injections.append(_plugin_user_context) if _injections: _base = api_msg.get("content", "") if isinstance(_base, str): api_msg["content"] = _base + "\n\n" + "\n\n".join(_injections) # For ALL assistant messages, pass reasoning back to the API # This ensures multi-turn reasoning context is preserved if msg.get("role") == "assistant": reasoning_text = msg.get("reasoning") if reasoning_text: # Add reasoning_content for API compatibility (Moonshot AI, Novita, OpenRouter) api_msg["reasoning_content"] = reasoning_text # Remove 'reasoning' field - it's for trajectory storage only # We've copied it to 'reasoning_content' for the API above if "reasoning" in api_msg: api_msg.pop("reasoning") # Remove finish_reason - not accepted by strict APIs (e.g. Mistral) if "finish_reason" in api_msg: api_msg.pop("finish_reason") # Strip internal thinking-prefill marker api_msg.pop("_thinking_prefill", None) # Strip Codex Responses API fields (call_id, response_item_id) for # strict providers like Mistral, Fireworks, etc. that reject unknown fields. # Uses new dicts so the internal messages list retains the fields # for Codex Responses compatibility. if self._should_sanitize_tool_calls(): self._sanitize_tool_calls_for_strict_api(api_msg) # Keep 'reasoning_details' - OpenRouter uses this for multi-turn reasoning context # The signature field helps maintain reasoning continuity api_messages.append(api_msg) # Build the final system message: cached prompt + ephemeral system prompt. # Ephemeral additions are API-call-time only (not persisted to session DB). # External recall context is injected into the user message, not the system # prompt, so the stable cache prefix remains unchanged. 
effective_system = active_system_prompt or "" if self.ephemeral_system_prompt: effective_system = (effective_system + "\n\n" + self.ephemeral_system_prompt).strip() # NOTE: Plugin context from pre_llm_call hooks is injected into the # user message (see injection block above), NOT the system prompt. # This is intentional — system prompt modifications break the prompt # cache prefix. The system prompt is reserved for Hermes internals. if effective_system: api_messages = [{"role": "system", "content": effective_system}] + api_messages # Inject ephemeral prefill messages right after the system prompt # but before conversation history. Same API-call-time-only pattern. if self.prefill_messages: sys_offset = 1 if effective_system else 0 for idx, pfm in enumerate(self.prefill_messages): api_messages.insert(sys_offset + idx, pfm.copy()) # Apply Anthropic prompt caching for Claude models via OpenRouter. # Auto-detected: if model name contains "claude" and base_url is OpenRouter, # inject cache_control breakpoints (system + last 3 messages) to reduce # input token costs by ~75% on multi-turn conversations. if self._use_prompt_caching: api_messages = apply_anthropic_cache_control(api_messages, cache_ttl=self._cache_ttl, native_anthropic=(self.api_mode == 'anthropic_messages')) # Safety net: strip orphaned tool results / add stubs for missing # results before sending to the API. Runs unconditionally — not # gated on context_compressor — so orphans from session loading or # manual message manipulation are always caught. api_messages = self._sanitize_api_messages(api_messages) # Normalize message whitespace and tool-call JSON for consistent # prefix matching. Ensures bit-perfect prefixes across turns, # which enables KV cache reuse on local inference servers # (llama.cpp, vLLM, Ollama) and improves cache hit rates for # cloud providers. Operates on api_messages (the API copy) so # the original conversation history in `messages` is untouched. 
for am in api_messages: if isinstance(am.get("content"), str): am["content"] = am["content"].strip() for am in api_messages: tcs = am.get("tool_calls") if not tcs: continue new_tcs = [] for tc in tcs: if isinstance(tc, dict) and "function" in tc: try: args_obj = json.loads(tc["function"]["arguments"]) tc = {**tc, "function": { **tc["function"], "arguments": json.dumps( args_obj, separators=(",", ":"), sort_keys=True, ), }} except Exception: pass new_tcs.append(tc) am["tool_calls"] = new_tcs # Proactively strip any surrogate characters before the API call. # Models served via Ollama (Kimi K2.5, GLM-5, Qwen) can return # lone surrogates (U+D800-U+DFFF) that crash json.dumps() inside # the OpenAI SDK. Sanitizing here prevents the 3-retry cycle. _sanitize_messages_surrogates(api_messages) # Calculate approximate request size for logging total_chars = sum(len(str(msg)) for msg in api_messages) approx_tokens = estimate_messages_tokens_rough(api_messages) # Thinking spinner for quiet mode (animated during API call) thinking_spinner = None if not self.quiet_mode: self._vprint(f"\n{self.log_prefix}🔄 Making API call #{api_call_count}/{self.max_iterations}...") self._vprint(f"{self.log_prefix} 📊 Request size: {len(api_messages)} messages, ~{approx_tokens:,} tokens (~{total_chars:,} chars)") self._vprint(f"{self.log_prefix} 🔧 Available tools: {len(self.tools) if self.tools else 0}") else: # Animated thinking spinner in quiet mode face = random.choice(KawaiiSpinner.get_thinking_faces()) verb = random.choice(KawaiiSpinner.get_thinking_verbs()) if self.thinking_callback: # CLI TUI mode: use prompt_toolkit widget instead of raw spinner # (works in both streaming and non-streaming modes) self.thinking_callback(f"{face} {verb}...") elif not self._has_stream_consumers() and self._should_start_quiet_spinner(): # Raw KawaiiSpinner only when no streaming consumers and the # spinner output has a safe sink. 
spinner_type = random.choice(['brain', 'sparkle', 'pulse', 'moon', 'star']) thinking_spinner = KawaiiSpinner(f"{face} {verb}...", spinner_type=spinner_type, print_fn=self._print_fn) thinking_spinner.start() # Log request details if verbose if self.verbose_logging: logging.debug(f"API Request - Model: {self.model}, Messages: {len(messages)}, Tools: {len(self.tools) if self.tools else 0}") logging.debug(f"Last message role: {messages[-1]['role'] if messages else 'none'}") logging.debug(f"Total message size: ~{approx_tokens:,} tokens") api_start_time = time.time() retry_count = 0 max_retries = 3 primary_recovery_attempted = False max_compression_attempts = 3 codex_auth_retry_attempted=False anthropic_auth_retry_attempted=False nous_auth_retry_attempted=False thinking_sig_retry_attempted = False has_retried_429 = False restart_with_compressed_messages = False restart_with_length_continuation = False finish_reason = "stop" response = None # Guard against UnboundLocalError if all retries fail api_kwargs = None # Guard against UnboundLocalError in except handler while retry_count < max_retries: # ── Nous Portal rate limit guard ────────────────────── # If another session already recorded that Nous is rate- # limited, skip the API call entirely. Each attempt # (including SDK-level retries) counts against RPH and # deepens the rate limit hole. if self.provider == "nous": try: from agent.nous_rate_guard import ( nous_rate_limit_remaining, format_remaining as _fmt_nous_remaining, ) _nous_remaining = nous_rate_limit_remaining() if _nous_remaining is not None and _nous_remaining > 0: _nous_msg = ( f"Nous Portal rate limit active — " f"resets in {_fmt_nous_remaining(_nous_remaining)}." 
) self._vprint( f"{self.log_prefix}⏳ {_nous_msg} Trying fallback...", force=True, ) self._emit_status(f"⏳ {_nous_msg}") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue # No fallback available — return with clear message self._persist_session(messages, conversation_history) return { "final_response": ( f"⏳ {_nous_msg}\n\n" "No fallback provider available. " "Try again after the reset, or add a " "fallback provider in config.yaml." ), "messages": messages, "api_calls": api_call_count, "completed": False, "failed": True, "error": _nous_msg, } except ImportError: pass except Exception: pass # Never let rate guard break the agent loop try: self._reset_stream_delivery_tracking() api_kwargs = self._build_api_kwargs(api_messages) if self._force_ascii_payload: _sanitize_structure_non_ascii(api_kwargs) if self.api_mode == "codex_responses": api_kwargs = self._preflight_codex_api_kwargs(api_kwargs, allow_stream=False) try: from hermes_cli.plugins import invoke_hook as _invoke_hook _invoke_hook( "pre_api_request", task_id=effective_task_id, session_id=self.session_id or "", platform=self.platform or "", model=self.model, provider=self.provider, base_url=self.base_url, api_mode=self.api_mode, api_call_count=api_call_count, message_count=len(api_messages), tool_count=len(self.tools or []), approx_input_tokens=approx_tokens, request_char_count=total_chars, max_tokens=self.max_tokens, ) except Exception: pass if env_var_enabled("HERMES_DUMP_REQUESTS"): self._dump_api_request_debug(api_kwargs, reason="preflight") # Always prefer the streaming path — even without stream # consumers. Streaming gives us fine-grained health # checking (90s stale-stream detection, 60s read timeout) # that the non-streaming path lacks. Without this, # subagents and other quiet-mode callers can hang # indefinitely when the provider keeps the connection # alive with SSE pings but never delivers a response. 
# The streaming path is a no-op for callbacks when no # consumers are registered, and falls back to non- # streaming automatically if the provider doesn't # support it. def _stop_spinner(): nonlocal thinking_spinner if thinking_spinner: thinking_spinner.stop("") thinking_spinner = None if self.thinking_callback: self.thinking_callback("") _use_streaming = True # Provider signaled "stream not supported" on a previous # attempt — switch to non-streaming for the rest of this # session instead of re-failing every retry. if getattr(self, "_disable_streaming", False): _use_streaming = False elif not self._has_stream_consumers(): # No display/TTS consumer. Still prefer streaming for # health checking, but skip for Mock clients in tests # (mocks return SimpleNamespace, not stream iterators). from unittest.mock import Mock if isinstance(getattr(self, "client", None), Mock): _use_streaming = False if _use_streaming: response = self._interruptible_streaming_api_call( api_kwargs, on_first_delta=_stop_spinner ) else: response = self._interruptible_api_call(api_kwargs) api_duration = time.time() - api_start_time # Stop thinking spinner silently -- the response box or tool # execution messages that follow are more informative. 
if thinking_spinner: thinking_spinner.stop("") thinking_spinner = None if self.thinking_callback: self.thinking_callback("") if not self.quiet_mode: self._vprint(f"{self.log_prefix}⏱️ API call completed in {api_duration:.2f}s") if self.verbose_logging: # Log response with provider info if available resp_model = getattr(response, 'model', 'N/A') if response else 'N/A' logging.debug(f"API Response received - Model: {resp_model}, Usage: {response.usage if hasattr(response, 'usage') else 'N/A'}") # Validate response shape before proceeding response_invalid = False error_details = [] if self.api_mode == "codex_responses": output_items = getattr(response, "output", None) if response is not None else None if response is None: response_invalid = True error_details.append("response is None") elif not isinstance(output_items, list): response_invalid = True error_details.append("response.output is not a list") elif not output_items: # Stream backfill may have failed, but # _normalize_codex_response can still recover # from response.output_text. Only mark invalid # when that fallback is also absent. _out_text = getattr(response, "output_text", None) _out_text_stripped = _out_text.strip() if isinstance(_out_text, str) else "" if _out_text_stripped: logger.debug( "Codex response.output is empty but output_text is present " "(%d chars); deferring to normalization.", len(_out_text_stripped), ) else: _resp_status = getattr(response, "status", None) _resp_incomplete = getattr(response, "incomplete_details", None) logger.warning( "Codex response.output is empty after stream backfill " "(status=%s, incomplete_details=%s, model=%s). 
%s", _resp_status, _resp_incomplete, getattr(response, "model", None), f"api_mode={self.api_mode} provider={self.provider}", ) response_invalid = True error_details.append("response.output is empty") elif self.api_mode == "anthropic_messages": content_blocks = getattr(response, "content", None) if response is not None else None if response is None: response_invalid = True error_details.append("response is None") elif not isinstance(content_blocks, list): response_invalid = True error_details.append("response.content is not a list") elif not content_blocks: response_invalid = True error_details.append("response.content is empty") else: if response is None or not hasattr(response, 'choices') or response.choices is None or not response.choices: response_invalid = True if response is None: error_details.append("response is None") elif not hasattr(response, 'choices'): error_details.append("response has no 'choices' attribute") elif response.choices is None: error_details.append("response.choices is None") else: error_details.append("response.choices is empty") if response_invalid: # Stop spinner before printing error messages if thinking_spinner: thinking_spinner.stop("(´;ω;`) oops, retrying...") thinking_spinner = None if self.thinking_callback: self.thinking_callback("") # Invalid response — could be rate limiting, provider timeout, # upstream server error, or malformed response. retry_count += 1 # Eager fallback: empty/malformed responses are a common # rate-limit symptom. Switch to fallback immediately # rather than retrying with extended backoff. 
if self._fallback_index < len(self._fallback_chain): self._emit_status("⚠️ Empty/malformed response — switching to fallback...") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue # Check for error field in response (some providers include this) error_msg = "Unknown" provider_name = "Unknown" if response and hasattr(response, 'error') and response.error: error_msg = str(response.error) # Try to extract provider from error metadata if hasattr(response.error, 'metadata') and response.error.metadata: provider_name = response.error.metadata.get('provider_name', 'Unknown') elif response and hasattr(response, 'message') and response.message: error_msg = str(response.message) # Try to get provider from model field (OpenRouter often returns actual model used) if provider_name == "Unknown" and response and hasattr(response, 'model') and response.model: provider_name = f"model={response.model}" # Check for x-openrouter-provider or similar metadata if provider_name == "Unknown" and response: # Log all response attributes for debugging resp_attrs = {k: str(v)[:100] for k, v in vars(response).items() if not k.startswith('_')} if self.verbose_logging: logging.debug(f"Response attributes for invalid response: {resp_attrs}") # Extract error code from response for contextual diagnostics _resp_error_code = None if response and hasattr(response, 'error') and response.error: _code_raw = getattr(response.error, 'code', None) if _code_raw is None and isinstance(response.error, dict): _code_raw = response.error.get('code') if _code_raw is not None: try: _resp_error_code = int(_code_raw) except (TypeError, ValueError): pass # Build a human-readable failure hint from the error code # and response time, instead of always assuming rate limiting. 
if _resp_error_code == 524: _failure_hint = f"upstream provider timed out (Cloudflare 524, {api_duration:.0f}s)" elif _resp_error_code == 504: _failure_hint = f"upstream gateway timeout (504, {api_duration:.0f}s)" elif _resp_error_code == 429: _failure_hint = f"rate limited by upstream provider (429)" elif _resp_error_code in (500, 502): _failure_hint = f"upstream server error ({_resp_error_code}, {api_duration:.0f}s)" elif _resp_error_code in (503, 529): _failure_hint = f"upstream provider overloaded ({_resp_error_code})" elif _resp_error_code is not None: _failure_hint = f"upstream error (code {_resp_error_code}, {api_duration:.0f}s)" elif api_duration < 10: _failure_hint = f"fast response ({api_duration:.1f}s) — likely rate limited" elif api_duration > 60: _failure_hint = f"slow response ({api_duration:.0f}s) — likely upstream timeout" else: _failure_hint = f"response time {api_duration:.1f}s" self._vprint(f"{self.log_prefix}⚠️ Invalid API response (attempt {retry_count}/{max_retries}): {', '.join(error_details)}", force=True) self._vprint(f"{self.log_prefix} 🏢 Provider: {provider_name}", force=True) cleaned_provider_error = self._clean_error_message(error_msg) self._vprint(f"{self.log_prefix} 📝 Provider message: {cleaned_provider_error}", force=True) self._vprint(f"{self.log_prefix} ⏱️ {_failure_hint}", force=True) if retry_count >= max_retries: # Try fallback before giving up self._emit_status(f"⚠️ Max retries ({max_retries}) for invalid responses — trying fallback...") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue self._emit_status(f"❌ Max retries ({max_retries}) exceeded for invalid responses. 
Giving up.") logging.error(f"{self.log_prefix}Invalid API response after {max_retries} retries.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": f"Invalid API response after {max_retries} retries: {_failure_hint}", "failed": True # Mark as failure for filtering } # Backoff before retry — jittered exponential: 5s base, 120s cap wait_time = jittered_backoff(retry_count, base_delay=5.0, max_delay=120.0) self._vprint(f"{self.log_prefix}⏳ Retrying in {wait_time:.1f}s ({_failure_hint})...", force=True) logging.warning(f"Invalid API response (retry {retry_count}/{max_retries}): {', '.join(error_details)} | Provider: {provider_name}") # Sleep in small increments to stay responsive to interrupts sleep_end = time.time() + wait_time _backoff_touch_counter = 0 while time.time() < sleep_end: if self._interrupt_requested: self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True) self._persist_session(messages, conversation_history) self.clear_interrupt() return { "final_response": f"Operation interrupted during retry ({_failure_hint}, attempt {retry_count}/{max_retries}).", "messages": messages, "api_calls": api_call_count, "completed": False, "interrupted": True, } time.sleep(0.2) # Touch activity every ~30s so the gateway's inactivity # monitor knows we're alive during backoff waits. 
_backoff_touch_counter += 1 if _backoff_touch_counter % 150 == 0: # 150 × 0.2s = 30s self._touch_activity( f"retry backoff ({retry_count}/{max_retries}), " f"{int(sleep_end - time.time())}s remaining" ) continue # Retry the API call # Check finish_reason before proceeding if self.api_mode == "codex_responses": status = getattr(response, "status", None) incomplete_details = getattr(response, "incomplete_details", None) incomplete_reason = None if isinstance(incomplete_details, dict): incomplete_reason = incomplete_details.get("reason") else: incomplete_reason = getattr(incomplete_details, "reason", None) if status == "incomplete" and incomplete_reason in {"max_output_tokens", "length"}: finish_reason = "length" else: finish_reason = "stop" elif self.api_mode == "anthropic_messages": stop_reason_map = {"end_turn": "stop", "tool_use": "tool_calls", "max_tokens": "length", "stop_sequence": "stop"} finish_reason = stop_reason_map.get(response.stop_reason, "stop") else: finish_reason = response.choices[0].finish_reason assistant_message = response.choices[0].message if self._should_treat_stop_as_truncated( finish_reason, assistant_message, messages, ): self._vprint( f"{self.log_prefix}⚠️ Treating suspicious Ollama/GLM stop response as truncated", force=True, ) finish_reason = "length" if finish_reason == "length": self._vprint(f"{self.log_prefix}⚠️ Response truncated (finish_reason='length') - model hit max output tokens", force=True) # ── Detect thinking-budget exhaustion ────────────── # When the model spends ALL output tokens on reasoning # and has none left for the response, continuation # retries are pointless. Detect this early and give a # targeted error instead of wasting 3 API calls. 
_trunc_content = None _trunc_has_tool_calls = False if self.api_mode in ("chat_completions", "bedrock_converse"): _trunc_msg = response.choices[0].message if (hasattr(response, "choices") and response.choices) else None _trunc_content = getattr(_trunc_msg, "content", None) if _trunc_msg else None _trunc_has_tool_calls = bool(getattr(_trunc_msg, "tool_calls", None)) if _trunc_msg else False elif self.api_mode == "anthropic_messages": # Anthropic response.content is a list of blocks _text_parts = [] for _blk in getattr(response, "content", []): if getattr(_blk, "type", None) == "text": _text_parts.append(getattr(_blk, "text", "")) _trunc_content = "\n".join(_text_parts) if _text_parts else None # A response is "thinking exhausted" only when the model # actually produced reasoning blocks but no visible text after # them. Models that do not use tags (e.g. GLM-4.7 on # NVIDIA Build, minimax) may return content=None or an empty # string for unrelated reasons — treat those as normal # truncations that deserve continuation retries, not as # thinking-budget exhaustion. _has_think_tags = bool( _trunc_content and re.search( r'<(?:think|thinking|reasoning|REASONING_SCRATCHPAD)[^>]*>', _trunc_content, re.IGNORECASE, ) ) _thinking_exhausted = ( not _trunc_has_tool_calls and _has_think_tags and ( (_trunc_content is not None and not self._has_content_after_think_block(_trunc_content)) or _trunc_content is None ) ) if _thinking_exhausted: _exhaust_error = ( "Model used all output tokens on reasoning with none left " "for the response. Try lowering reasoning effort or " "increasing max_tokens." ) self._vprint( f"{self.log_prefix}💭 Reasoning exhausted the output token budget — " f"no visible response was produced.", force=True, ) # Return a user-friendly message as the response so # CLI (response box) and gateway (chat message) both # display it naturally instead of a suppressed error. 
_exhaust_response = ( "⚠️ **Thinking Budget Exhausted**\n\n" "The model used all its output tokens on reasoning " "and had none left for the actual response.\n\n" "To fix this:\n" "→ Lower reasoning effort: `/thinkon low` or `/thinkon minimal`\n" "→ Increase the output token limit: " "set `model.max_tokens` in config.yaml" ) self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": _exhaust_response, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": _exhaust_error, } if self.api_mode in ("chat_completions", "bedrock_converse"): assistant_message = response.choices[0].message if not assistant_message.tool_calls: length_continue_retries += 1 interim_msg = self._build_assistant_message(assistant_message, finish_reason) messages.append(interim_msg) if assistant_message.content: truncated_response_prefix += assistant_message.content if length_continue_retries < 3: self._vprint( f"{self.log_prefix}↻ Requesting continuation " f"({length_continue_retries}/3)..." ) continue_msg = { "role": "user", "content": ( "[System: Your previous response was truncated by the output " "length limit. Continue exactly where you left off. Do not " "restart or repeat prior text. 
Finish the answer directly.]" ), } messages.append(continue_msg) self._session_messages = messages self._save_session_log(messages) restart_with_length_continuation = True break partial_response = self._strip_think_blocks(truncated_response_prefix).strip() self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": partial_response or None, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Response remained truncated after 3 continuation attempts", } if self.api_mode in ("chat_completions", "bedrock_converse"): assistant_message = response.choices[0].message if assistant_message.tool_calls: if truncated_tool_call_retries < 1: truncated_tool_call_retries += 1 self._vprint( f"{self.log_prefix}⚠️ Truncated tool call detected — retrying API call...", force=True, ) # Don't append the broken response to messages; # just re-run the same API call from the current # message state, giving the model another chance. 
continue self._vprint( f"{self.log_prefix}⚠️ Truncated tool call response detected again — refusing to execute incomplete tool arguments.", force=True, ) self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Response truncated due to output length limit", } # If we have prior messages, roll back to last complete state if len(messages) > 1: self._vprint(f"{self.log_prefix} ⏪ Rolling back to last complete assistant turn") rolled_back_messages = self._get_messages_up_to_last_assistant(messages) self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": None, "messages": rolled_back_messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Response truncated due to output length limit" } else: # First message was truncated - mark as failed self._vprint(f"{self.log_prefix}❌ First response truncated - cannot recover", force=True) self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "failed": True, "error": "First response truncated due to output length limit" } # Track actual token usage from response for context management if hasattr(response, 'usage') and response.usage: canonical_usage = normalize_usage( response.usage, provider=self.provider, api_mode=self.api_mode, ) prompt_tokens = canonical_usage.prompt_tokens completion_tokens = canonical_usage.output_tokens total_tokens = canonical_usage.total_tokens usage_dict = { "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens, } self.context_compressor.update_from_response(usage_dict) # Cache discovered context length after successful call. 
# Only persist limits confirmed by the provider (parsed # from the error message), not guessed probe tiers. if getattr(self.context_compressor, "_context_probed", False): ctx = self.context_compressor.context_length if getattr(self.context_compressor, "_context_probe_persistable", False): save_context_length(self.model, self.base_url, ctx) self._safe_print(f"{self.log_prefix}💾 Cached context length: {ctx:,} tokens for {self.model}") self.context_compressor._context_probed = False self.context_compressor._context_probe_persistable = False self.session_prompt_tokens += prompt_tokens self.session_completion_tokens += completion_tokens self.session_total_tokens += total_tokens self.session_api_calls += 1 self.session_input_tokens += canonical_usage.input_tokens self.session_output_tokens += canonical_usage.output_tokens self.session_cache_read_tokens += canonical_usage.cache_read_tokens self.session_cache_write_tokens += canonical_usage.cache_write_tokens self.session_reasoning_tokens += canonical_usage.reasoning_tokens # Log API call details for debugging/observability _cache_pct = "" if canonical_usage.cache_read_tokens and prompt_tokens: _cache_pct = f" cache={canonical_usage.cache_read_tokens}/{prompt_tokens} ({100*canonical_usage.cache_read_tokens/prompt_tokens:.0f}%)" logger.info( "API call #%d: model=%s provider=%s in=%d out=%d total=%d latency=%.1fs%s", self.session_api_calls, self.model, self.provider or "unknown", prompt_tokens, completion_tokens, total_tokens, api_duration, _cache_pct, ) cost_result = estimate_usage_cost( self.model, canonical_usage, provider=self.provider, base_url=self.base_url, api_key=getattr(self, "api_key", ""), ) if cost_result.amount_usd is not None: self.session_estimated_cost_usd += float(cost_result.amount_usd) self.session_cost_status = cost_result.status self.session_cost_source = cost_result.source # Persist token counts to session DB for /insights. 
# Do this for every platform with a session_id so non-CLI # sessions (gateway, cron, delegated runs) cannot lose # token/accounting data if a higher-level persistence path # is skipped or fails. Gateway/session-store writes use # absolute totals, so they safely overwrite these per-call # deltas instead of double-counting them. if self._session_db and self.session_id: try: self._session_db.update_token_counts( self.session_id, input_tokens=canonical_usage.input_tokens, output_tokens=canonical_usage.output_tokens, cache_read_tokens=canonical_usage.cache_read_tokens, cache_write_tokens=canonical_usage.cache_write_tokens, reasoning_tokens=canonical_usage.reasoning_tokens, estimated_cost_usd=float(cost_result.amount_usd) if cost_result.amount_usd is not None else None, cost_status=cost_result.status, cost_source=cost_result.source, billing_provider=self.provider, billing_base_url=self.base_url, billing_mode="subscription_included" if cost_result.status == "included" else None, model=self.model, ) except Exception: pass # never block the agent loop if self.verbose_logging: logging.debug(f"Token usage: prompt={usage_dict['prompt_tokens']:,}, completion={usage_dict['completion_tokens']:,}, total={usage_dict['total_tokens']:,}") # Log cache hit stats when prompt caching is active if self._use_prompt_caching: if self.api_mode == "anthropic_messages": # Anthropic uses cache_read_input_tokens / cache_creation_input_tokens cached = getattr(response.usage, 'cache_read_input_tokens', 0) or 0 written = getattr(response.usage, 'cache_creation_input_tokens', 0) or 0 else: # OpenRouter uses prompt_tokens_details.cached_tokens details = getattr(response.usage, 'prompt_tokens_details', None) cached = getattr(details, 'cached_tokens', 0) or 0 if details else 0 written = getattr(details, 'cache_write_tokens', 0) or 0 if details else 0 prompt = usage_dict["prompt_tokens"] hit_pct = (cached / prompt * 100) if prompt > 0 else 0 if not self.quiet_mode: self._vprint(f"{self.log_prefix} 💾 
Cache: {cached:,}/{prompt:,} tokens ({hit_pct:.0f}% hit, {written:,} written)") has_retried_429 = False # Reset on success # Clear Nous rate limit state on successful request — # proves the limit has reset and other sessions can # resume hitting Nous. if self.provider == "nous": try: from agent.nous_rate_guard import clear_nous_rate_limit clear_nous_rate_limit() except Exception: pass self._touch_activity(f"API call #{api_call_count} completed") break # Success, exit retry loop except InterruptedError: if thinking_spinner: thinking_spinner.stop("") thinking_spinner = None if self.thinking_callback: self.thinking_callback("") api_elapsed = time.time() - api_start_time self._vprint(f"{self.log_prefix}⚡ Interrupted during API call.", force=True) self._persist_session(messages, conversation_history) interrupted = True final_response = f"Operation interrupted: waiting for model response ({api_elapsed:.1f}s elapsed)." break except Exception as api_error: # Stop spinner before printing error messages if thinking_spinner: thinking_spinner.stop("(╥_╥) error, retrying...") thinking_spinner = None if self.thinking_callback: self.thinking_callback("") # ----------------------------------------------------------- # UnicodeEncodeError recovery. Two common causes: # 1. Lone surrogates (U+D800..U+DFFF) from clipboard paste # (Google Docs, rich-text editors) — sanitize and retry. # 2. ASCII codec on systems with LANG=C or non-UTF-8 locale # (e.g. Chromebooks) — any non-ASCII character fails. # Detect via the error message mentioning 'ascii' codec. # We sanitize messages in-place and may retry twice: # first to strip surrogates, then once more for pure # ASCII-only locale sanitization if needed. 
# ----------------------------------------------------------- if isinstance(api_error, UnicodeEncodeError) and getattr(self, '_unicode_sanitization_passes', 0) < 2: _err_str = str(api_error).lower() _is_ascii_codec = "'ascii'" in _err_str or "ascii" in _err_str _surrogates_found = _sanitize_messages_surrogates(messages) if _surrogates_found: self._unicode_sanitization_passes += 1 self._vprint( f"{self.log_prefix}⚠️ Stripped invalid surrogate characters from messages. Retrying...", force=True, ) continue if _is_ascii_codec: self._force_ascii_payload = True # ASCII codec: the system encoding can't handle # non-ASCII characters at all. Sanitize all # non-ASCII content from messages/tool schemas and retry. # Sanitize both the canonical `messages` list and # `api_messages` (the API-copy built before the retry # loop, which may contain extra fields like # reasoning_content that are not in `messages`). _messages_sanitized = _sanitize_messages_non_ascii(messages) if isinstance(api_messages, list): _sanitize_messages_non_ascii(api_messages) # Also sanitize the last api_kwargs if already built, # so a leftover non-ASCII value in a transformed field # (e.g. extra_body, reasoning_content) doesn't survive # into the next attempt via _build_api_kwargs cache paths. 
if isinstance(api_kwargs, dict): _sanitize_structure_non_ascii(api_kwargs) _prefill_sanitized = False if isinstance(getattr(self, "prefill_messages", None), list): _prefill_sanitized = _sanitize_messages_non_ascii(self.prefill_messages) _tools_sanitized = False if isinstance(getattr(self, "tools", None), list): _tools_sanitized = _sanitize_tools_non_ascii(self.tools) _system_sanitized = False if isinstance(active_system_prompt, str): _sanitized_system = _strip_non_ascii(active_system_prompt) if _sanitized_system != active_system_prompt: active_system_prompt = _sanitized_system self._cached_system_prompt = _sanitized_system _system_sanitized = True if isinstance(getattr(self, "ephemeral_system_prompt", None), str): _sanitized_ephemeral = _strip_non_ascii(self.ephemeral_system_prompt) if _sanitized_ephemeral != self.ephemeral_system_prompt: self.ephemeral_system_prompt = _sanitized_ephemeral _system_sanitized = True _headers_sanitized = False _default_headers = ( self._client_kwargs.get("default_headers") if isinstance(getattr(self, "_client_kwargs", None), dict) else None ) if isinstance(_default_headers, dict): _headers_sanitized = _sanitize_structure_non_ascii(_default_headers) # Sanitize the API key — non-ASCII characters in # credentials (e.g. ʋ instead of v from a bad # copy-paste) cause httpx to fail when encoding # the Authorization header as ASCII. This is the # most common cause of persistent UnicodeEncodeError # that survives message/tool sanitization (#6843). _credential_sanitized = False _raw_key = getattr(self, "api_key", None) or "" if _raw_key: _clean_key = _strip_non_ascii(_raw_key) if _clean_key != _raw_key: self.api_key = _clean_key if isinstance(getattr(self, "_client_kwargs", None), dict): self._client_kwargs["api_key"] = _clean_key # Also update the live client — it holds its # own copy of api_key which auth_headers reads # dynamically on every request. 
if getattr(self, "client", None) is not None and hasattr(self.client, "api_key"): self.client.api_key = _clean_key _credential_sanitized = True self._vprint( f"{self.log_prefix}⚠️ API key contained non-ASCII characters " f"(bad copy-paste?) — stripped them. If auth fails, " f"re-copy the key from your provider's dashboard.", force=True, ) # Always retry on ASCII codec detection — # _force_ascii_payload guarantees the full # api_kwargs payload is sanitized on the # next iteration (line ~8475). Even when # per-component checks above find nothing # (e.g. non-ASCII only in api_messages' # reasoning_content), the flag catches it. # Bounded by _unicode_sanitization_passes < 2. self._unicode_sanitization_passes += 1 _any_sanitized = ( _messages_sanitized or _prefill_sanitized or _tools_sanitized or _system_sanitized or _headers_sanitized or _credential_sanitized ) if _any_sanitized: self._vprint( f"{self.log_prefix}⚠️ System encoding is ASCII — stripped non-ASCII characters from request payload. 
Retrying...", force=True, ) else: self._vprint( f"{self.log_prefix}⚠️ System encoding is ASCII — enabling full-payload sanitization for retry...", force=True, ) continue status_code = getattr(api_error, "status_code", None) error_context = self._extract_api_error_context(api_error) # ── Classify the error for structured recovery decisions ── _compressor = getattr(self, "context_compressor", None) _ctx_len = getattr(_compressor, "context_length", 200000) if _compressor else 200000 classified = classify_api_error( api_error, provider=getattr(self, "provider", "") or "", model=getattr(self, "model", "") or "", approx_tokens=approx_tokens, context_length=_ctx_len, num_messages=len(api_messages) if api_messages else 0, ) logger.debug( "Error classified: reason=%s status=%s retryable=%s compress=%s rotate=%s fallback=%s", classified.reason.value, classified.status_code, classified.retryable, classified.should_compress, classified.should_rotate_credential, classified.should_fallback, ) recovered_with_pool, has_retried_429 = self._recover_with_credential_pool( status_code=status_code, has_retried_429=has_retried_429, classified_reason=classified.reason, error_context=error_context, ) if recovered_with_pool: continue if ( self.api_mode == "codex_responses" and self.provider == "openai-codex" and status_code == 401 and not codex_auth_retry_attempted ): codex_auth_retry_attempted = True if self._try_refresh_codex_client_credentials(force=True): self._vprint(f"{self.log_prefix}🔐 Codex auth refreshed after 401. Retrying request...") continue if ( self.api_mode == "chat_completions" and self.provider == "nous" and status_code == 401 and not nous_auth_retry_attempted ): nous_auth_retry_attempted = True if self._try_refresh_nous_client_credentials(force=True): print(f"{self.log_prefix}🔐 Nous agent key refreshed after 401. 
Retrying request...") continue if ( self.api_mode == "anthropic_messages" and status_code == 401 and hasattr(self, '_anthropic_api_key') and not anthropic_auth_retry_attempted ): anthropic_auth_retry_attempted = True from agent.anthropic_adapter import _is_oauth_token if self._try_refresh_anthropic_client_credentials(): print(f"{self.log_prefix}🔐 Anthropic credentials refreshed after 401. Retrying request...") continue # Credential refresh didn't help — show diagnostic info key = self._anthropic_api_key auth_method = "Bearer (OAuth/setup-token)" if _is_oauth_token(key) else "x-api-key (API key)" print(f"{self.log_prefix}🔐 Anthropic 401 — authentication failed.") print(f"{self.log_prefix} Auth method: {auth_method}") print(f"{self.log_prefix} Token prefix: {key[:12]}..." if key and len(key) > 12 else f"{self.log_prefix} Token: (empty or short)") print(f"{self.log_prefix} Troubleshooting:") from hermes_constants import display_hermes_home as _dhh_fn _dhh = _dhh_fn() print(f"{self.log_prefix} • Check ANTHROPIC_TOKEN in {_dhh}/.env for Hermes-managed OAuth/setup tokens") print(f"{self.log_prefix} • Check ANTHROPIC_API_KEY in {_dhh}/.env for API keys or legacy token values") print(f"{self.log_prefix} • For API keys: verify at https://console.anthropic.com/settings/keys") print(f"{self.log_prefix} • For Claude Code: run 'claude /login' to refresh, then retry") print(f"{self.log_prefix} • Legacy cleanup: hermes config set ANTHROPIC_TOKEN \"\"") print(f"{self.log_prefix} • Clear stale keys: hermes config set ANTHROPIC_API_KEY \"\"") # ── Thinking block signature recovery ───────────────── # Anthropic signs thinking blocks against the full turn # content. Any upstream mutation (context compression, # session truncation, message merging) invalidates the # signature → HTTP 400. Recovery: strip reasoning_details # from all messages so the next retry sends no thinking # blocks at all. One-shot — don't retry infinitely. 
if ( classified.reason == FailoverReason.thinking_signature and not thinking_sig_retry_attempted ): thinking_sig_retry_attempted = True for _m in messages: if isinstance(_m, dict): _m.pop("reasoning_details", None) self._vprint( f"{self.log_prefix}⚠️ Thinking block signature invalid — " f"stripped all thinking blocks, retrying...", force=True, ) logging.warning( "%sThinking block signature recovery: stripped " "reasoning_details from %d messages", self.log_prefix, len(messages), ) continue retry_count += 1 elapsed_time = time.time() - api_start_time self._touch_activity( f"API error recovery (attempt {retry_count}/{max_retries})" ) error_type = type(api_error).__name__ error_msg = str(api_error).lower() _error_summary = self._summarize_api_error(api_error) logger.warning( "API call failed (attempt %s/%s) error_type=%s %s summary=%s", retry_count, max_retries, error_type, self._client_log_context(), _error_summary, ) _provider = getattr(self, "provider", "unknown") _base = getattr(self, "base_url", "unknown") _model = getattr(self, "model", "unknown") _status_code_str = f" [HTTP {status_code}]" if status_code else "" self._vprint(f"{self.log_prefix}⚠️ API call failed (attempt {retry_count}/{max_retries}): {error_type}{_status_code_str}", force=True) self._vprint(f"{self.log_prefix} 🔌 Provider: {_provider} Model: {_model}", force=True) self._vprint(f"{self.log_prefix} 🌐 Endpoint: {_base}", force=True) self._vprint(f"{self.log_prefix} 📝 Error: {_error_summary}", force=True) if status_code and status_code < 500: _err_body = getattr(api_error, "body", None) _err_body_str = str(_err_body)[:300] if _err_body else None if _err_body_str: self._vprint(f"{self.log_prefix} 📋 Details: {_err_body_str}", force=True) self._vprint(f"{self.log_prefix} ⏱️ Elapsed: {elapsed_time:.2f}s Context: {len(api_messages)} msgs, ~{approx_tokens:,} tokens") # Actionable hint for OpenRouter "no tool endpoints" error. 
# This fires regardless of whether fallback succeeds — the # user needs to know WHY their model failed so they can fix # their provider routing, not just silently fall back. if ( self._is_openrouter_url() and "support tool use" in error_msg ): self._vprint( f"{self.log_prefix} 💡 No OpenRouter providers for {_model} support tool calling with your current settings.", force=True, ) if self.providers_allowed: self._vprint( f"{self.log_prefix} Your provider_routing.only restriction is filtering out tool-capable providers.", force=True, ) self._vprint( f"{self.log_prefix} Try removing the restriction or adding providers that support tools for this model.", force=True, ) self._vprint( f"{self.log_prefix} Check which providers support tools: https://openrouter.ai/models/{_model}", force=True, ) # Check for interrupt before deciding to retry if self._interrupt_requested: self._vprint(f"{self.log_prefix}⚡ Interrupt detected during error handling, aborting retries.", force=True) self._persist_session(messages, conversation_history) self.clear_interrupt() return { "final_response": f"Operation interrupted: handling API error ({error_type}: {self._clean_error_message(str(api_error))}).", "messages": messages, "api_calls": api_call_count, "completed": False, "interrupted": True, } # Check for 413 payload-too-large BEFORE generic 4xx handler. # A 413 is a payload-size error — the correct response is to # compress history and retry, not abort immediately. status_code = getattr(api_error, "status_code", None) # ── Anthropic Sonnet long-context tier gate ─────────── # Anthropic returns HTTP 429 "Extra usage is required for # long context requests" when a Claude Max (or similar) # subscription doesn't include the 1M-context tier. This # is NOT a transient rate limit — retrying or switching # credentials won't help. Reduce context to 200k (the # standard tier) and compress. 
if classified.reason == FailoverReason.long_context_tier: _reduced_ctx = 200000 compressor = self.context_compressor old_ctx = compressor.context_length if old_ctx > _reduced_ctx: compressor.update_model( model=self.model, context_length=_reduced_ctx, base_url=self.base_url, api_key=getattr(self, "api_key", ""), provider=self.provider, ) # Context probing flags — only set on built-in # compressor (plugin engines manage their own). if hasattr(compressor, "_context_probed"): compressor._context_probed = True # Don't persist — this is a subscription-tier # limitation, not a model capability. If the # user later enables extra usage the 1M limit # should come back automatically. compressor._context_probe_persistable = False self._vprint( f"{self.log_prefix}⚠️ Anthropic long-context tier " f"requires extra usage — reducing context: " f"{old_ctx:,} → {_reduced_ctx:,} tokens", force=True, ) compression_attempts += 1 if compression_attempts <= max_compression_attempts: original_len = len(messages) messages, active_system_prompt = self._compress_context( messages, system_message, approx_tokens=approx_tokens, task_id=effective_task_id, ) # Compression created a new session — clear history # so _flush_messages_to_session_db writes compressed # messages to the new session, not skipping them. conversation_history = None if len(messages) < original_len or old_ctx > _reduced_ctx: self._emit_status( f"🗜️ Context reduced to {_reduced_ctx:,} tokens " f"(was {old_ctx:,}), retrying..." ) time.sleep(2) restart_with_compressed_messages = True break # Fall through to normal error handling if compression # is exhausted or didn't help. # Eager fallback for rate-limit errors (429 or quota exhaustion). # When a fallback model is configured, switch immediately instead # of burning through retries with exponential backoff -- the # primary provider won't recover within the retry window. 
is_rate_limited = classified.reason in ( FailoverReason.rate_limit, FailoverReason.billing, ) if is_rate_limited and self._fallback_index < len(self._fallback_chain): # Don't eagerly fallback if credential pool rotation may # still recover. The pool's retry-then-rotate cycle needs # at least one more attempt to fire — jumping to a fallback # provider here short-circuits it. pool = self._credential_pool pool_may_recover = pool is not None and pool.has_available() if not pool_may_recover: self._emit_status("⚠️ Rate limited — switching to fallback provider...") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue # ── Nous Portal: record rate limit & skip retries ───── # When Nous returns a 429, record the reset time to a # shared file so ALL sessions (cron, gateway, auxiliary) # know not to pile on. Then skip further retries — # each one burns another RPH request and deepens the # rate limit hole. The retry loop's top-of-iteration # guard will catch this on the next pass and try # fallback or bail with a clear message. if ( is_rate_limited and self.provider == "nous" and classified.reason == FailoverReason.rate_limit and not recovered_with_pool ): try: from agent.nous_rate_guard import record_nous_rate_limit _err_resp = getattr(api_error, "response", None) _err_hdrs = ( getattr(_err_resp, "headers", None) if _err_resp else None ) record_nous_rate_limit( headers=_err_hdrs, error_context=error_context, ) except Exception: pass # Skip straight to max_retries — the top-of-loop # guard will handle fallback or bail cleanly. 
retry_count = max_retries continue is_payload_too_large = ( classified.reason == FailoverReason.payload_too_large ) if is_payload_too_large: compression_attempts += 1 if compression_attempts > max_compression_attempts: self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached for payload-too-large error.", force=True) self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}413 compression failed after {max_compression_attempts} attempts.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": f"Request payload too large: max compression attempts ({max_compression_attempts}) reached.", "partial": True, "failed": True, "compression_exhausted": True, } self._emit_status(f"⚠️ Request payload too large (413) — compression attempt {compression_attempts}/{max_compression_attempts}...") original_len = len(messages) messages, active_system_prompt = self._compress_context( messages, system_message, approx_tokens=approx_tokens, task_id=effective_task_id, ) # Compression created a new session — clear history # so _flush_messages_to_session_db writes compressed # messages to the new session, not skipping them. conversation_history = None if len(messages) < original_len: self._emit_status(f"🗜️ Compressed {original_len} → {len(messages)} messages, retrying...") time.sleep(2) # Brief pause between compression retries restart_with_compressed_messages = True break else: self._vprint(f"{self.log_prefix}❌ Payload too large and cannot compress further.", force=True) self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}413 payload too large. 
Cannot compress further.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": "Request payload too large (413). Cannot compress further.", "partial": True, "failed": True, "compression_exhausted": True, } # Check for context-length errors BEFORE generic 4xx handler. # The classifier detects context overflow from: explicit error # messages, generic 400 + large session heuristic (#1630), and # server disconnect + large session pattern (#2153). is_context_length_error = ( classified.reason == FailoverReason.context_overflow ) if is_context_length_error: compressor = self.context_compressor old_ctx = compressor.context_length # ── Distinguish two very different errors ─────────── # 1. "Prompt too long": the INPUT exceeds the context window. # Fix: reduce context_length + compress history. # 2. "max_tokens too large": input is fine, but # input_tokens + requested max_tokens > context_window. # Fix: reduce max_tokens (the OUTPUT cap) for this call. # Do NOT shrink context_length — the window is unchanged. # # Note: max_tokens = output token cap (one response). # context_length = total window (input + output combined). available_out = parse_available_output_tokens_from_error(error_msg) if available_out is not None: # Error is purely about the output cap being too large. # Cap output to the available space and retry without # touching context_length or triggering compression. safe_out = max(1, available_out - 64) # small safety margin self._ephemeral_max_output_tokens = safe_out self._vprint( f"{self.log_prefix}⚠️ Output cap too large for current prompt — " f"retrying with max_tokens={safe_out:,} " f"(available_tokens={available_out:,}; context_length unchanged at {old_ctx:,})", force=True, ) # Still count against compression_attempts so we don't # loop forever if the error keeps recurring. 
compression_attempts += 1 if compression_attempts > max_compression_attempts: self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached.", force=True) self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}Context compression failed after {max_compression_attempts} attempts.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": f"Context length exceeded: max compression attempts ({max_compression_attempts}) reached.", "partial": True, "failed": True, "compression_exhausted": True, } restart_with_compressed_messages = True break # Error is about the INPUT being too large — reduce context_length. # Try to parse the actual limit from the error message parsed_limit = parse_context_limit_from_error(error_msg) if parsed_limit and parsed_limit < old_ctx: new_ctx = parsed_limit self._vprint(f"{self.log_prefix}⚠️ Context limit detected from API: {new_ctx:,} tokens (was {old_ctx:,})", force=True) else: # Step down to the next probe tier new_ctx = get_next_probe_tier(old_ctx) if new_ctx and new_ctx < old_ctx: compressor.update_model( model=self.model, context_length=new_ctx, base_url=self.base_url, api_key=getattr(self, "api_key", ""), provider=self.provider, ) # Context probing flags — only set on built-in # compressor (plugin engines manage their own). if hasattr(compressor, "_context_probed"): compressor._context_probed = True # Only persist limits parsed from the provider's # error message (a real number). Guessed fallback # tiers from get_next_probe_tier() should stay # in-memory only — persisting them pollutes the # cache with wrong values. 
compressor._context_probe_persistable = bool( parsed_limit and parsed_limit == new_ctx ) self._vprint(f"{self.log_prefix}⚠️ Context length exceeded — stepping down: {old_ctx:,} → {new_ctx:,} tokens", force=True) else: self._vprint(f"{self.log_prefix}⚠️ Context length exceeded at minimum tier — attempting compression...", force=True) compression_attempts += 1 if compression_attempts > max_compression_attempts: self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached.", force=True) self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}Context compression failed after {max_compression_attempts} attempts.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": f"Context length exceeded: max compression attempts ({max_compression_attempts}) reached.", "partial": True, "failed": True, "compression_exhausted": True, } self._emit_status(f"🗜️ Context too large (~{approx_tokens:,} tokens) — compressing ({compression_attempts}/{max_compression_attempts})...") original_len = len(messages) messages, active_system_prompt = self._compress_context( messages, system_message, approx_tokens=approx_tokens, task_id=effective_task_id, ) # Compression created a new session — clear history # so _flush_messages_to_session_db writes compressed # messages to the new session, not skipping them. 
conversation_history = None if len(messages) < original_len or new_ctx and new_ctx < old_ctx: if len(messages) < original_len: self._emit_status(f"🗜️ Compressed {original_len} → {len(messages)} messages, retrying...") time.sleep(2) # Brief pause between compression retries restart_with_compressed_messages = True break else: # Can't compress further and already at minimum tier self._vprint(f"{self.log_prefix}❌ Context length exceeded and cannot compress further.", force=True) self._vprint(f"{self.log_prefix} 💡 The conversation has accumulated too much content. Try /new to start fresh, or /compress to manually trigger compression.", force=True) logging.error(f"{self.log_prefix}Context length exceeded: {approx_tokens:,} tokens. Cannot compress further.") self._persist_session(messages, conversation_history) return { "messages": messages, "completed": False, "api_calls": api_call_count, "error": f"Context length exceeded ({approx_tokens:,} tokens). Cannot compress further.", "partial": True, "failed": True, "compression_exhausted": True, } # Check for non-retryable client errors. The classifier # already accounts for 413, 429, 529 (transient), context # overflow, and generic-400 heuristics. Local validation # errors (ValueError, TypeError) are programming bugs. is_local_validation_error = ( isinstance(api_error, (ValueError, TypeError)) and not isinstance(api_error, UnicodeEncodeError) ) is_client_error = ( is_local_validation_error or ( not classified.retryable and not classified.should_compress and classified.reason not in ( FailoverReason.rate_limit, FailoverReason.billing, FailoverReason.overloaded, FailoverReason.context_overflow, FailoverReason.payload_too_large, FailoverReason.long_context_tier, FailoverReason.thinking_signature, ) ) ) and not is_context_length_error if is_client_error: # Try fallback before aborting — a different provider # may not have the same issue (rate limit, auth, etc.) 
self._emit_status(f"⚠️ Non-retryable error (HTTP {status_code}) — trying fallback...") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue if api_kwargs is not None: self._dump_api_request_debug( api_kwargs, reason="non_retryable_client_error", error=api_error, ) self._emit_status( f"❌ Non-retryable error (HTTP {status_code}): " f"{self._summarize_api_error(api_error)}" ) self._vprint(f"{self.log_prefix}❌ Non-retryable client error (HTTP {status_code}). Aborting.", force=True) self._vprint(f"{self.log_prefix} 🔌 Provider: {_provider} Model: {_model}", force=True) self._vprint(f"{self.log_prefix} 🌐 Endpoint: {_base}", force=True) # Actionable guidance for common auth errors if classified.is_auth or classified.reason == FailoverReason.billing: if _provider == "openai-codex" and status_code == 401: self._vprint(f"{self.log_prefix} 💡 Codex OAuth token was rejected (HTTP 401). Your token may have been", force=True) self._vprint(f"{self.log_prefix} refreshed by another client (Codex CLI, VS Code). To fix:", force=True) self._vprint(f"{self.log_prefix} 1. Run `codex` in your terminal to generate fresh tokens.", force=True) self._vprint(f"{self.log_prefix} 2. Then run `hermes auth` to re-authenticate.", force=True) else: self._vprint(f"{self.log_prefix} 💡 Your API key was rejected by the provider. Check:", force=True) self._vprint(f"{self.log_prefix} • Is the key valid? 
Run: hermes setup", force=True) self._vprint(f"{self.log_prefix} • Does your account have access to {_model}?", force=True) if "openrouter" in str(_base).lower(): self._vprint(f"{self.log_prefix} • Check credits: https://openrouter.ai/settings/credits", force=True) else: self._vprint(f"{self.log_prefix} 💡 This type of error won't be fixed by retrying.", force=True) logging.error(f"{self.log_prefix}Non-retryable client error: {api_error}") # Skip session persistence when the error is likely # context-overflow related (status 400 + large session). # Persisting the failed user message would make the # session even larger, causing the same failure on the # next attempt. (#1630) if status_code == 400 and (approx_tokens > 50000 or len(api_messages) > 80): self._vprint( f"{self.log_prefix}⚠️ Skipping session persistence " f"for large failed session to prevent growth loop.", force=True, ) else: self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "failed": True, "error": str(api_error), } if retry_count >= max_retries: # Before falling back, try rebuilding the primary # client once for transient transport errors (stale # connection pool, TCP reset). Only attempted once # per API call block. 
if not primary_recovery_attempted and self._try_recover_primary_transport( api_error, retry_count=retry_count, max_retries=max_retries, ): primary_recovery_attempted = True retry_count = 0 continue # Try fallback before giving up entirely self._emit_status(f"⚠️ Max retries ({max_retries}) exhausted — trying fallback...") if self._try_activate_fallback(): retry_count = 0 compression_attempts = 0 primary_recovery_attempted = False continue _final_summary = self._summarize_api_error(api_error) if is_rate_limited: self._emit_status(f"❌ Rate limited after {max_retries} retries — {_final_summary}") else: self._emit_status(f"❌ API failed after {max_retries} retries — {_final_summary}") self._vprint(f"{self.log_prefix} 💀 Final error: {_final_summary}", force=True) # Detect SSE stream-drop pattern (e.g. "Network # connection lost") and surface actionable guidance. # This typically happens when the model generates a # very large tool call (write_file with huge content) # and the proxy/CDN drops the stream mid-response. _is_stream_drop = ( not getattr(api_error, "status_code", None) and any(p in error_msg for p in ( "connection lost", "connection reset", "connection closed", "network connection", "network error", "terminated", )) ) if _is_stream_drop: self._vprint( f"{self.log_prefix} 💡 The provider's stream " f"connection keeps dropping. This often happens " f"when the model tries to write a very large " f"file in a single tool call.", force=True, ) self._vprint( f"{self.log_prefix} Try asking the model " f"to use execute_code with Python's open() for " f"large files, or to write the file in smaller " f"sections.", force=True, ) logging.error( "%sAPI call failed after %s retries. 
%s | provider=%s model=%s msgs=%s tokens=~%s", self.log_prefix, max_retries, _final_summary, _provider, _model, len(api_messages), f"{approx_tokens:,}", ) if api_kwargs is not None: self._dump_api_request_debug( api_kwargs, reason="max_retries_exhausted", error=api_error, ) self._persist_session(messages, conversation_history) _final_response = f"API call failed after {max_retries} retries: {_final_summary}" if _is_stream_drop: _final_response += ( "\n\nThe provider's stream connection keeps " "dropping — this often happens when generating " "very large tool call responses (e.g. write_file " "with long content). Try asking me to use " "execute_code with Python's open() for large " "files, or to write in smaller sections." ) return { "final_response": _final_response, "messages": messages, "api_calls": api_call_count, "completed": False, "failed": True, "error": _final_summary, } # For rate limits, respect the Retry-After header if present _retry_after = None if is_rate_limited: _resp_headers = getattr(getattr(api_error, "response", None), "headers", None) if _resp_headers and hasattr(_resp_headers, "get"): _ra_raw = _resp_headers.get("retry-after") or _resp_headers.get("Retry-After") if _ra_raw: try: _retry_after = min(int(_ra_raw), 120) # Cap at 2 minutes except (TypeError, ValueError): pass wait_time = _retry_after if _retry_after else jittered_backoff(retry_count, base_delay=2.0, max_delay=60.0) if is_rate_limited: self._emit_status(f"⏱️ Rate limit reached. 
Waiting {wait_time}s before retry (attempt {retry_count + 1}/{max_retries})...") else: self._emit_status(f"⏳ Retrying in {wait_time}s (attempt {retry_count}/{max_retries})...") logger.warning( "Retrying API call in %ss (attempt %s/%s) %s error=%s", wait_time, retry_count, max_retries, self._client_log_context(), api_error, ) # Sleep in small increments so we can respond to interrupts quickly # instead of blocking the entire wait_time in one sleep() call sleep_end = time.time() + wait_time _backoff_touch_counter = 0 while time.time() < sleep_end: if self._interrupt_requested: self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True) self._persist_session(messages, conversation_history) self.clear_interrupt() return { "final_response": f"Operation interrupted: retrying API call after error (retry {retry_count}/{max_retries}).", "messages": messages, "api_calls": api_call_count, "completed": False, "interrupted": True, } time.sleep(0.2) # Check interrupt every 200ms # Touch activity every ~30s so the gateway's inactivity # monitor knows we're alive during backoff waits. _backoff_touch_counter += 1 if _backoff_touch_counter % 150 == 0: # 150 × 0.2s = 30s self._touch_activity( f"error retry backoff ({retry_count}/{max_retries}), " f"{int(sleep_end - time.time())}s remaining" ) # If the API call was interrupted, skip response processing if interrupted: _turn_exit_reason = "interrupted_during_api_call" break if restart_with_compressed_messages: api_call_count -= 1 self.iteration_budget.refund() # Count compression restarts toward the retry limit to prevent # infinite loops when compression reduces messages but not enough # to fit the context window. retry_count += 1 restart_with_compressed_messages = False continue if restart_with_length_continuation: continue # Guard: if all retries exhausted without a successful response # (e.g. repeated context-length errors that exhausted retry_count), # the `response` variable is still None. 
Break out cleanly. if response is None: _turn_exit_reason = "all_retries_exhausted_no_response" print(f"{self.log_prefix}❌ All API retries exhausted with no successful response.") self._persist_session(messages, conversation_history) break try: if self.api_mode == "codex_responses": assistant_message, finish_reason = self._normalize_codex_response(response) elif self.api_mode == "anthropic_messages": from agent.anthropic_adapter import normalize_anthropic_response assistant_message, finish_reason = normalize_anthropic_response( response, strip_tool_prefix=self._is_anthropic_oauth ) else: assistant_message = response.choices[0].message # Normalize content to string — some OpenAI-compatible servers # (llama-server, etc.) return content as a dict or list instead # of a plain string, which crashes downstream .strip() calls. if assistant_message.content is not None and not isinstance(assistant_message.content, str): raw = assistant_message.content if isinstance(raw, dict): assistant_message.content = raw.get("text", "") or raw.get("content", "") or json.dumps(raw) elif isinstance(raw, list): # Multimodal content list — extract text parts parts = [] for part in raw: if isinstance(part, str): parts.append(part) elif isinstance(part, dict) and part.get("type") == "text": parts.append(part.get("text", "")) elif isinstance(part, dict) and "text" in part: parts.append(str(part["text"])) assistant_message.content = "\n".join(parts) else: assistant_message.content = str(raw) try: from hermes_cli.plugins import invoke_hook as _invoke_hook _assistant_tool_calls = getattr(assistant_message, "tool_calls", None) or [] _assistant_text = assistant_message.content or "" _invoke_hook( "post_api_request", task_id=effective_task_id, session_id=self.session_id or "", platform=self.platform or "", model=self.model, provider=self.provider, base_url=self.base_url, api_mode=self.api_mode, api_call_count=api_call_count, api_duration=api_duration, finish_reason=finish_reason, 
message_count=len(api_messages), response_model=getattr(response, "model", None), usage=self._usage_summary_for_api_request_hook(response), assistant_content_chars=len(_assistant_text), assistant_tool_call_count=len(_assistant_tool_calls), ) except Exception: pass # Handle assistant response if assistant_message.content and not self.quiet_mode: if self.verbose_logging: self._vprint(f"{self.log_prefix}🤖 Assistant: {assistant_message.content}") else: self._vprint(f"{self.log_prefix}🤖 Assistant: {assistant_message.content[:100]}{'...' if len(assistant_message.content) > 100 else ''}") # Notify progress callback of model's thinking (used by subagent # delegation to relay the child's reasoning to the parent display). if (assistant_message.content and self.tool_progress_callback): _think_text = assistant_message.content.strip() # Strip reasoning XML tags that shouldn't leak to parent display _think_text = re.sub( r'', '', _think_text ).strip() # For subagents: relay first line to parent display (existing behaviour). # For all agents with a structured callback: emit reasoning.available event. 
first_line = _think_text.split('\n')[0][:80] if _think_text else "" if first_line and getattr(self, '_delegate_depth', 0) > 0: try: self.tool_progress_callback("_thinking", first_line) except Exception: pass elif _think_text: try: self.tool_progress_callback("reasoning.available", "_thinking", _think_text[:500], None) except Exception: pass # Check for incomplete (opened but never closed) # This means the model ran out of output tokens mid-reasoning — retry up to 2 times if has_incomplete_scratchpad(assistant_message.content or ""): self._incomplete_scratchpad_retries += 1 self._vprint(f"{self.log_prefix}⚠️ Incomplete detected (opened but never closed)") if self._incomplete_scratchpad_retries <= 2: self._vprint(f"{self.log_prefix}🔄 Retrying API call ({self._incomplete_scratchpad_retries}/2)...") # Don't add the broken message, just retry continue else: # Max retries - discard this turn and save as partial self._vprint(f"{self.log_prefix}❌ Max retries (2) for incomplete scratchpad. Saving as partial.", force=True) self._incomplete_scratchpad_retries = 0 rolled_back_messages = self._get_messages_up_to_last_assistant(messages) self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": None, "messages": rolled_back_messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Incomplete REASONING_SCRATCHPAD after 2 retries" } # Reset incomplete scratchpad counter on clean response self._incomplete_scratchpad_retries = 0 if self.api_mode == "codex_responses" and finish_reason == "incomplete": self._codex_incomplete_retries += 1 interim_msg = self._build_assistant_message(assistant_message, finish_reason) interim_has_content = bool((interim_msg.get("content") or "").strip()) interim_has_reasoning = bool(interim_msg.get("reasoning", "").strip()) if isinstance(interim_msg.get("reasoning"), str) else False interim_has_codex_reasoning = bool(interim_msg.get("codex_reasoning_items")) 
if interim_has_content or interim_has_reasoning or interim_has_codex_reasoning: last_msg = messages[-1] if messages else None # Duplicate detection: two consecutive incomplete assistant # messages with identical content AND reasoning are collapsed. # For reasoning-only messages (codex_reasoning_items differ but # visible content/reasoning are both empty), we also compare # the encrypted items to avoid silently dropping new state. last_codex_items = last_msg.get("codex_reasoning_items") if isinstance(last_msg, dict) else None interim_codex_items = interim_msg.get("codex_reasoning_items") duplicate_interim = ( isinstance(last_msg, dict) and last_msg.get("role") == "assistant" and last_msg.get("finish_reason") == "incomplete" and (last_msg.get("content") or "") == (interim_msg.get("content") or "") and (last_msg.get("reasoning") or "") == (interim_msg.get("reasoning") or "") and last_codex_items == interim_codex_items ) if not duplicate_interim: messages.append(interim_msg) self._emit_interim_assistant_message(interim_msg) if self._codex_incomplete_retries < 3: if not self.quiet_mode: self._vprint(f"{self.log_prefix}↻ Codex response incomplete; continuing turn ({self._codex_incomplete_retries}/3)") self._session_messages = messages self._save_session_log(messages) continue self._codex_incomplete_retries = 0 self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Codex response remained incomplete after 3 continuation attempts", } elif hasattr(self, "_codex_incomplete_retries"): self._codex_incomplete_retries = 0 # Check for tool calls if assistant_message.tool_calls: if not self.quiet_mode: self._vprint(f"{self.log_prefix}🔧 Processing {len(assistant_message.tool_calls)} tool call(s)...") if self.verbose_logging: for tc in assistant_message.tool_calls: logging.debug(f"Tool call: {tc.function.name} with args: {tc.function.arguments[:200]}...") 
# Validate tool call names - detect model hallucinations # Repair mismatched tool names before validating for tc in assistant_message.tool_calls: if tc.function.name not in self.valid_tool_names: repaired = self._repair_tool_call(tc.function.name) if repaired: print(f"{self.log_prefix}🔧 Auto-repaired tool name: '{tc.function.name}' -> '{repaired}'") tc.function.name = repaired invalid_tool_calls = [ tc.function.name for tc in assistant_message.tool_calls if tc.function.name not in self.valid_tool_names ] if invalid_tool_calls: # Track retries for invalid tool calls self._invalid_tool_retries += 1 # Return helpful error to model — model can self-correct next turn available = ", ".join(sorted(self.valid_tool_names)) invalid_name = invalid_tool_calls[0] invalid_preview = invalid_name[:80] + "..." if len(invalid_name) > 80 else invalid_name self._vprint(f"{self.log_prefix}⚠️ Unknown tool '{invalid_preview}' — sending error to model for self-correction ({self._invalid_tool_retries}/3)") if self._invalid_tool_retries >= 3: self._vprint(f"{self.log_prefix}❌ Max retries (3) for invalid tool calls exceeded. Stopping as partial.", force=True) self._invalid_tool_retries = 0 self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": f"Model generated invalid tool call: {invalid_preview}" } assistant_msg = self._build_assistant_message(assistant_message, finish_reason) messages.append(assistant_msg) for tc in assistant_message.tool_calls: if tc.function.name not in self.valid_tool_names: content = f"Tool '{tc.function.name}' does not exist. Available tools: {available}" else: content = "Skipped: another tool call in this turn used an invalid name. Please retry this tool call." 
messages.append({ "role": "tool", "tool_call_id": tc.id, "content": content, }) continue # Reset retry counter on successful tool call validation self._invalid_tool_retries = 0 # Validate tool call arguments are valid JSON # Handle empty strings as empty objects (common model quirk) invalid_json_args = [] for tc in assistant_message.tool_calls: args = tc.function.arguments if isinstance(args, (dict, list)): tc.function.arguments = json.dumps(args) continue if args is not None and not isinstance(args, str): tc.function.arguments = str(args) args = tc.function.arguments # Treat empty/whitespace strings as empty object if not args or not args.strip(): tc.function.arguments = "{}" continue try: json.loads(args) except json.JSONDecodeError as e: invalid_json_args.append((tc.function.name, str(e))) if invalid_json_args: # Check if the invalid JSON is due to truncation rather # than a model formatting mistake. Routers sometimes # rewrite finish_reason from "length" to "tool_calls", # hiding the truncation from the length handler above. # Detect truncation: args that don't end with } or ] # (after stripping whitespace) are cut off mid-stream. 
_truncated = any( not (tc.function.arguments or "").rstrip().endswith(("}", "]")) for tc in assistant_message.tool_calls if tc.function.name in {n for n, _ in invalid_json_args} ) if _truncated: self._vprint( f"{self.log_prefix}⚠️ Truncated tool call arguments detected " f"(finish_reason={finish_reason!r}) — refusing to execute.", force=True, ) self._invalid_json_retries = 0 self._cleanup_task_resources(effective_task_id) self._persist_session(messages, conversation_history) return { "final_response": None, "messages": messages, "api_calls": api_call_count, "completed": False, "partial": True, "error": "Response truncated due to output length limit", } # Track retries for invalid JSON arguments self._invalid_json_retries += 1 tool_name, error_msg = invalid_json_args[0] self._vprint(f"{self.log_prefix}⚠️ Invalid JSON in tool call arguments for '{tool_name}': {error_msg}") if self._invalid_json_retries < 3: self._vprint(f"{self.log_prefix}🔄 Retrying API call ({self._invalid_json_retries}/3)...") # Don't add anything to messages, just retry the API call continue else: # Instead of returning partial, inject tool error results so the model can recover. # Using tool results (not user messages) preserves role alternation. self._vprint(f"{self.log_prefix}⚠️ Injecting recovery tool results for invalid JSON...") self._invalid_json_retries = 0 # Reset for next attempt # Append the assistant message with its (broken) tool_calls recovery_assistant = self._build_assistant_message(assistant_message, finish_reason) messages.append(recovery_assistant) # Respond with tool error results for each tool call invalid_names = {name for name, _ in invalid_json_args} for tc in assistant_message.tool_calls: if tc.function.name in invalid_names: err = next(e for n, e in invalid_json_args if n == tc.function.name) tool_result = ( f"Error: Invalid JSON arguments. {err}. " f"For tools with no required parameters, use an empty object: {{}}. " f"Please retry with valid JSON." 
) else: tool_result = "Skipped: other tool call in this response had invalid JSON." messages.append({ "role": "tool", "tool_call_id": tc.id, "content": tool_result, }) continue # Reset retry counter on successful JSON validation self._invalid_json_retries = 0 # ── Post-call guardrails ────────────────────────── assistant_message.tool_calls = self._cap_delegate_task_calls( assistant_message.tool_calls ) assistant_message.tool_calls = self._deduplicate_tool_calls( assistant_message.tool_calls ) assistant_msg = self._build_assistant_message(assistant_message, finish_reason) # If this turn has both content AND tool_calls, capture the content # as a fallback final response. Common pattern: model delivers its # answer and calls memory/skill tools as a side-effect in the same # turn. If the follow-up turn after tools is empty, we use this. turn_content = assistant_message.content or "" if turn_content and self._has_content_after_think_block(turn_content): self._last_content_with_tools = turn_content # Only mute subsequent output when EVERY tool call in # this turn is post-response housekeeping (memory, todo, # skill_manage, etc.). If any substantive tool is present # (search_files, read_file, write_file, terminal, ...), # keep output visible so the user sees progress. _HOUSEKEEPING_TOOLS = frozenset({ "memory", "todo", "skill_manage", "session_search", }) _all_housekeeping = all( tc.function.name in _HOUSEKEEPING_TOOLS for tc in assistant_message.tool_calls ) self._last_content_tools_all_housekeeping = _all_housekeeping if _all_housekeeping and self._has_stream_consumers(): self._mute_post_response = True elif self.quiet_mode: clean = self._strip_think_blocks(turn_content).strip() if clean: self._vprint(f" ┊ 💬 {clean}") # Pop thinking-only prefill message(s) before appending # (tool-call path — same rationale as the final-response path). 
_had_prefill = False while ( messages and isinstance(messages[-1], dict) and messages[-1].get("_thinking_prefill") ): messages.pop() _had_prefill = True # Reset prefill counter when tool calls follow a prefill # recovery. Without this, the counter accumulates across # the whole conversation — a model that intermittently # empties (empty → prefill → tools → empty → prefill → # tools) burns both prefill attempts and the third empty # gets zero recovery. Resetting here treats each tool- # call success as a fresh start. if _had_prefill: self._thinking_prefill_retries = 0 self._empty_content_retries = 0 # Successful tool execution — reset the post-tool nudge # flag so it can fire again if the model goes empty on # a LATER tool round. self._post_tool_empty_retried = False messages.append(assistant_msg) self._emit_interim_assistant_message(assistant_msg) # Close any open streaming display (response box, reasoning # box) before tool execution begins. Intermediate turns may # have streamed early content that opened the response box; # flushing here prevents it from wrapping tool feed lines. # Only signal the display callback — TTS (_stream_callback) # should NOT receive None (it uses None as end-of-stream). if self.stream_delta_callback: try: self.stream_delta_callback(None) except Exception: pass self._execute_tool_calls(assistant_message, messages, effective_task_id, api_call_count) # Reset per-turn retry counters after successful tool # execution so a single truncation doesn't poison the # entire conversation. truncated_tool_call_retries = 0 # Signal that a paragraph break is needed before the next # streamed text. We don't emit it immediately because # multiple consecutive tool iterations would stack up # redundant blank lines. Instead, _fire_stream_delta() # will prepend a single "\n\n" the next time real text # arrives. self._stream_needs_break = True # Refund the iteration if the ONLY tool(s) called were # execute_code (programmatic tool calling). 
These are # cheap RPC-style calls that shouldn't eat the budget. _tc_names = {tc.function.name for tc in assistant_message.tool_calls} if _tc_names == {"execute_code"}: self.iteration_budget.refund() # Use real token counts from the API response to decide # compression. prompt_tokens + completion_tokens is the # actual context size the provider reported plus the # assistant turn — a tight lower bound for the next prompt. # Tool results appended above aren't counted yet, but the # threshold (default 50%) leaves ample headroom; if tool # results push past it, the next API call will report the # real total and trigger compression then. # # If last_prompt_tokens is 0 (stale after API disconnect # or provider returned no usage data), fall back to rough # estimate to avoid missing compression. Without this, # a session can grow unbounded after disconnects because # should_compress(0) never fires. (#2153) _compressor = self.context_compressor if _compressor.last_prompt_tokens > 0: _real_tokens = ( _compressor.last_prompt_tokens + _compressor.last_completion_tokens ) else: _real_tokens = estimate_messages_tokens_rough(messages) if self.compression_enabled and _compressor.should_compress(_real_tokens): self._safe_print(" ⟳ compacting context…") messages, active_system_prompt = self._compress_context( messages, system_message, approx_tokens=self.context_compressor.last_prompt_tokens, task_id=effective_task_id, ) # Compression created a new session — clear history so # _flush_messages_to_session_db writes compressed messages # to the new session (see preflight compression comment). 
conversation_history = None # Save session log incrementally (so progress is visible even if interrupted) self._session_messages = messages self._save_session_log(messages) # Continue loop for next response continue else: # No tool calls - this is the final response final_response = assistant_message.content or "" # Fix: unmute output when entering the no-tool-call branch # so the user can see empty-response warnings and recovery # status messages. _mute_post_response was set during a # prior housekeeping tool turn and should not silence the # final response path. self._mute_post_response = False # Check if response only has think block with no actual content after it if not self._has_content_after_think_block(final_response): # ── Partial stream recovery ───────────────────── # If content was already streamed to the user before # the connection died, use it as the final response # instead of falling through to prior-turn fallback # or wasting API calls on retries. _partial_streamed = ( getattr(self, "_current_streamed_assistant_text", "") or "" ) if self._has_content_after_think_block(_partial_streamed): _turn_exit_reason = "partial_stream_recovery" _recovered = self._strip_think_blocks(_partial_streamed).strip() logger.info( "Partial stream content delivered (%d chars) " "— using as final response", len(_recovered), ) self._emit_status( "↻ Stream interrupted — using delivered content " "as final response" ) final_response = _recovered self._response_was_previewed = True break # If the previous turn already delivered real content alongside # HOUSEKEEPING tool calls (e.g. "You're welcome!" + memory save), # the model has nothing more to say. Use the earlier content # immediately instead of wasting API calls on retries. # NOTE: Only use this shortcut when ALL tools in that turn were # housekeeping (memory, todo, etc.). 
When substantive tools # were called (terminal, search_files, etc.), the content was # likely mid-task narration ("I'll scan the directory...") and # the empty follow-up means the model choked — let the # post-tool nudge below handle that instead of exiting early. fallback = getattr(self, '_last_content_with_tools', None) if fallback and getattr(self, '_last_content_tools_all_housekeeping', False): _turn_exit_reason = "fallback_prior_turn_content" logger.info("Empty follow-up after tool calls — using prior turn content as final response") self._emit_status("↻ Empty response after tool calls — using earlier content as final answer") self._last_content_with_tools = None self._last_content_tools_all_housekeeping = False self._empty_content_retries = 0 # Do NOT modify the assistant message content — the # old code injected "Calling the X tools..." which # poisoned the conversation history. Just use the # fallback text as the final response and break. final_response = self._strip_think_blocks(fallback).strip() self._response_was_previewed = True break # ── Post-tool-call empty response nudge ─────────── # The model returned empty after executing tool calls. # This covers two cases: # (a) No prior-turn content at all — model went silent # (b) Prior turn had content + SUBSTANTIVE tools (the # fallback above was skipped because the content # was mid-task narration, not a final answer) # Instead of giving up, nudge the model to continue by # appending a user-level hint. This is the #9400 case: # weaker models (mimo-v2-pro, GLM-5, etc.) sometimes # return empty after tool results instead of continuing # to the next step. One retry with a nudge usually # fixes it. _prior_was_tool = any( m.get("role") == "tool" for m in messages[-5:] # check recent messages ) if ( _prior_was_tool and not getattr(self, "_post_tool_empty_retried", False) ): self._post_tool_empty_retried = True # Clear stale narration so it doesn't resurface # on a later empty response after the nudge. 
self._last_content_with_tools = None self._last_content_tools_all_housekeeping = False logger.info( "Empty response after tool calls — nudging model " "to continue processing" ) self._emit_status( "⚠️ Model returned empty after tool calls — " "nudging to continue" ) # Append the empty assistant message first so the # message sequence stays valid: # tool(result) → assistant("(empty)") → user(nudge) # Without this, we'd have tool → user which most # APIs reject as an invalid sequence. _nudge_msg = self._build_assistant_message(assistant_message, finish_reason) _nudge_msg["content"] = "(empty)" messages.append(_nudge_msg) messages.append({ "role": "user", "content": ( "You just executed tool calls but returned an " "empty response. Please process the tool " "results above and continue with the task." ), }) continue # ── Thinking-only prefill continuation ────────── # The model produced structured reasoning (via API # fields) but no visible text content. Rather than # giving up, append the assistant message as-is and # continue — the model will see its own reasoning # on the next turn and produce the text portion. # Inspired by clawdbot's "incomplete-text" recovery. 
_has_structured = bool( getattr(assistant_message, "reasoning", None) or getattr(assistant_message, "reasoning_content", None) or getattr(assistant_message, "reasoning_details", None) ) if _has_structured and self._thinking_prefill_retries < 2: self._thinking_prefill_retries += 1 logger.info( "Thinking-only response (no visible content) — " "prefilling to continue (%d/2)", self._thinking_prefill_retries, ) self._emit_status( f"↻ Thinking-only response — prefilling to continue " f"({self._thinking_prefill_retries}/2)" ) interim_msg = self._build_assistant_message( assistant_message, "incomplete" ) interim_msg["_thinking_prefill"] = True messages.append(interim_msg) self._session_messages = messages self._save_session_log(messages) continue # ── Empty response retry ────────────────────── # Model returned nothing usable. Retry up to 3 # times before attempting fallback. This covers # both truly empty responses (no content, no # reasoning) AND reasoning-only responses after # prefill exhaustion — models like mimo-v2-pro # always populate reasoning fields via OpenRouter, # so the old `not _has_structured` guard blocked # retries for every reasoning model after prefill. _truly_empty = not self._strip_think_blocks( final_response ).strip() _prefill_exhausted = ( _has_structured and self._thinking_prefill_retries >= 2 ) if _truly_empty and (not _has_structured or _prefill_exhausted) and self._empty_content_retries < 3: self._empty_content_retries += 1 logger.warning( "Empty response (no content or reasoning) — " "retry %d/3 (model=%s)", self._empty_content_retries, self.model, ) self._emit_status( f"⚠️ Empty response from model — retrying " f"({self._empty_content_retries}/3)" ) continue # ── Exhausted retries — try fallback provider ── # Before giving up with "(empty)", attempt to # switch to the next provider in the fallback # chain. This covers the case where a model # (e.g. GLM-4.5-Air) consistently returns empty # due to context degradation or provider issues. 
if _truly_empty and self._fallback_chain: logger.warning( "Empty response after %d retries — " "attempting fallback (model=%s, provider=%s)", self._empty_content_retries, self.model, self.provider, ) self._emit_status( "⚠️ Model returning empty responses — " "switching to fallback provider..." ) if self._try_activate_fallback(): self._empty_content_retries = 0 self._emit_status( f"↻ Switched to fallback: {self.model} " f"({self.provider})" ) logger.info( "Fallback activated after empty responses: " "now using %s on %s", self.model, self.provider, ) continue # Exhausted retries and fallback chain (or no # fallback configured). Fall through to the # "(empty)" terminal. _turn_exit_reason = "empty_response_exhausted" reasoning_text = self._extract_reasoning(assistant_message) assistant_msg = self._build_assistant_message(assistant_message, finish_reason) assistant_msg["content"] = "(empty)" messages.append(assistant_msg) if reasoning_text: reasoning_preview = reasoning_text[:500] + "..." if len(reasoning_text) > 500 else reasoning_text logger.warning( "Reasoning-only response (no visible content) " "after exhausting retries and fallback. " "Reasoning: %s", reasoning_preview, ) self._emit_status( "⚠️ Model produced reasoning but no visible " "response after all retries. Returning empty." ) else: logger.warning( "Empty response (no content or reasoning) " "after %d retries. No fallback available. " "model=%s provider=%s", self._empty_content_retries, self.model, self.provider, ) self._emit_status( "❌ Model returned no content after all retries" + (" and fallback attempts." if self._fallback_chain else ". 
No fallback providers configured.") ) final_response = "(empty)" break # Reset retry counter/signature on successful content self._empty_content_retries = 0 self._thinking_prefill_retries = 0 if ( self.api_mode == "codex_responses" and self.valid_tool_names and codex_ack_continuations < 2 and self._looks_like_codex_intermediate_ack( user_message=user_message, assistant_content=final_response, messages=messages, ) ): codex_ack_continuations += 1 interim_msg = self._build_assistant_message(assistant_message, "incomplete") messages.append(interim_msg) self._emit_interim_assistant_message(interim_msg) continue_msg = { "role": "user", "content": ( "[System: Continue now. Execute the required tool calls and only " "send your final answer after completing the task.]" ), } messages.append(continue_msg) self._session_messages = messages self._save_session_log(messages) continue codex_ack_continuations = 0 if truncated_response_prefix: final_response = truncated_response_prefix + final_response truncated_response_prefix = "" length_continue_retries = 0 # Strip blocks from user-facing response (keep raw in messages for trajectory) final_response = self._strip_think_blocks(final_response).strip() final_msg = self._build_assistant_message(assistant_message, finish_reason) # Pop thinking-only prefill message(s) before appending # the final response. This avoids consecutive assistant # messages which break strict-alternation providers # (Anthropic Messages API) and keeps history clean. 
while ( messages and isinstance(messages[-1], dict) and messages[-1].get("_thinking_prefill") ): messages.pop() messages.append(final_msg) _turn_exit_reason = f"text_response(finish_reason={finish_reason})" if not self.quiet_mode: self._safe_print(f"🎉 Conversation completed after {api_call_count} OpenAI-compatible API call(s)") break except Exception as e: error_msg = f"Error during OpenAI-compatible API call #{api_call_count}: {str(e)}" try: print(f"❌ {error_msg}") except (OSError, ValueError): logger.error(error_msg) logger.debug("Outer loop error in API call #%d", api_call_count, exc_info=True) # If an assistant message with tool_calls was already appended, # the API expects a role="tool" result for every tool_call_id. # Fill in error results for any that weren't answered yet. for idx in range(len(messages) - 1, -1, -1): msg = messages[idx] if not isinstance(msg, dict): break if msg.get("role") == "tool": continue if msg.get("role") == "assistant" and msg.get("tool_calls"): answered_ids = { m["tool_call_id"] for m in messages[idx + 1:] if isinstance(m, dict) and m.get("role") == "tool" } for tc in msg["tool_calls"]: if not tc or not isinstance(tc, dict): continue if tc["id"] not in answered_ids: err_msg = { "role": "tool", "tool_call_id": tc["id"], "content": f"Error executing tool: {error_msg}", } messages.append(err_msg) break # Non-tool errors don't need a synthetic message injected. # The error is already printed to the user (line above), and # the retry loop continues. Injecting a fake user/assistant # message pollutes history, burns tokens, and risks violating # role-alternation invariants. # If we're near the limit, break to avoid infinite loops if api_call_count >= self.max_iterations - 1: _turn_exit_reason = f"error_near_max_iterations({error_msg[:80]})" final_response = f"I apologize, but I encountered repeated errors: {error_msg}" # Append as assistant so the history stays valid for # session resume (avoids consecutive user messages). 
messages.append({"role": "assistant", "content": final_response}) break if final_response is None and ( api_call_count >= self.max_iterations or self.iteration_budget.remaining <= 0 ): # Budget exhausted — ask the model for a summary via one extra # API call with tools stripped. _handle_max_iterations injects a # user message and makes a single toolless request. _turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})" self._emit_status( f"⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) " "— asking model to summarise" ) if not self.quiet_mode: self._safe_print( f"\n⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) " "— requesting summary..." ) final_response = self._handle_max_iterations(messages, api_call_count) # Determine if conversation completed successfully completed = final_response is not None and api_call_count < self.max_iterations # Save trajectory if enabled self._save_trajectory(messages, user_message, completed) # Clean up VM and browser for this task after conversation completes self._cleanup_task_resources(effective_task_id) # Persist session to both JSON log and SQLite self._persist_session(messages, conversation_history) # ── Turn-exit diagnostic log ───────────────────────────────────── # Always logged at INFO so agent.log captures WHY every turn ended. # When the last message is a tool result (agent was mid-work), log # at WARNING — this is the "just stops" scenario users report. 
_last_msg_role = messages[-1].get("role") if messages else None _last_tool_name = None if _last_msg_role == "tool": # Walk back to find the assistant message with the tool call for _m in reversed(messages): if _m.get("role") == "assistant" and _m.get("tool_calls"): _tcs = _m["tool_calls"] if _tcs and isinstance(_tcs[0], dict): _last_tool_name = _tcs[-1].get("function", {}).get("name") break _turn_tool_count = sum( 1 for m in messages if isinstance(m, dict) and m.get("role") == "assistant" and m.get("tool_calls") ) _resp_len = len(final_response) if final_response else 0 _budget_used = self.iteration_budget.used if self.iteration_budget else 0 _budget_max = self.iteration_budget.max_total if self.iteration_budget else 0 _diag_msg = ( "Turn ended: reason=%s model=%s api_calls=%d/%d budget=%d/%d " "tool_turns=%d last_msg_role=%s response_len=%d session=%s" ) _diag_args = ( _turn_exit_reason, self.model, api_call_count, self.max_iterations, _budget_used, _budget_max, _turn_tool_count, _last_msg_role, _resp_len, self.session_id or "none", ) if _last_msg_role == "tool" and not interrupted: # Agent was mid-work — this is the "just stops" case. logger.warning( "Turn ended with pending tool result (agent may appear stuck). " + _diag_msg + " last_tool=%s", *_diag_args, _last_tool_name, ) else: logger.info(_diag_msg, *_diag_args) # Plugin hook: post_llm_call # Fired once per turn after the tool-calling loop completes. # Plugins can use this to persist conversation data (e.g. sync # to an external memory system). 
if final_response and not interrupted: try: from hermes_cli.plugins import invoke_hook as _invoke_hook _invoke_hook( "post_llm_call", session_id=self.session_id, user_message=original_user_message, assistant_response=final_response, conversation_history=list(messages), model=self.model, platform=getattr(self, "platform", None) or "", ) except Exception as exc: logger.warning("post_llm_call hook failed: %s", exc) # Extract reasoning from the last assistant message (if any) last_reasoning = None for msg in reversed(messages): if msg.get("role") == "assistant" and msg.get("reasoning"): last_reasoning = msg["reasoning"] break # Build result with interrupt info if applicable result = { "final_response": final_response, "last_reasoning": last_reasoning, "messages": messages, "api_calls": api_call_count, "completed": completed, "partial": False, # True only when stopped due to invalid tool calls "interrupted": interrupted, "response_previewed": getattr(self, "_response_was_previewed", False), "model": self.model, "provider": self.provider, "base_url": self.base_url, "input_tokens": self.session_input_tokens, "output_tokens": self.session_output_tokens, "cache_read_tokens": self.session_cache_read_tokens, "cache_write_tokens": self.session_cache_write_tokens, "reasoning_tokens": self.session_reasoning_tokens, "prompt_tokens": self.session_prompt_tokens, "completion_tokens": self.session_completion_tokens, "total_tokens": self.session_total_tokens, "last_prompt_tokens": getattr(self.context_compressor, "last_prompt_tokens", 0) or 0, "estimated_cost_usd": self.session_estimated_cost_usd, "cost_status": self.session_cost_status, "cost_source": self.session_cost_source, } self._response_was_previewed = False # Include interrupt message if one triggered the interrupt if interrupted and self._interrupt_message: result["interrupt_message"] = self._interrupt_message # Clear interrupt state after handling self.clear_interrupt() # Clear stream callback so it doesn't leak into future 
calls self._stream_callback = None # Check skill trigger NOW — based on how many tool iterations THIS turn used. _should_review_skills = False if (self._skill_nudge_interval > 0 and self._iters_since_skill >= self._skill_nudge_interval and "skill_manage" in self.valid_tool_names): _should_review_skills = True self._iters_since_skill = 0 # External memory provider: sync the completed turn + queue next prefetch. # Use original_user_message (clean input) — user_message may contain # injected skill content that bloats / breaks provider queries. if self._memory_manager and final_response and original_user_message: try: self._memory_manager.sync_all(original_user_message, final_response) self._memory_manager.queue_prefetch_all(original_user_message) except Exception: pass # Background memory/skill review — runs AFTER the response is delivered # so it never competes with the user's task for model attention. if final_response and not interrupted and (_should_review_memory or _should_review_skills): try: self._spawn_background_review( messages_snapshot=list(messages), review_memory=_should_review_memory, review_skills=_should_review_skills, ) except Exception: pass # Background review is best-effort # Note: Memory provider on_session_end() + shutdown_all() are NOT # called here — run_conversation() is called once per user message in # multi-turn sessions. Shutting down after every turn would kill the # provider before the second message. Actual session-end cleanup is # handled by the CLI (atexit / /reset) and gateway (session expiry / # _reset_session). # Plugin hook: on_session_end # Fired at the very end of every run_conversation call. # Plugins can use this for cleanup, flushing buffers, etc. 
        # Fire the on_session_end plugin hook at the very end of every
        # run_conversation call. Best-effort: a failing plugin must never
        # break the turn, so failures are logged and swallowed.
        try:
            from hermes_cli.plugins import invoke_hook as _invoke_hook
            _invoke_hook(
                "on_session_end",
                session_id=self.session_id,
                completed=completed,
                interrupted=interrupted,
                model=self.model,
                # NOTE(review): platform may be unset on this instance;
                # getattr + "or" collapses both None and "" to "".
                platform=getattr(self, "platform", None) or "",
            )
        except Exception as exc:
            logger.warning("on_session_end hook failed: %s", exc)

        return result

    def chat(self, message: str, stream_callback: Optional[callable] = None) -> str:
        """
        Simple chat interface that returns just the final response text.

        Thin convenience wrapper around run_conversation() that discards the
        rich result dict (token counts, messages, cost info) and returns only
        the assistant's final reply.

        Args:
            message (str): User message.
            stream_callback: Optional callback invoked with each text delta
                during streaming.

        Returns:
            str: Final assistant response (may be None if the conversation
            produced no final response — run_conversation's "final_response"
            is passed through unchanged).
        """
        result = self.run_conversation(message, stream_callback=stream_callback)
        return result["final_response"]


def main(
    query: Optional[str] = None,
    model: str = "",
    api_key: Optional[str] = None,
    base_url: str = "",
    max_turns: int = 10,
    enabled_toolsets: Optional[str] = None,
    disabled_toolsets: Optional[str] = None,
    list_tools: bool = False,
    save_trajectories: bool = False,
    save_sample: bool = False,
    verbose: bool = False,
    log_prefix_chars: int = 20
) -> None:
    """
    Main function for running the agent directly (CLI entry point via fire).

    Args:
        query (str): Natural language query for the agent. Defaults to Python 3.13 example.
        model (str): Model name to use (OpenRouter format: provider/model).
            Empty string defers to the agent's default (anthropic/claude-sonnet-4.6).
        api_key (str): API key for authentication. Uses OPENROUTER_API_KEY env var if not provided.
        base_url (str): Base URL for the model API. Defaults to https://openrouter.ai/api/v1
        max_turns (int): Maximum number of API call iterations. Defaults to 10.
        enabled_toolsets (str): Comma-separated list of toolsets to enable.
            Supports predefined toolsets (e.g., "research", "development", "safe").
            Multiple toolsets can be combined: "web,vision"
        disabled_toolsets (str): Comma-separated list of toolsets to disable (e.g., "terminal")
        list_tools (bool): Just list available tools and exit
        save_trajectories (bool): Save conversation trajectories to JSONL files
            (appends to trajectory_samples.jsonl). Defaults to False.
        save_sample (bool): Save a single trajectory sample to a uuid-prefixed,
            pretty-printed JSON file (sample_<id>.json) for inspection.
            Defaults to False.
        verbose (bool): Enable verbose logging for debugging. Defaults to False.
        log_prefix_chars (int): Number of characters to show in log previews for
            tool calls/responses. Defaults to 20.

    Toolset Examples:
        - "research": Web search, extract, crawl + vision tools
    """
    print("🤖 AI Agent with Tool Calling")
    print("=" * 50)

    # Handle tool listing: print the catalogue and exit without running a query.
    if list_tools:
        # Imported lazily — only needed for this listing path.
        from model_tools import get_all_tool_names, get_toolset_for_tool, get_available_toolsets
        from toolsets import get_all_toolsets, get_toolset_info

        print("📋 Available Tools & Toolsets:")
        print("-" * 50)

        # Show new toolsets system
        print("\n🎯 Predefined Toolsets (New System):")
        print("-" * 40)
        all_toolsets = get_all_toolsets()

        # Group by category. Membership in the two hard-coded name lists
        # decides the bucket; anything else is treated as scenario-specific.
        basic_toolsets = []
        composite_toolsets = []
        scenario_toolsets = []
        for name, toolset in all_toolsets.items():
            info = get_toolset_info(name)
            if info:  # toolsets without info metadata are silently skipped
                entry = (name, info)
                if name in ["web", "terminal", "vision", "creative", "reasoning"]:
                    basic_toolsets.append(entry)
                elif name in ["research", "development", "analysis", "content_creation", "full_stack"]:
                    composite_toolsets.append(entry)
                else:
                    scenario_toolsets.append(entry)

        # Print basic toolsets
        print("\n📌 Basic Toolsets:")
        for name, info in basic_toolsets:
            tools_str = ', '.join(info['resolved_tools']) if info['resolved_tools'] else 'none'
            print(f" • {name:15} - {info['description']}")
            print(f" Tools: {tools_str}")

        # Print composite toolsets
        print("\n📂 Composite Toolsets (built from other toolsets):")
        for name, info in composite_toolsets:
            includes_str = ', '.join(info['includes']) if info['includes'] else 'none'
            print(f" • {name:15} - {info['description']}")
            print(f" Includes: {includes_str}")
            print(f" Total tools: {info['tool_count']}")

        # Print scenario-specific toolsets
        print("\n🎭 Scenario-Specific Toolsets:")
        for name, info in scenario_toolsets:
            print(f" • {name:20} - {info['description']}")
            print(f" Total tools: {info['tool_count']}")

        # Show legacy toolset compatibility
        print("\n📦 Legacy Toolsets (for backward compatibility):")
        legacy_toolsets = get_available_toolsets()
        for name, info in legacy_toolsets.items():
            status = "✅" if info["available"] else "❌"
            print(f" {status} {name}: {info['description']}")
            if not info["available"]:
                print(f" Requirements: {', '.join(info['requirements'])}")

        # Show individual tools, sorted alphabetically with their owning toolset.
        all_tools = get_all_tool_names()
        print(f"\n🔧 Individual Tools ({len(all_tools)} available):")
        for tool_name in sorted(all_tools):
            toolset = get_toolset_for_tool(tool_name)
            print(f" 📌 {tool_name} (from {toolset})")

        print("\n💡 Usage Examples:")
        print(" # Use predefined toolsets")
        print(" python run_agent.py --enabled_toolsets=research --query='search for Python news'")
        print(" python run_agent.py --enabled_toolsets=development --query='debug this code'")
        print(" python run_agent.py --enabled_toolsets=safe --query='analyze without terminal'")
        print(" ")
        print(" # Combine multiple toolsets")
        print(" python run_agent.py --enabled_toolsets=web,vision --query='analyze website'")
        print(" ")
        print(" # Disable toolsets")
        print(" python run_agent.py --disabled_toolsets=terminal --query='no command execution'")
        print(" ")
        print(" # Run with trajectory saving enabled")
        print(" python run_agent.py --save_trajectories --query='your question here'")
        return

    # Parse toolset selection arguments (comma-separated CLI strings → lists).
    enabled_toolsets_list = None
    disabled_toolsets_list = None

    if enabled_toolsets:
        enabled_toolsets_list = [t.strip() for t in enabled_toolsets.split(",")]
        print(f"🎯 Enabled toolsets: {enabled_toolsets_list}")

    if disabled_toolsets:
        disabled_toolsets_list = [t.strip() for t in disabled_toolsets.split(",")]
        print(f"🚫 Disabled toolsets: {disabled_toolsets_list}")

    if save_trajectories:
        print("💾 Trajectory saving: ENABLED")
        print(" - Successful conversations → trajectory_samples.jsonl")
        print(" - Failed conversations → failed_trajectories.jsonl")

    # Initialize agent with provided parameters.
    # RuntimeError is the documented init failure mode (e.g. missing API key);
    # other exception types will propagate.
    try:
        agent = AIAgent(
            base_url=base_url,
            model=model,
            api_key=api_key,
            max_iterations=max_turns,
            enabled_toolsets=enabled_toolsets_list,
            disabled_toolsets=disabled_toolsets_list,
            save_trajectories=save_trajectories,
            verbose_logging=verbose,
            log_prefix_chars=log_prefix_chars
        )
    except RuntimeError as e:
        print(f"❌ Failed to initialize agent: {e}")
        return

    # Use provided query or default to Python 3.13 example
    if query is None:
        user_query = (
            "Tell me about the latest developments in Python 3.13 and what new features "
            "developers should know about. Please search for current information and try it out."
        )
    else:
        user_query = query

    print(f"\n📝 User Query: {user_query}")
    print("\n" + "=" * 50)

    # Run conversation (blocking; loops until completion or max_turns).
    result = agent.run_conversation(user_query)

    print("\n" + "=" * 50)
    print("📋 CONVERSATION SUMMARY")
    print("=" * 50)
    print(f"✅ Completed: {result['completed']}")
    print(f"📞 API Calls: {result['api_calls']}")
    print(f"💬 Messages: {len(result['messages'])}")

    if result['final_response']:
        print("\n🎯 FINAL RESPONSE:")
        print("-" * 30)
        print(result['final_response'])

    # Save sample trajectory to a uuid-prefixed JSON file if requested.
    if save_sample:
        # First 8 hex chars of a uuid4 are enough to avoid collisions here.
        sample_id = str(uuid.uuid4())[:8]
        sample_filename = f"sample_{sample_id}.json"

        # Convert messages to trajectory format (same as batch_runner)
        trajectory = agent._convert_to_trajectory_format(
            result['messages'], user_query, result['completed']
        )
        entry = {
            "conversations": trajectory,
            "timestamp": datetime.now().isoformat(),
            "model": model,
            "completed": result['completed'],
            "query": user_query
        }
        try:
            with open(sample_filename, "w", encoding="utf-8") as f:
                # Pretty-print JSON with indent for readability
                f.write(json.dumps(entry, ensure_ascii=False, indent=2))
            print(f"\n💾 Sample trajectory saved to: {sample_filename}")
        except Exception as e:
            # Best-effort save: report the failure but don't abort the run.
            print(f"\n⚠️ Failed to save sample: {e}")

    print("\n👋 Agent execution completed!")


if __name__ == "__main__":
    fire.Fire(main)