Merge branch 'main' of github.com:NousResearch/hermes-agent into feat/ink-refactor

This commit is contained in:
Brooklyn Nicholson 2026-04-12 13:18:55 -05:00
commit 2aea75e91e
131 changed files with 12350 additions and 1164 deletions

View file

@ -69,9 +69,7 @@ jobs:
file: Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
nousresearch/hermes-agent:latest
nousresearch/hermes-agent:${{ github.sha }}
tags: nousresearch/hermes-agent:latest
cache-from: type=gha
cache-to: type=gha,mode=max
@ -83,9 +81,6 @@ jobs:
file: Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
nousresearch/hermes-agent:latest
nousresearch/hermes-agent:${{ github.event.release.tag_name }}
nousresearch/hermes-agent:${{ github.sha }}
tags: nousresearch/hermes-agent:${{ github.event.release.tag_name }}
cache-from: type=gha
cache-to: type=gha,mode=max

View file

@ -415,8 +415,9 @@ Cache-breaking forces dramatically higher costs. The ONLY time we alter context
### Background Process Notifications (Gateway)
When `terminal(background=true, notify_on_complete=true)` is used, the gateway runs a watcher that
detects process completion and triggers a new agent turn. Control verbosity of background process
messages with `display.background_process_notifications`
in config.yaml (or `HERMES_BACKGROUND_NOTIFICATIONS` env var):
- `all` — running-output updates + final message (default)

View file

@ -1,27 +1,44 @@
FROM ghcr.io/astral-sh/uv:0.11.6-python3.13-trixie@sha256:b3c543b6c4f23a5f2df22866bd7857e5d304b67a564f4feab6ac22044dde719b AS uv_source
FROM tianon/gosu:1.19-trixie@sha256:3b176695959c71e123eb390d427efc665eeb561b1540e82679c15e992006b8b9 AS gosu_source
FROM debian:13.4
# Disable Python stdout buffering to ensure logs are printed immediately
ENV PYTHONUNBUFFERED=1
# Store Playwright browsers outside the volume mount so the build-time
# install survives the /opt/data volume overlay at runtime.
ENV PLAYWRIGHT_BROWSERS_PATH=/opt/hermes/.playwright
# Install system dependencies in one layer, clear APT cache
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential nodejs npm python3 python3-pip ripgrep ffmpeg gcc python3-dev libffi-dev procps && \
build-essential nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps && \
rm -rf /var/lib/apt/lists/*
# Non-root user for runtime; UID can be overridden via HERMES_UID at runtime
RUN useradd -u 10000 -m -d /opt/data hermes
COPY --chmod=0755 --from=gosu_source /gosu /usr/local/bin/
COPY --chmod=0755 --from=uv_source /usr/local/bin/uv /usr/local/bin/uvx /usr/local/bin/
COPY . /opt/hermes
WORKDIR /opt/hermes
# Install Python and Node dependencies in one layer, no cache
RUN pip install --no-cache-dir uv --break-system-packages && \
uv pip install --system --break-system-packages --no-cache -e ".[all]" && \
npm install --prefer-offline --no-audit && \
# Install Node dependencies and Playwright as root (--with-deps needs apt)
RUN npm install --prefer-offline --no-audit && \
npx playwright install --with-deps chromium --only-shell && \
cd /opt/hermes/scripts/whatsapp-bridge && \
npm install --prefer-offline --no-audit && \
npm cache clean --force
WORKDIR /opt/hermes
# Hand ownership to hermes user, then install Python deps in a virtualenv
RUN chown -R hermes:hermes /opt/hermes
USER hermes
RUN uv venv && \
uv pip install --no-cache-dir -e ".[all]"
USER root
RUN chmod +x /opt/hermes/docker/entrypoint.sh
ENV HERMES_HOME=/opt/data

View file

@ -1021,6 +1021,23 @@ _AUTO_PROVIDER_LABELS = {
_AGGREGATOR_PROVIDERS = frozenset({"openrouter", "nous"})
_MAIN_RUNTIME_FIELDS = ("provider", "model", "base_url", "api_key", "api_mode")
def _normalize_main_runtime(main_runtime: Optional[Dict[str, Any]]) -> Dict[str, str]:
"""Return a sanitized copy of a live main-runtime override."""
if not isinstance(main_runtime, dict):
return {}
normalized: Dict[str, str] = {}
for field in _MAIN_RUNTIME_FIELDS:
value = main_runtime.get(field)
if isinstance(value, str) and value.strip():
normalized[field] = value.strip()
provider = normalized.get("provider")
if provider:
normalized["provider"] = provider.lower()
return normalized
def _get_provider_chain() -> List[tuple]:
"""Return the ordered provider detection chain.
@ -1130,7 +1147,7 @@ def _try_payment_fallback(
return None, None, ""
def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
def _resolve_auto(main_runtime: Optional[Dict[str, Any]] = None) -> Tuple[Optional[OpenAI], Optional[str]]:
"""Full auto-detection chain.
Priority:
@ -1142,6 +1159,12 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
"""
global auxiliary_is_nous, _stale_base_url_warned
auxiliary_is_nous = False # Reset — _try_nous() will set True if it wins
runtime = _normalize_main_runtime(main_runtime)
runtime_provider = runtime.get("provider", "")
runtime_model = runtime.get("model", "")
runtime_base_url = runtime.get("base_url", "")
runtime_api_key = runtime.get("api_key", "")
runtime_api_mode = runtime.get("api_mode", "")
# ── Warn once if OPENAI_BASE_URL is set but config.yaml uses a named
# provider (not 'custom'). This catches the common "env poisoning"
@ -1149,7 +1172,7 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
# old OPENAI_BASE_URL lingers in ~/.hermes/.env. ──
if not _stale_base_url_warned:
_env_base = os.getenv("OPENAI_BASE_URL", "").strip()
_cfg_provider = _read_main_provider()
_cfg_provider = runtime_provider or _read_main_provider()
if (_env_base and _cfg_provider
and _cfg_provider != "custom"
and not _cfg_provider.startswith("custom:")):
@ -1163,12 +1186,25 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
_stale_base_url_warned = True
# ── Step 1: non-aggregator main provider → use main model directly ──
main_provider = _read_main_provider()
main_model = _read_main_model()
main_provider = runtime_provider or _read_main_provider()
main_model = runtime_model or _read_main_model()
if (main_provider and main_model
and main_provider not in _AGGREGATOR_PROVIDERS
and main_provider not in ("auto", "")):
client, resolved = resolve_provider_client(main_provider, main_model)
resolved_provider = main_provider
explicit_base_url = None
explicit_api_key = None
if runtime_base_url and (main_provider == "custom" or main_provider.startswith("custom:")):
resolved_provider = "custom"
explicit_base_url = runtime_base_url
explicit_api_key = runtime_api_key or None
client, resolved = resolve_provider_client(
resolved_provider,
main_model,
explicit_base_url=explicit_base_url,
explicit_api_key=explicit_api_key,
api_mode=runtime_api_mode or None,
)
if client is not None:
logger.info("Auxiliary auto-detect: using main provider %s (%s)",
main_provider, resolved or main_model)
@ -1249,6 +1285,7 @@ def resolve_provider_client(
explicit_base_url: str = None,
explicit_api_key: str = None,
api_mode: str = None,
main_runtime: Optional[Dict[str, Any]] = None,
) -> Tuple[Optional[Any], Optional[str]]:
"""Central router: given a provider name and optional model, return a
configured client with the correct auth, base URL, and API format.
@ -1319,7 +1356,7 @@ def resolve_provider_client(
# ── Auto: try all providers in priority order ────────────────────
if provider == "auto":
client, resolved = _resolve_auto()
client, resolved = _resolve_auto(main_runtime=main_runtime)
if client is None:
return None, None
# When auto-detection lands on a non-OpenRouter provider (e.g. a
@ -1543,7 +1580,11 @@ def resolve_provider_client(
# ── Public API ──────────────────────────────────────────────────────────────
def get_text_auxiliary_client(task: str = "") -> Tuple[Optional[OpenAI], Optional[str]]:
def get_text_auxiliary_client(
task: str = "",
*,
main_runtime: Optional[Dict[str, Any]] = None,
) -> Tuple[Optional[OpenAI], Optional[str]]:
"""Return (client, default_model_slug) for text-only auxiliary tasks.
Args:
@ -1560,10 +1601,11 @@ def get_text_auxiliary_client(task: str = "") -> Tuple[Optional[OpenAI], Optiona
explicit_base_url=base_url,
explicit_api_key=api_key,
api_mode=api_mode,
main_runtime=main_runtime,
)
def get_async_text_auxiliary_client(task: str = ""):
def get_async_text_auxiliary_client(task: str = "", *, main_runtime: Optional[Dict[str, Any]] = None):
"""Return (async_client, model_slug) for async consumers.
For standard providers returns (AsyncOpenAI, model). For Codex returns
@ -1578,6 +1620,7 @@ def get_async_text_auxiliary_client(task: str = ""):
explicit_base_url=base_url,
explicit_api_key=api_key,
api_mode=api_mode,
main_runtime=main_runtime,
)
@ -1892,6 +1935,7 @@ def _get_cached_client(
base_url: str = None,
api_key: str = None,
api_mode: str = None,
main_runtime: Optional[Dict[str, Any]] = None,
) -> Tuple[Optional[Any], Optional[str]]:
"""Get or create a cached client for the given provider.
@ -1915,7 +1959,9 @@ def _get_cached_client(
loop_id = id(current_loop)
except RuntimeError:
pass
cache_key = (provider, async_mode, base_url or "", api_key or "", api_mode or "", loop_id)
runtime = _normalize_main_runtime(main_runtime)
runtime_key = tuple(runtime.get(field, "") for field in _MAIN_RUNTIME_FIELDS) if provider == "auto" else ()
cache_key = (provider, async_mode, base_url or "", api_key or "", api_mode or "", loop_id, runtime_key)
with _client_cache_lock:
if cache_key in _client_cache:
cached_client, cached_default, cached_loop = _client_cache[cache_key]
@ -1940,6 +1986,7 @@ def _get_cached_client(
explicit_base_url=base_url,
explicit_api_key=api_key,
api_mode=api_mode,
main_runtime=runtime,
)
if client is not None:
# For async clients, remember which loop they were created on so we
@ -2065,6 +2112,75 @@ def _get_task_timeout(task: str, default: float = _DEFAULT_AUX_TIMEOUT) -> float
return default
# ---------------------------------------------------------------------------
# Anthropic-compatible endpoint detection + image block conversion
# ---------------------------------------------------------------------------
# Providers that use Anthropic-compatible endpoints (via OpenAI SDK wrapper).
# Their image content blocks must use Anthropic format, not OpenAI format.
_ANTHROPIC_COMPAT_PROVIDERS = frozenset({"minimax", "minimax-cn"})
def _is_anthropic_compat_endpoint(provider: str, base_url: str) -> bool:
"""Detect if an endpoint expects Anthropic-format content blocks.
Returns True for known Anthropic-compatible providers (MiniMax) and
any endpoint whose URL contains ``/anthropic`` in the path.
"""
if provider in _ANTHROPIC_COMPAT_PROVIDERS:
return True
url_lower = (base_url or "").lower()
return "/anthropic" in url_lower
def _convert_openai_images_to_anthropic(messages: list) -> list:
"""Convert OpenAI ``image_url`` content blocks to Anthropic ``image`` blocks.
Only touches messages that have list-type content with ``image_url`` blocks;
plain text messages pass through unchanged.
"""
converted = []
for msg in messages:
content = msg.get("content")
if not isinstance(content, list):
converted.append(msg)
continue
new_content = []
changed = False
for block in content:
if block.get("type") == "image_url":
image_url_val = (block.get("image_url") or {}).get("url", "")
if image_url_val.startswith("data:"):
# Parse data URI: data:<media_type>;base64,<data>
header, _, b64data = image_url_val.partition(",")
media_type = "image/png"
if ":" in header and ";" in header:
media_type = header.split(":", 1)[1].split(";", 1)[0]
new_content.append({
"type": "image",
"source": {
"type": "base64",
"media_type": media_type,
"data": b64data,
},
})
else:
# URL-based image
new_content.append({
"type": "image",
"source": {
"type": "url",
"url": image_url_val,
},
})
changed = True
else:
new_content.append(block)
converted.append({**msg, "content": new_content} if changed else msg)
return converted
def _build_call_kwargs(
provider: str,
model: str,
@ -2149,6 +2265,7 @@ def call_llm(
model: str = None,
base_url: str = None,
api_key: str = None,
main_runtime: Optional[Dict[str, Any]] = None,
messages: list,
temperature: float = None,
max_tokens: int = None,
@ -2214,6 +2331,7 @@ def call_llm(
base_url=resolved_base_url,
api_key=resolved_api_key,
api_mode=resolved_api_mode,
main_runtime=main_runtime,
)
if client is None:
# When the user explicitly chose a non-OpenRouter provider but no
@ -2234,7 +2352,7 @@ def call_llm(
if not resolved_base_url:
logger.info("Auxiliary %s: provider %s unavailable, trying auto-detection chain",
task or "call", resolved_provider)
client, final_model = _get_cached_client("auto")
client, final_model = _get_cached_client("auto", main_runtime=main_runtime)
if client is None:
raise RuntimeError(
f"No LLM provider configured for task={task} provider={resolved_provider}. "
@ -2255,6 +2373,11 @@ def call_llm(
tools=tools, timeout=effective_timeout, extra_body=extra_body,
base_url=resolved_base_url)
# Convert image blocks for Anthropic-compatible endpoints (e.g. MiniMax)
_client_base = str(getattr(client, "base_url", "") or "")
if _is_anthropic_compat_endpoint(resolved_provider, _client_base):
kwargs["messages"] = _convert_openai_images_to_anthropic(kwargs["messages"])
# Handle max_tokens vs max_completion_tokens retry, then payment fallback.
try:
return _validate_llm_response(
@ -2443,6 +2566,11 @@ async def async_call_llm(
tools=tools, timeout=effective_timeout, extra_body=extra_body,
base_url=resolved_base_url)
# Convert image blocks for Anthropic-compatible endpoints (e.g. MiniMax)
_client_base = str(getattr(client, "base_url", "") or "")
if _is_anthropic_compat_endpoint(resolved_provider, _client_base):
kwargs["messages"] = _convert_openai_images_to_anthropic(kwargs["messages"])
try:
return _validate_llm_response(
await client.chat.completions.create(**kwargs), task)

View file

@ -4,8 +4,12 @@ Self-contained class with its own OpenAI client for summarization.
Uses auxiliary model (cheap/fast) to summarize middle turns while
protecting head and tail context.
Improvements over v2:
- Structured summary template with Resolved/Pending question tracking
- Summarizer preamble: "Do not respond to any questions" (from OpenCode)
- Handoff framing: "different assistant" (from Codex) to create separation
- "Remaining Work" replaces "Next Steps" to avoid reading as active instructions
- Clear separator when summary merges into tail message
- Iterative summary updates (preserves info across multiple compactions)
- Token-budget tail protection instead of fixed message count
- Tool output pruning before LLM summarization (cheap pre-pass)
@ -20,6 +24,7 @@ from typing import Any, Dict, List, Optional
from agent.auxiliary_client import call_llm
from agent.context_engine import ContextEngine
from agent.model_metadata import (
MINIMUM_CONTEXT_LENGTH,
get_model_context_length,
estimate_messages_tokens_rough,
)
@ -27,12 +32,13 @@ from agent.model_metadata import (
logger = logging.getLogger(__name__)
SUMMARY_PREFIX = (
"[CONTEXT COMPACTION] Earlier turns in this conversation were compacted "
"to save context space. The summary below describes work that was "
"already completed, and the current session state may still reflect "
"that work (for example, files may already be changed). Use the summary "
"and the current state to continue from where things left off, and "
"avoid repeating work:"
"[CONTEXT COMPACTION — REFERENCE ONLY] Earlier turns were compacted "
"into the summary below. This is a handoff from a previous context "
"window — treat it as background reference, NOT as active instructions. "
"Do NOT answer questions or fulfill requests mentioned in this summary; "
"they were already addressed. Respond ONLY to the latest user message "
"that appears AFTER this summary. The current session state (files, "
"config, etc.) may reflect work described here — avoid repeating it:"
)
LEGACY_SUMMARY_PREFIX = "[CONTEXT SUMMARY]:"
@ -80,14 +86,19 @@ class ContextCompressor(ContextEngine):
base_url: str = "",
api_key: str = "",
provider: str = "",
api_mode: str = "",
) -> None:
"""Update model info after a model switch or fallback activation."""
self.model = model
self.base_url = base_url
self.api_key = api_key
self.provider = provider
self.api_mode = api_mode
self.context_length = context_length
self.threshold_tokens = int(context_length * self.threshold_percent)
self.threshold_tokens = max(
int(context_length * self.threshold_percent),
MINIMUM_CONTEXT_LENGTH,
)
def __init__(
self,
@ -102,11 +113,13 @@ class ContextCompressor(ContextEngine):
api_key: str = "",
config_context_length: int | None = None,
provider: str = "",
api_mode: str = "",
):
self.model = model
self.base_url = base_url
self.api_key = api_key
self.provider = provider
self.api_mode = api_mode
self.threshold_percent = threshold_percent
self.protect_first_n = protect_first_n
self.protect_last_n = protect_last_n
@ -118,7 +131,14 @@ class ContextCompressor(ContextEngine):
config_context_length=config_context_length,
provider=provider,
)
self.threshold_tokens = int(self.context_length * threshold_percent)
# Floor: never compress below MINIMUM_CONTEXT_LENGTH tokens even if
# the percentage would suggest a lower value. This prevents premature
# compression on large-context models at 50% while keeping the % sane
# for models right at the minimum.
self.threshold_tokens = max(
int(self.context_length * threshold_percent),
MINIMUM_CONTEXT_LENGTH,
)
self.compression_count = 0
# Derive token budgets: ratio is relative to the threshold, not total context
@ -295,13 +315,20 @@ class ContextCompressor(ContextEngine):
return "\n\n".join(parts)
def _generate_summary(self, turns_to_summarize: List[Dict[str, Any]]) -> Optional[str]:
def _generate_summary(self, turns_to_summarize: List[Dict[str, Any]], focus_topic: str = None) -> Optional[str]:
"""Generate a structured summary of conversation turns.
Uses a structured template (Goal, Progress, Decisions, Files, Next Steps)
inspired by Pi-mono and OpenCode. When a previous summary exists,
Uses a structured template (Goal, Progress, Decisions, Resolved/Pending
Questions, Files, Remaining Work) with explicit preamble telling the
summarizer not to answer questions. When a previous summary exists,
generates an iterative update instead of summarizing from scratch.
Args:
focus_topic: Optional focus string for guided compression. When
provided, the summariser prioritises preserving information
related to this topic and is more aggressive about compressing
everything else. Inspired by Claude Code's ``/compact``.
Returns None if all attempts fail — the caller should drop
the middle turns without a summary rather than inject a useless
placeholder.
@ -317,60 +344,27 @@ class ContextCompressor(ContextEngine):
summary_budget = self._compute_summary_budget(turns_to_summarize)
content_to_summarize = self._serialize_for_summary(turns_to_summarize)
if self._previous_summary:
# Iterative update: preserve existing info, add new progress
prompt = f"""You are updating a context compaction summary. A previous compaction produced the summary below. New conversation turns have occurred since then and need to be incorporated.
# Preamble shared by both first-compaction and iterative-update prompts.
# Inspired by OpenCode's "do not respond to any questions" instruction
# and Codex's "another language model" framing.
_summarizer_preamble = (
"You are a summarization agent creating a context checkpoint. "
"Your output will be injected as reference material for a DIFFERENT "
"assistant that continues the conversation. "
"Do NOT respond to any questions or requests in the conversation — "
"only output the structured summary. "
"Do NOT include any preamble, greeting, or prefix."
)
PREVIOUS SUMMARY:
{self._previous_summary}
NEW TURNS TO INCORPORATE:
{content_to_summarize}
Update the summary using this exact structure. PRESERVE all existing information that is still relevant. ADD new progress. Move items from "In Progress" to "Done" when completed. Remove information only if it is clearly obsolete.
## Goal
[What the user is trying to accomplish preserve from previous summary, update if goal evolved]
## Constraints & Preferences
[User preferences, coding style, constraints, important decisions accumulate across compactions]
## Progress
### Done
[Completed work include specific file paths, commands run, results obtained]
### In Progress
[Work currently underway]
### Blocked
[Any blockers or issues encountered]
## Key Decisions
[Important technical decisions and why they were made]
## Relevant Files
[Files read, modified, or created with brief note on each. Accumulate across compactions.]
## Next Steps
[What needs to happen next to continue the work]
## Critical Context
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation]
## Tools & Patterns
[Which tools were used, how they were used effectively, and any tool-specific discoveries. Accumulate across compactions.]
Target ~{summary_budget} tokens. Be specific include file paths, command outputs, error messages, and concrete values rather than vague descriptions.
Write only the summary body. Do not include any preamble or prefix."""
else:
# First compaction: summarize from scratch
prompt = f"""Create a structured handoff summary for a later assistant that will continue this conversation after earlier turns are compacted.
TURNS TO SUMMARIZE:
{content_to_summarize}
Use this exact structure:
## Goal
# Shared structured template (used by both paths).
# Key changes vs v1:
# - "Pending User Asks" section (from Claude Code) explicitly tracks
# unanswered questions so the model knows what's resolved vs open
# - "Remaining Work" replaces "Next Steps" to avoid reading as active
# instructions
# - "Resolved Questions" makes it clear which questions were already
# answered (prevents model from re-answering them)
_template_sections = f"""## Goal
[What the user is trying to accomplish]
## Constraints & Preferences
@ -387,25 +381,74 @@ Use this exact structure:
## Key Decisions
[Important technical decisions and why they were made]
## Resolved Questions
[Questions the user asked that were ALREADY answered include the answer so the next assistant does not re-answer them]
## Pending User Asks
[Questions or requests from the user that have NOT yet been answered or fulfilled. If none, write "None."]
## Relevant Files
[Files read, modified, or created with brief note on each]
## Next Steps
[What needs to happen next to continue the work]
## Remaining Work
[What remains to be done framed as context, not instructions]
## Critical Context
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation]
## Tools & Patterns
[Which tools were used, how they were used effectively, and any tool-specific discoveries (e.g., preferred flags, working invocations, successful command patterns)]
[Which tools were used, how they were used effectively, and any tool-specific discoveries]
Target ~{summary_budget} tokens. Be specific include file paths, command outputs, error messages, and concrete values rather than vague descriptions. The goal is to prevent the next assistant from repeating work or losing important details.
Target ~{summary_budget} tokens. Be specific include file paths, command outputs, error messages, and concrete values rather than vague descriptions.
Write only the summary body. Do not include any preamble or prefix."""
if self._previous_summary:
# Iterative update: preserve existing info, add new progress
prompt = f"""{_summarizer_preamble}
You are updating a context compaction summary. A previous compaction produced the summary below. New conversation turns have occurred since then and need to be incorporated.
PREVIOUS SUMMARY:
{self._previous_summary}
NEW TURNS TO INCORPORATE:
{content_to_summarize}
Update the summary using this exact structure. PRESERVE all existing information that is still relevant. ADD new progress. Move items from "In Progress" to "Done" when completed. Move answered questions to "Resolved Questions". Remove information only if it is clearly obsolete.
{_template_sections}"""
else:
# First compaction: summarize from scratch
prompt = f"""{_summarizer_preamble}
Create a structured handoff summary for a different assistant that will continue this conversation after earlier turns are compacted. The next assistant should be able to understand what happened without re-reading the original turns.
TURNS TO SUMMARIZE:
{content_to_summarize}
Use this exact structure:
{_template_sections}"""
# Inject focus topic guidance when the user provides one via /compress <focus>.
# This goes at the end of the prompt so it takes precedence.
if focus_topic:
prompt += f"""
FOCUS TOPIC: "{focus_topic}"
The user has requested that this compaction PRIORITISE preserving all information related to the focus topic above. For content related to "{focus_topic}", include full detail exact values, file paths, command outputs, error messages, and decisions. For content NOT related to the focus topic, summarise more aggressively (brief one-liners or omit if truly irrelevant). The focus topic sections should receive roughly 60-70% of the summary token budget."""
try:
call_kwargs = {
"task": "compression",
"main_runtime": {
"model": self.model,
"provider": self.provider,
"base_url": self.base_url,
"api_key": self.api_key,
"api_mode": self.api_mode,
},
"messages": [{"role": "user", "content": prompt}],
"max_tokens": summary_budget * 2,
# timeout resolved from auxiliary.compression.timeout config by call_llm
@ -620,7 +663,7 @@ Write only the summary body. Do not include any preamble or prefix."""
# Main compression entry point
# ------------------------------------------------------------------
def compress(self, messages: List[Dict[str, Any]], current_tokens: int = None) -> List[Dict[str, Any]]:
def compress(self, messages: List[Dict[str, Any]], current_tokens: int = None, focus_topic: str = None) -> List[Dict[str, Any]]:
"""Compress conversation messages by summarizing middle turns.
Algorithm:
@ -632,6 +675,12 @@ Write only the summary body. Do not include any preamble or prefix."""
After compression, orphaned tool_call / tool_result pairs are cleaned
up so the API never receives mismatched IDs.
Args:
focus_topic: Optional focus string for guided compression. When
provided, the summariser will prioritise preserving information
related to this topic and be more aggressive about compressing
everything else. Inspired by Claude Code's ``/compact``.
"""
n_messages = len(messages)
# Only need head + 3 tail messages minimum (token budget decides the real tail size)
@ -689,7 +738,7 @@ Write only the summary body. Do not include any preamble or prefix."""
)
# Phase 3: Generate structured summary
summary = self._generate_summary(turns_to_summarize)
summary = self._generate_summary(turns_to_summarize, focus_topic=focus_topic)
# Phase 4: Assemble compressed message list
compressed = []
@ -744,7 +793,12 @@ Write only the summary body. Do not include any preamble or prefix."""
msg = messages[i].copy()
if _merge_summary_into_tail and i == compress_end:
original = msg.get("content") or ""
msg["content"] = summary + "\n\n" + original
msg["content"] = (
summary
+ "\n\n--- END OF CONTEXT SUMMARY — "
"respond to the message below, not the summary above ---\n\n"
+ original
)
_merge_summary_into_tail = False
compressed.append(msg)

View file

@ -24,6 +24,7 @@ from hermes_cli.auth import (
_codex_access_token_is_expiring,
_decode_jwt_claims,
_import_codex_cli_tokens,
_write_codex_cli_tokens,
_load_auth_store,
_load_provider_state,
_resolve_kimi_base_url,
@ -693,6 +694,14 @@ class CredentialPool:
self._replace_entry(synced, updated)
self._persist()
self._sync_device_code_entry_to_auth_store(updated)
try:
_write_codex_cli_tokens(
updated.access_token,
updated.refresh_token,
last_refresh=updated.last_refresh,
)
except Exception as wexc:
logger.debug("Failed to write refreshed Codex tokens to CLI file (retry): %s", wexc)
return updated
except Exception as retry_exc:
logger.debug("Codex retry refresh also failed: %s", retry_exc)
@ -718,6 +727,17 @@ class CredentialPool:
# _seed_from_singletons() on the next load_pool() sees fresh state
# instead of re-seeding stale/consumed tokens.
self._sync_device_code_entry_to_auth_store(updated)
# Write refreshed tokens back to ~/.codex/auth.json so Codex CLI
# and VS Code don't hit "refresh_token_reused" on their next refresh.
if self.provider == "openai-codex":
try:
_write_codex_cli_tokens(
updated.access_token,
updated.refresh_token,
last_refresh=updated.last_refresh,
)
except Exception as wexc:
logger.debug("Failed to write refreshed Codex tokens to CLI file: %s", wexc)
return updated
def _entry_needs_refresh(self, entry: PooledCredential) -> bool:
@ -1128,6 +1148,23 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
elif provider == "openai-codex":
state = _load_provider_state(auth_store, "openai-codex")
tokens = state.get("tokens") if isinstance(state, dict) else None
# Fallback: import from Codex CLI (~/.codex/auth.json) if Hermes auth
# store has no tokens. This mirrors resolve_codex_runtime_credentials()
# so that load_pool() and list_authenticated_providers() detect tokens
# that only exist in the Codex CLI shared file.
if not (isinstance(tokens, dict) and tokens.get("access_token")):
try:
from hermes_cli.auth import _import_codex_cli_tokens, _save_codex_tokens
cli_tokens = _import_codex_cli_tokens()
if cli_tokens:
logger.info("Importing Codex CLI tokens into Hermes auth store.")
_save_codex_tokens(cli_tokens)
# Re-read state after import
auth_store = _load_auth_store()
state = _load_provider_state(auth_store, "openai-codex")
tokens = state.get("tokens") if isinstance(state, dict) else None
except Exception as exc:
logger.debug("Codex CLI token import failed: %s", exc)
if isinstance(tokens, dict) and tokens.get("access_token"):
active_sources.add("device_code")
changed |= _upsert_entry(

View file

@ -85,6 +85,11 @@ CONTEXT_PROBE_TIERS = [
# Default context length when no detection method succeeds.
DEFAULT_FALLBACK_CONTEXT = CONTEXT_PROBE_TIERS[0]
# Minimum context length required to run Hermes Agent. Models with fewer
# tokens cannot maintain enough working memory for tool-calling workflows.
# Sessions, model switches, and cron jobs should reject models below this.
MINIMUM_CONTEXT_LENGTH = 64_000
# Thin fallback defaults — only broad model family patterns.
# These fire only when provider is unknown AND models.dev/OpenRouter/Anthropic
# all miss. Replaced the previous 80+ entry dict.
@ -1040,16 +1045,21 @@ def get_model_context_length(
def estimate_tokens_rough(text: str) -> int:
    """Rough token estimate (~4 chars/token) for pre-flight checks.

    Uses ceiling division so short texts (1-3 chars) never estimate as
    0 tokens, which would cause the compressor and pre-flight checks to
    systematically undercount when many short tool results are present.
    """
    # Merge-conflict residue removed: the old `len(text) // 4` return made
    # the ceiling-division return unreachable and left a duplicate docstring.
    if not text:
        return 0
    return (len(text) + 3) // 4
def estimate_messages_tokens_rough(messages: List[Dict[str, Any]]) -> int:
"""Rough token estimate for a message list (pre-flight only)."""
total_chars = sum(len(str(msg)) for msg in messages)
return total_chars // 4
return (total_chars + 3) // 4
def estimate_request_tokens_rough(
@ -1072,4 +1082,4 @@ def estimate_request_tokens_rough(
total_chars += sum(len(str(msg)) for msg in messages)
if tools:
total_chars += len(str(tools))
return total_chars // 4
return (total_chars + 3) // 4

View file

@ -144,6 +144,8 @@ class ProviderInfo:
PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
"openrouter": "openrouter",
"anthropic": "anthropic",
"openai": "openai",
"openai-codex": "openai",
"zai": "zai",
"kimi-coding": "kimi-for-coding",
"minimax": "minimax",

View file

@ -12,7 +12,7 @@ import threading
from collections import OrderedDict
from pathlib import Path
from hermes_constants import get_hermes_home, get_skills_dir
from hermes_constants import get_hermes_home, get_skills_dir, is_wsl
from typing import Optional
from agent.skill_utils import (
@ -366,6 +366,36 @@ PLATFORM_HINTS = {
),
}
# ---------------------------------------------------------------------------
# Environment hints — execution-environment awareness for the agent.
# Unlike PLATFORM_HINTS (which describe the messaging channel), these describe
# the machine/OS the agent's tools actually run on.
# ---------------------------------------------------------------------------
WSL_ENVIRONMENT_HINT = (
"You are running inside WSL (Windows Subsystem for Linux). "
"The Windows host filesystem is mounted under /mnt/ — "
"/mnt/c/ is the C: drive, /mnt/d/ is D:, etc. "
"The user's Windows files are typically at "
"/mnt/c/Users/<username>/Desktop/, Documents/, Downloads/, etc. "
"When the user references Windows paths or desktop files, translate "
"to the /mnt/c/ equivalent. You can list /mnt/c/Users/ to discover "
"the Windows username if needed."
)
def build_environment_hints() -> str:
    """Return environment-specific guidance for the system prompt.

    Detects WSL, and can be extended for Termux, Docker, etc.
    Returns an empty string when no special environment is detected.
    """
    # One hint paragraph per detected environment, joined by a blank line
    # so multiple hints read as separate prompt sections.
    detected = [WSL_ENVIRONMENT_HINT] if is_wsl() else []
    return "\n\n".join(detected)
CONTEXT_FILE_MAX_CHARS = 20_000
CONTEXT_TRUNCATE_HEAD_RATIO = 0.7
CONTEXT_TRUNCATE_TAIL_RATIO = 0.2
@ -726,8 +756,16 @@ def build_skills_system_prompt(
result = (
"## Skills (mandatory)\n"
"Before replying, scan the skills below. If one clearly matches your task, "
"load it with skill_view(name) and follow its instructions. "
"Before replying, scan the skills below. If a skill matches or is even partially relevant "
"to your task, you MUST load it with skill_view(name) and follow its instructions. "
"Err on the side of loading — it is always better to have context you don't need "
"than to miss critical steps, pitfalls, or established workflows. "
"Skills contain specialized knowledge — API endpoints, tool-specific commands, "
"and proven workflows that outperform general-purpose approaches. Load the skill "
"even if you think you could handle the task with basic tools like web_search or terminal. "
"Skills also encode the user's preferred approach, conventions, and quality standards "
"for tasks like code review, planning, and testing — load them even for tasks you "
"already know how to do, because the skill defines how it should be done here.\n"
"If a skill has issues, fix it with skill_manage(action='patch').\n"
"After difficult/iterative tasks, offer to save as a skill. "
"If a skill you loaded was missing steps, had wrong commands, or needed "
@ -737,7 +775,7 @@ def build_skills_system_prompt(
+ "\n".join(index_lines) + "\n"
"</available_skills>\n"
"\n"
"If none match, proceed normally without loading a skill."
"Only proceed without loading a skill if genuinely none are relevant to the task."
)
# ── Store in LRU cache ────────────────────────────────────────────

View file

@ -36,7 +36,7 @@ def generate_title(user_message: str, assistant_response: str, timeout: float =
try:
response = call_llm(
task="compression", # reuse compression task config (cheap/fast model)
task="title_generation",
messages=messages,
max_tokens=30,
temperature=0.3,

View file

@ -774,6 +774,11 @@ display:
# Toggle at runtime with /verbose in the CLI
tool_progress: all
# Gateway-only natural mid-turn assistant updates.
# When true, completed assistant status messages are sent as separate chat
# messages. This is independent of tool_progress and gateway streaming.
interim_assistant_messages: true
# What Enter does when Hermes is already busy in the CLI.
# interrupt: Interrupt the current run and redirect Hermes (default)
# queue: Queue your message for the next turn
@ -782,7 +787,7 @@ display:
# Background process notifications (gateway/messaging only).
# Controls how chatty the process watcher is when you use
# terminal(background=true, check_interval=...) from Telegram/Discord/etc.
# terminal(background=true, notify_on_complete=true) from Telegram/Discord/etc.
# off: No watcher messages at all
# result: Only the final completion message
# error: Only the final message when exit code != 0

614
cli.py
View file

@ -1393,6 +1393,19 @@ class ChatConsole:
for line in output.rstrip("\n").split("\n"):
_cprint(line)
@contextmanager
def status(self, *_args, **_kwargs):
    """Rich-compatible no-op ``status`` context manager.

    Slash-command helpers call ``console.status(...)`` when running in the
    standalone CLI. In interactive chat those helpers receive a
    ``ChatConsole``, which historically only implemented ``print()``.
    Yielding a silent context keeps them compatible without duplicating
    the higher-level busy indicator already rendered by
    ``HermesCLI._busy_command()``.
    """
    yield self
# ASCII Art - HERMES-AGENT logo (full width, single line - requires ~95 char terminal)
HERMES_AGENT_LOGO = """[bold #FFD700]██╗ ██╗███████╗██████╗ ███╗ ███╗███████╗███████╗ █████╗ ██████╗ ███████╗███╗ ██╗████████╗[/]
[bold #FFD700]██║ ██║██╔════╝██╔══██╗████╗ ████║██╔════╝██╔════╝ ██╔══██╗██╔════╝ ██╔════╝████╗ ██║╚══██╔══╝[/]
@ -1842,10 +1855,13 @@ class HermesCLI:
self._approval_state = None
self._approval_deadline = 0
self._approval_lock = threading.Lock()
self._model_picker_state = None
self._secret_state = None
self._secret_deadline = 0
self._spinner_text: str = "" # thinking spinner text for TUI
self._tool_start_time: float = 0.0 # monotonic timestamp when current tool started (for live elapsed)
self._pending_tool_info: dict = {} # function_name -> list of (preview, args) for stacked scrollback
self._last_scrollback_tool: str = "" # last tool name printed to scrollback (for "new" dedup)
self._command_running = False
self._command_status = ""
self._attached_images: list[Path] = []
@ -2084,7 +2100,7 @@ class HermesCLI:
return f"{self.model if getattr(self, 'model', None) else 'Hermes'}"
def _get_status_bar_fragments(self):
if not self._status_bar_visible:
if not self._status_bar_visible or getattr(self, '_model_picker_state', None):
return []
try:
snapshot = self._get_status_bar_snapshot()
@ -2757,6 +2773,22 @@ class HermesCLI:
if runtime_model and isinstance(runtime_model, str):
self.model = runtime_model
# If model is still empty (e.g. user ran `hermes auth add openai-codex`
# without `hermes model`), fall back to the provider's first catalog
# model so the API call doesn't fail with "model must be non-empty".
if not self.model and resolved_provider:
try:
from hermes_cli.models import get_default_model_for_provider
_default = get_default_model_for_provider(resolved_provider)
if _default:
self.model = _default
logger.info(
"No model configured — defaulting to %s for provider %s",
_default, resolved_provider,
)
except Exception:
pass
# Normalize model for the resolved provider (e.g. swap non-Codex
# models when provider is openai-codex). Fixes #651.
model_changed = self._normalize_model_for_provider(resolved_provider)
@ -4376,6 +4408,265 @@ class HermesCLI:
remaining = len(self.conversation_history)
print(f" {remaining} message(s) remaining in history.")
def _run_curses_picker(self, title: str, items: list[str], default_index: int = 0) -> int | None:
    """Run curses_single_select via run_in_terminal so prompt_toolkit handles terminal ownership cleanly.

    Args:
        title: Heading displayed above the choice list.
        items: Choice labels, displayed in order.
        default_index: Row highlighted when the picker opens.

    Returns:
        Index of the chosen item, or None when the user cancels.
    """
    import threading
    from hermes_cli.curses_ui import curses_single_select
    # Single-slot mutable container so the nested closure can report the
    # selection back across the run_in_terminal boundary.
    result = [None]
    def _pick():
        result[0] = curses_single_select(title, items, default_index=default_index)
    # run_in_terminal requires an asyncio event loop — only exists in the
    # main prompt_toolkit thread. If we're in a background thread (e.g.
    # process_loop), fall back to direct curses call.
    in_main_thread = threading.current_thread() is threading.main_thread()
    if self._app and in_main_thread:
        from prompt_toolkit.application import run_in_terminal
        # Hide the status bar while curses owns the screen, restoring it
        # even if the picker raises.
        was_visible = self._status_bar_visible
        self._status_bar_visible = False
        self._app.invalidate()
        try:
            run_in_terminal(_pick)
        finally:
            self._status_bar_visible = was_visible
            self._app.invalidate()
    else:
        _pick()
    return result[0]
def _prompt_text_input(self, prompt_text: str) -> str | None:
    """Prompt for free-text input safely inside or outside prompt_toolkit.

    Returns the stripped response, or None on empty input, Ctrl-C, or EOF.
    """
    answer: list = [None]

    def _read_line():
        try:
            text = input(prompt_text).strip()
            answer[0] = text if text else None
        except (KeyboardInterrupt, EOFError):
            # Treat interrupts/EOF as a cancel rather than propagating.
            pass

    if not self._app:
        _read_line()
        return answer[0]

    from prompt_toolkit.application import run_in_terminal
    # Temporarily hide the status bar while the terminal is handed over,
    # restoring it afterwards even if input() raises.
    was_visible = self._status_bar_visible
    self._status_bar_visible = False
    self._app.invalidate()
    try:
        run_in_terminal(_read_line)
    finally:
        self._status_bar_visible = was_visible
        self._app.invalidate()
    return answer[0]
def _interactive_provider_selection(
    self, providers: list, current_model: str, current_provider: str
) -> str | None:
    """Show provider picker, return slug or None on cancel."""

    def _label(p: dict) -> str:
        # "<name> (<N> model/models)" with a marker on the active provider.
        count = p.get("total_models", len(p.get("models", [])))
        text = f"{p['name']} ({count} model{'s' if count != 1 else ''})"
        if p.get("is_current"):
            text += " ← current"
        return text

    choices = [_label(p) for p in providers]
    # Pre-select the provider currently in use, if any.
    default_idx = next(
        (i for i, p in enumerate(providers) if p.get("is_current")), 0
    )
    chosen = self._run_curses_picker(
        f"Select a provider (current: {current_model} on {current_provider}):",
        choices,
        default_index=default_idx,
    )
    return None if chosen is None else providers[chosen]["slug"]
def _interactive_model_selection(
    self, model_list: list, provider_data: dict
) -> str | None:
    """Show model picker for a given provider, return model_id or None on cancel."""
    pname = provider_data.get("name", provider_data.get("slug", ""))
    total = provider_data.get("total_models", len(model_list))

    # No catalog entries — fall straight through to manual entry.
    if not model_list:
        _cprint(f"\n No models listed for {pname}.")
        return self._prompt_text_input(" Enter model name manually (or Enter to cancel): ")

    # The final row after the models is the free-text escape hatch.
    custom_idx = len(model_list)
    idx = self._run_curses_picker(
        f"Select model from {pname} ({len(model_list)} of {total}):",
        list(model_list) + ["Enter custom model name"],
    )
    if idx is None:
        return None
    if idx == custom_idx:
        return self._prompt_text_input(" Enter model name: ")
    return model_list[idx]
def _open_model_picker(self, providers: list, current_model: str, current_provider: str, user_provs=None, custom_provs=None) -> None:
    """Open prompt_toolkit-native /model picker modal.

    Stores the picker state dict on the instance; the TUI widget and key
    bindings read and mutate this dict until the picker is closed.

    Args:
        providers: Authenticated provider entries (dicts) to choose from.
        current_model: Model name shown in the picker header.
        current_provider: Provider label shown in the picker header.
        user_provs: Optional user ``providers`` config section.
        custom_provs: Optional ``custom_providers`` config section.
    """
    # NOTE(review): presumably snapshots the input buffer so
    # _close_model_picker can restore it — confirm against the helper.
    self._capture_modal_input_snapshot()
    # Pre-select the provider currently in use, if any.
    default_idx = next((i for i, p in enumerate(providers) if p.get("is_current")), 0)
    self._model_picker_state = {
        "stage": "provider",
        "providers": providers,
        "selected": default_idx,
        "current_model": current_model,
        "current_provider": current_provider,
        "user_provs": user_provs,
        "custom_provs": custom_provs,
    }
    # Force an immediate redraw so the modal appears without delay.
    self._invalidate(min_interval=0.0)
def _close_model_picker(self) -> None:
    """Dismiss the /model picker modal, restore input state, and redraw."""
    self._model_picker_state = None
    self._restore_modal_input_snapshot()
    self._invalidate(min_interval=0.0)
def _apply_model_switch_result(self, result, persist_global: bool) -> None:
    """Apply a model-switch result to the live session and report to the user.

    Updates model/provider/credentials on the CLI, hot-swaps the running
    agent when present, prints model metadata, and optionally persists
    the choice to config.yaml.

    Args:
        result: Outcome object from hermes_cli.model_switch.switch_model.
        persist_global: When True, write model/provider to config.yaml.
    """
    if not result.success:
        _cprint(f"{result.error_message}")
        return
    old_model = self.model
    # Adopt the new model/provider/credentials for this session.
    self.model = result.new_model
    self.provider = result.target_provider
    self.requested_provider = result.target_provider
    if result.api_key:
        self.api_key = result.api_key
        self._explicit_api_key = result.api_key
    if result.base_url:
        self.base_url = result.base_url
        self._explicit_base_url = result.base_url
    if result.api_mode:
        self.api_mode = result.api_mode
    # Hot-swap the live agent; on failure the change still takes effect
    # for the next session.
    if self.agent is not None:
        try:
            self.agent.switch_model(
                new_model=result.new_model,
                new_provider=result.target_provider,
                api_key=result.api_key,
                base_url=result.base_url,
                api_mode=result.api_mode,
            )
        except Exception as exc:
            _cprint(f" ⚠ Agent swap failed ({exc}); change applied to next session.")
    # Injected into the next turn so the model self-identifies correctly.
    self._pending_model_switch_note = (
        f"[Note: model was just switched from {old_model} to {result.new_model} "
        f"via {result.provider_label or result.target_provider}. "
        f"Adjust your self-identification accordingly.]"
    )
    provider_label = result.provider_label or result.target_provider
    _cprint(f" ✓ Model switched: {result.new_model}")
    _cprint(f" Provider: {provider_label}")
    mi = result.model_info
    if mi:
        # Rich metadata supplied by the switch result, when available.
        if mi.context_window:
            _cprint(f" Context: {mi.context_window:,} tokens")
        if mi.max_output:
            _cprint(f" Max output: {mi.max_output:,} tokens")
        if mi.has_cost_data():
            _cprint(f" Cost: {mi.format_cost()}")
        _cprint(f" Capabilities: {mi.format_capabilities()}")
    else:
        # Fallback: best-effort context-length lookup; silent on failure.
        try:
            from agent.model_metadata import get_model_context_length
            ctx = get_model_context_length(
                result.new_model,
                base_url=result.base_url or self.base_url,
                api_key=result.api_key or self.api_key,
                provider=result.target_provider,
            )
            _cprint(f" Context: {ctx:,} tokens")
        except Exception:
            pass
    # Prompt caching applies for OpenRouter+Claude or the native
    # Anthropic Messages API mode.
    cache_enabled = (
        ("openrouter" in (result.base_url or "").lower() and "claude" in result.new_model.lower())
        or result.api_mode == "anthropic_messages"
    )
    if cache_enabled:
        _cprint(" Prompt caching: enabled")
    if result.warning_message:
        _cprint(f"{result.warning_message}")
    if persist_global:
        save_config_value("model.default", result.new_model)
        if result.provider_changed:
            save_config_value("model.provider", result.target_provider)
        _cprint(" Saved to config.yaml (--global)")
    else:
        _cprint(" (session only — add --global to persist)")
def _handle_model_picker_selection(self, persist_global: bool = False) -> None:
    """Handle Enter inside the /model picker modal.

    Provider stage: drill into the chosen provider's model list.
    Model stage: "Back" returns to the provider stage, "Cancel" closes
    the picker, and choosing a model performs the actual switch.

    Args:
        persist_global: Forwarded to switch_model so the choice can be
            written to config.yaml.
    """
    state = self._model_picker_state
    if not state:
        return
    selected = state.get("selected", 0)
    stage = state.get("stage")
    if stage == "provider":
        providers = state.get("providers") or []
        # Index past the provider list is the "Cancel" row.
        if selected >= len(providers):
            self._close_model_picker()
            return
        provider_data = providers[selected]
        model_list = []
        # Prefer a live model listing; fall back to the cached catalog.
        try:
            from hermes_cli.models import provider_model_ids
            live = provider_model_ids(provider_data["slug"])
            if live:
                model_list = live
        except Exception:
            pass
        if not model_list:
            model_list = provider_data.get("models", [])
        # Advance the modal to the model stage for this provider.
        state["stage"] = "model"
        state["provider_data"] = provider_data
        state["model_list"] = model_list
        state["selected"] = 0
        self._invalidate(min_interval=0.0)
        return
    if stage == "model":
        provider_data = state.get("provider_data") or {}
        model_list = state.get("model_list") or []
        # Two synthetic rows follow the models: "← Back" then "Cancel".
        back_idx = len(model_list)
        cancel_idx = len(model_list) + 1
        if selected == back_idx:
            # Return to the provider stage, re-selecting this provider.
            state["stage"] = "provider"
            state["selected"] = next((i for i, p in enumerate(state.get("providers") or []) if p.get("slug") == provider_data.get("slug")), 0)
            self._invalidate(min_interval=0.0)
            return
        if selected >= cancel_idx:
            self._close_model_picker()
            return
        if selected < len(model_list):
            from hermes_cli.model_switch import switch_model
            chosen_model = model_list[selected]
            result = switch_model(
                raw_input=chosen_model,
                current_provider=self.provider or "",
                current_model=self.model or "",
                current_base_url=self.base_url or "",
                current_api_key=self.api_key or "",
                is_global=persist_global,
                explicit_provider=provider_data.get("slug"),
                user_providers=state.get("user_provs"),
                custom_providers=state.get("custom_provs"),
            )
            # Close the modal first so switch output prints to scrollback.
            self._close_model_picker()
            self._apply_model_switch_result(result, persist_global)
        return
    # Unknown stage — close defensively.
    self._close_model_picker()
def _handle_model_switch(self, cmd_original: str):
"""Handle /model command — switch model for this session.
@ -4398,56 +4689,46 @@ class HermesCLI:
user_provs = None
custom_provs = None
try:
from hermes_cli.config import load_config
cfg = load_config()
user_provs = cfg.get("providers")
custom_provs = cfg.get("custom_providers")
except Exception:
pass
# No args at all: show available providers + models
# No args at all: open prompt_toolkit-native picker modal
if not model_input and not explicit_provider:
model_display = self.model or "unknown"
provider_display = get_label(self.provider) if self.provider else "unknown"
_cprint(f" Current: {model_display} on {provider_display}")
_cprint("")
# Show authenticated providers with top models
user_provs = None
custom_provs = None
try:
from hermes_cli.config import load_config
cfg = load_config()
user_provs = cfg.get("providers")
custom_provs = cfg.get("custom_providers")
except Exception:
pass
try:
providers = list_authenticated_providers(
current_provider=self.provider or "",
user_providers=user_provs,
custom_providers=custom_provs,
max_models=6,
max_models=50,
)
if providers:
for p in providers:
tag = " (current)" if p["is_current"] else ""
_cprint(f" {p['name']} [--provider {p['slug']}]{tag}:")
if p["models"]:
model_strs = ", ".join(p["models"])
extra = f" (+{p['total_models'] - len(p['models'])} more)" if p["total_models"] > len(p["models"]) else ""
_cprint(f" {model_strs}{extra}")
elif p.get("api_url"):
_cprint(f" {p['api_url']} (use /model <name> --provider {p['slug']})")
else:
_cprint(f" (no models listed)")
_cprint("")
else:
_cprint(" No authenticated providers found.")
_cprint("")
except Exception:
pass
providers = []
# Aliases
from hermes_cli.model_switch import MODEL_ALIASES
alias_list = ", ".join(sorted(MODEL_ALIASES.keys()))
_cprint(f" Aliases: {alias_list}")
_cprint("")
_cprint(" /model <name> switch model")
_cprint(" /model <name> --provider <slug> switch provider")
_cprint(" /model <name> --global persist to config")
if not providers:
_cprint(" No authenticated providers found.")
_cprint("")
_cprint(" /model <name> switch model")
_cprint(" /model --provider <slug> switch provider")
return
self._open_model_picker(
providers,
model_display,
provider_display,
user_provs=user_provs,
custom_provs=custom_provs,
)
return
# Perform the switch
@ -4555,6 +4836,18 @@ class HermesCLI:
else:
_cprint(" (session only — add --global to persist)")
def _should_handle_model_command_inline(self, text: str, has_images: bool = False) -> bool:
    """Return True when /model should be handled immediately on the UI thread."""
    # Attached images force the normal processing path; empty or
    # non-slash text can never be the /model command.
    if has_images or not text or not _looks_like_slash_command(text):
        return False
    try:
        from hermes_cli.commands import resolve_command
        first_word = text.split(None, 1)[0]
        resolved = resolve_command(first_word.lower().lstrip('/'))
        return bool(resolved and resolved.name == "model")
    except Exception:
        # Resolution failures fall back to the regular command path.
        return False
def _show_model_and_providers(self):
"""Show current model + provider and list all authenticated providers.
@ -5065,9 +5358,33 @@ class HermesCLI:
context_length=ctx_len,
)
_cprint(" ✨ (◕‿◕)✨ Fresh start! Screen cleared and conversation reset.\n")
# Show a random tip on new session
try:
from hermes_cli.tips import get_random_tip
_tip = get_random_tip()
try:
from hermes_cli.skin_engine import get_active_skin
_tip_color = get_active_skin().get_color("banner_dim", "#B8860B")
except Exception:
_tip_color = "#B8860B"
cc.print(f"[dim {_tip_color}]✦ Tip: {_tip}[/]")
except Exception:
pass
else:
self.show_banner()
print(" ✨ (◕‿◕)✨ Fresh start! Screen cleared and conversation reset.\n")
# Show a random tip on new session
try:
from hermes_cli.tips import get_random_tip
_tip = get_random_tip()
try:
from hermes_cli.skin_engine import get_active_skin
_tip_color = get_active_skin().get_color("banner_dim", "#B8860B")
except Exception:
_tip_color = "#B8860B"
self.console.print(f"[dim {_tip_color}]✦ Tip: {_tip}[/]")
except Exception:
pass
elif canonical == "history":
self.show_history()
elif canonical == "title":
@ -5167,7 +5484,7 @@ class HermesCLI:
elif canonical == "fast":
self._handle_fast_command(cmd_original)
elif canonical == "compress":
self._manual_compress()
self._manual_compress(cmd_original)
elif canonical == "usage":
self._show_usage()
elif canonical == "insights":
@ -6028,8 +6345,14 @@ class HermesCLI:
self._reasoning_preview_buf = getattr(self, "_reasoning_preview_buf", "") + reasoning_text
self._flush_reasoning_preview(force=False)
def _manual_compress(self):
"""Manually trigger context compression on the current conversation."""
def _manual_compress(self, cmd_original: str = ""):
"""Manually trigger context compression on the current conversation.
Accepts an optional focus topic: ``/compress <focus>`` guides the
summariser to preserve information related to *focus* while being
more aggressive about discarding everything else. Inspired by
Claude Code's ``/compact <focus>`` feature.
"""
if not self.conversation_history or len(self.conversation_history) < 4:
print("(._.) Not enough conversation to compress (need at least 4 messages).")
return
@ -6042,18 +6365,30 @@ class HermesCLI:
print("(._.) Compression is disabled in config.")
return
# Extract optional focus topic from the command (e.g. "/compress database schema")
focus_topic = ""
if cmd_original:
parts = cmd_original.strip().split(None, 1)
if len(parts) > 1:
focus_topic = parts[1].strip()
original_count = len(self.conversation_history)
try:
from agent.model_metadata import estimate_messages_tokens_rough
from agent.manual_compression_feedback import summarize_manual_compression
original_history = list(self.conversation_history)
approx_tokens = estimate_messages_tokens_rough(original_history)
print(f"🗜️ Compressing {original_count} messages (~{approx_tokens:,} tokens)...")
if focus_topic:
print(f"🗜️ Compressing {original_count} messages (~{approx_tokens:,} tokens), "
f"focus: \"{focus_topic}\"...")
else:
print(f"🗜️ Compressing {original_count} messages (~{approx_tokens:,} tokens)...")
compressed, _ = self.agent._compress_context(
original_history,
self.agent._cached_system_prompt or "",
approx_tokens=approx_tokens,
focus_topic=focus_topic or None,
)
self.conversation_history = compressed
new_tokens = estimate_messages_tokens_rough(self.conversation_history)
@ -6369,10 +6704,36 @@ class HermesCLI:
On tool.started, records a monotonic timestamp so get_spinner_text()
can show a live elapsed timer (the TUI poll loop already invalidates
every ~0.15s, so the counter updates automatically).
When tool_progress_mode is "all" or "new", also prints a persistent
stacked line to scrollback on tool.completed so users can see the
full history of tool calls (not just the current one in the spinner).
"""
if event_type == "tool.completed":
import time as _time
self._tool_start_time = 0.0
# Print stacked scrollback line for "all" / "new" modes
if function_name and self.tool_progress_mode in ("all", "new"):
duration = kwargs.get("duration", 0.0)
is_error = kwargs.get("is_error", False)
# Pop stored args from tool.started for this function
stored = self._pending_tool_info.get(function_name)
stored_args = stored.pop(0) if stored else {}
if stored is not None and not stored:
del self._pending_tool_info[function_name]
# "new" mode: skip consecutive repeats of the same tool
if self.tool_progress_mode == "new" and function_name == self._last_scrollback_tool:
self._invalidate()
return
self._last_scrollback_tool = function_name
try:
from agent.display import get_cute_tool_message
line = get_cute_tool_message(function_name, stored_args, duration)
if is_error:
line = f"{line} [error]"
_cprint(f" {line}")
except Exception:
pass
self._invalidate()
return
if event_type != "tool.started":
@ -6388,6 +6749,10 @@ class HermesCLI:
label = label[:_pl - 3] + "..."
self._spinner_text = f"{emoji} {label}"
self._tool_start_time = _time.monotonic()
# Store args for stacked scrollback line on completion
self._pending_tool_info.setdefault(function_name, []).append(
function_args if function_args is not None else {}
)
self._invalidate()
if not self._voice_mode:
@ -7781,7 +8146,8 @@ class HermesCLI:
secret_widget,
approval_widget,
clarify_widget,
spinner_widget,
model_picker_widget=None,
spinner_widget=None,
spacer,
status_bar,
input_rule_top,
@ -7798,21 +8164,24 @@ class HermesCLI:
ordering.
"""
return [
Window(height=0),
sudo_widget,
secret_widget,
approval_widget,
clarify_widget,
spinner_widget,
spacer,
*self._get_extra_tui_widgets(),
status_bar,
input_rule_top,
image_bar,
input_area,
input_rule_bot,
voice_status_bar,
completions_menu,
item for item in [
Window(height=0),
sudo_widget,
secret_widget,
approval_widget,
clarify_widget,
model_picker_widget,
spinner_widget,
spacer,
*self._get_extra_tui_widgets(),
status_bar,
input_rule_top,
image_bar,
input_area,
input_rule_bot,
voice_status_bar,
completions_menu,
] if item is not None
]
def run(self):
@ -7848,6 +8217,17 @@ class HermesCLI:
_welcome_text = "Welcome to Hermes Agent! Type your message or /help for commands."
_welcome_color = "#FFF8DC"
self.console.print(f"[{_welcome_color}]{_welcome_text}[/]")
# Show a random tip to help users discover features
try:
from hermes_cli.tips import get_random_tip
_tip = get_random_tip()
try:
_tip_color = _welcome_skin.get_color("banner_dim", "#B8860B")
except Exception:
_tip_color = "#B8860B"
self.console.print(f"[dim {_tip_color}]✦ Tip: {_tip}[/]")
except Exception:
pass # Tips are non-critical — never break startup
if self.preloaded_skills and not self._startup_skills_line_shown:
skills_label = ", ".join(self.preloaded_skills)
self.console.print(
@ -7973,6 +8353,12 @@ class HermesCLI:
event.app.invalidate()
return
# --- /model picker modal ---
if self._model_picker_state:
self._handle_model_picker_selection()
event.app.invalidate()
return
# --- Clarify freetext mode: user typed their own answer ---
if self._clarify_freetext and self._clarify_state:
text = event.app.current_buffer.text.strip()
@ -8003,6 +8389,16 @@ class HermesCLI:
text = event.app.current_buffer.text.strip()
has_images = bool(self._attached_images)
if text or has_images:
# Handle /model directly on the UI thread so interactive pickers
# can safely use prompt_toolkit terminal handoff helpers.
if self._should_handle_model_command_inline(text, has_images=has_images):
if not self.process_command(text):
self._should_exit = True
if event.app.is_running:
event.app.exit()
event.app.current_buffer.reset(append_to_history=True)
return
# Snapshot and clear attached images
images = list(self._attached_images)
self._attached_images.clear()
@ -8106,12 +8502,31 @@ class HermesCLI:
self._approval_state["selected"] = min(max_idx, self._approval_state["selected"] + 1)
event.app.invalidate()
# --- /model picker: arrow-key navigation ---
@kb.add('up', filter=Condition(lambda: bool(self._model_picker_state)))
def model_picker_up(event):
if self._model_picker_state:
self._model_picker_state["selected"] = max(0, self._model_picker_state.get("selected", 0) - 1)
event.app.invalidate()
@kb.add('down', filter=Condition(lambda: bool(self._model_picker_state)))
def model_picker_down(event):
state = self._model_picker_state
if not state:
return
if state.get("stage") == "provider":
max_idx = len(state.get("providers") or [])
else:
max_idx = len(state.get("model_list") or []) + 1
state["selected"] = min(max_idx, state.get("selected", 0) + 1)
event.app.invalidate()
# --- History navigation: up/down browse history in normal input mode ---
# The TextArea is multiline, so by default up/down only move the cursor.
# Buffer.auto_up/auto_down handle both: cursor movement when multi-line,
# history browsing when on the first/last line (or single-line input).
_normal_input = Condition(
lambda: not self._clarify_state and not self._approval_state and not self._sudo_state and not self._secret_state
lambda: not self._clarify_state and not self._approval_state and not self._sudo_state and not self._secret_state and not self._model_picker_state
)
@kb.add('up', filter=_normal_input)
@ -8177,6 +8592,13 @@ class HermesCLI:
event.app.invalidate()
return
# Cancel /model picker
if self._model_picker_state:
self._close_model_picker()
event.app.current_buffer.reset()
event.app.invalidate()
return
# Cancel clarify prompt
if self._clarify_state:
self._clarify_state["response_queue"].put(
@ -8229,7 +8651,7 @@ class HermesCLI:
agent_name = get_active_skin().get_branding("agent_name", "Hermes Agent")
msg = f"\n{agent_name} has been suspended. Run `fg` to bring {agent_name} back."
def _suspend():
os.write(1, msg.encode("utf-8", errors="replace"))
os.write(1, msg.encode())
os.kill(0, _sig.SIGTSTP)
run_in_terminal(_suspend)
@ -8794,6 +9216,60 @@ class HermesCLI:
filter=Condition(lambda: cli_ref._approval_state is not None),
)
# --- /model picker: display widget ---
def _get_model_picker_display():
    """Build the formatted-text fragments for the /model picker panel.

    Reads cli_ref._model_picker_state and renders either the provider
    stage or the model stage as a bordered panel with the selected row
    highlighted. Returns [] when the picker is closed.
    """
    state = cli_ref._model_picker_state
    if not state:
        return []
    stage = state.get("stage", "provider")
    if stage == "provider":
        title = "⚙ Model Picker — Select Provider"
        choices = []
        for p in state.get("providers") or []:
            # "<name> (<N> models)" with a marker on the active provider.
            count = p.get("total_models", len(p.get("models", [])))
            label = f"{p['name']} ({count} model{'s' if count != 1 else ''})"
            if p.get("is_current"):
                label += " ← current"
            choices.append(label)
        choices.append("Cancel")
        hint = f"Current: {state.get('current_model', 'unknown')} on {state.get('current_provider', 'unknown')}"
    else:
        provider_data = state.get("provider_data") or {}
        model_list = state.get("model_list") or []
        title = f"⚙ Model Picker — {provider_data.get('name', provider_data.get('slug', 'Provider'))}"
        # Two synthetic rows after the models: "← Back" then "Cancel".
        choices = list(model_list) + ["← Back", "Cancel"]
        if model_list:
            hint = f"Select a model ({len(model_list)} available)"
        else:
            hint = "No models listed for this provider. Use Back or Cancel."
    # Size the panel to the longest line, clamped to sane bounds.
    box_width = _panel_box_width(title, [hint] + choices, min_width=46, max_width=84)
    inner_text_width = max(8, box_width - 6)
    lines = []
    # Top border with embedded title.
    # NOTE(review): the '' literals below look like box-drawing glyphs
    # lost in rendering — confirm against the original file.
    lines.append(('class:clarify-border', '╭─ '))
    lines.append(('class:clarify-title', title))
    lines.append(('class:clarify-border', ' ' + ('' * max(0, box_width - len(title) - 3)) + '\n'))
    _append_blank_panel_line(lines, 'class:clarify-border', box_width)
    _append_panel_line(lines, 'class:clarify-border', 'class:clarify-hint', hint, box_width)
    _append_blank_panel_line(lines, 'class:clarify-border', box_width)
    selected = state.get("selected", 0)
    for idx, choice in enumerate(choices):
        # Highlight the selected row; wrap long labels with hanging indent.
        style = 'class:clarify-selected' if idx == selected else 'class:clarify-choice'
        prefix = ' ' if idx == selected else ' '
        for wrapped in _wrap_panel_text(prefix + choice, inner_text_width, subsequent_indent=' '):
            _append_panel_line(lines, 'class:clarify-border', style, wrapped, box_width)
    _append_blank_panel_line(lines, 'class:clarify-border', box_width)
    # Bottom border.
    lines.append(('class:clarify-border', '' + ('' * box_width) + '\n'))
    return lines
model_picker_widget = ConditionalContainer(
Window(
FormattedTextControl(_get_model_picker_display),
wrap_lines=True,
),
filter=Condition(lambda: cli_ref._model_picker_state is not None),
)
# Horizontal rules above and below the input.
# On narrow/mobile terminals we keep the top separator for structure but
# hide the bottom one to recover a full row for conversation content.
@ -8869,6 +9345,7 @@ class HermesCLI:
secret_widget=secret_widget,
approval_widget=approval_widget,
clarify_widget=clarify_widget,
model_picker_widget=model_picker_widget,
spinner_widget=spinner_widget,
spacer=spacer,
status_bar=status_bar,
@ -9026,9 +9503,14 @@ class HermesCLI:
from tools.process_registry import process_registry
if not process_registry.completion_queue.empty():
evt = process_registry.completion_queue.get_nowait()
_synth = _format_process_notification(evt)
if _synth:
self._pending_input.put(_synth)
# Skip if the agent already consumed this via wait/poll/log
_evt_sid = evt.get("session_id", "")
if evt.get("type") == "completion" and process_registry.is_completion_consumed(_evt_sid):
pass # already delivered via tool result
else:
_synth = _format_process_notification(evt)
if _synth:
self._pending_input.put(_synth)
except Exception:
pass
continue
@ -9127,6 +9609,8 @@ class HermesCLI:
self._agent_running = False
self._spinner_text = ""
self._tool_start_time = 0.0
self._pending_tool_info.clear()
self._last_scrollback_tool = ""
app.invalidate() # Refresh status line
@ -9152,6 +9636,10 @@ class HermesCLI:
from tools.process_registry import process_registry
while not process_registry.completion_queue.empty():
evt = process_registry.completion_queue.get_nowait()
# Skip if the agent already consumed this via wait/poll/log
_evt_sid = evt.get("session_id", "")
if evt.get("type") == "completion" and process_registry.is_completion_consumed(_evt_sid):
continue # already delivered via tool result
_synth = _format_process_notification(evt)
if _synth:
self._pending_input.put(_synth)

View file

@ -44,7 +44,7 @@ logger = logging.getLogger(__name__)
_KNOWN_DELIVERY_PLATFORMS = frozenset({
"telegram", "discord", "slack", "whatsapp", "signal",
"matrix", "mattermost", "homeassistant", "dingtalk", "feishu",
"wecom", "weixin", "sms", "email", "webhook", "bluebubbles",
"wecom", "wecom_callback", "weixin", "sms", "email", "webhook", "bluebubbles",
})
from cron.jobs import get_due_jobs, mark_job_run, save_job_output, advance_next_run
@ -219,6 +219,21 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
chat_id = target["chat_id"]
thread_id = target.get("thread_id")
# Diagnostic: log thread_id for topic-aware delivery debugging
origin = job.get("origin") or {}
origin_thread = origin.get("thread_id")
if origin_thread and not thread_id:
logger.warning(
"Job '%s': origin has thread_id=%s but delivery target lost it "
"(deliver=%s, target=%s)",
job["id"], origin_thread, job.get("deliver", "local"), target,
)
elif thread_id:
logger.debug(
"Job '%s': delivering to %s:%s thread_id=%s",
job["id"], platform_name, chat_id, thread_id,
)
from tools.send_message_tool import _send_to_platform
from gateway.config import load_gateway_config, Platform
@ -234,6 +249,7 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
"dingtalk": Platform.DINGTALK,
"feishu": Platform.FEISHU,
"wecom": Platform.WECOM,
"wecom_callback": Platform.WECOM_CALLBACK,
"weixin": Platform.WEIXIN,
"email": Platform.EMAIL,
"sms": Platform.SMS,
@ -625,6 +641,15 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
except Exception as e:
logger.warning("Job '%s': failed to load config.yaml, using defaults: %s", job_id, e)
# Apply IPv4 preference if configured.
try:
from hermes_constants import apply_ipv4_preference
_net_cfg = _cfg.get("network", {})
if isinstance(_net_cfg, dict) and _net_cfg.get("force_ipv4"):
apply_ipv4_preference(force=True)
except Exception:
pass
# Reasoning config from config.yaml
from hermes_constants import parse_reasoning_effort
effort = str(_cfg.get("agent", {}).get("reasoning_effort", "")).strip()

View file

@ -5,6 +5,33 @@ set -e
HERMES_HOME="/opt/data"
INSTALL_DIR="/opt/hermes"
# --- Privilege dropping via gosu ---
# When started as root (the default), optionally remap the hermes user/group
# to match host-side ownership, fix volume permissions, then re-exec as hermes.
if [ "$(id -u)" = "0" ]; then
    # Only remap when HERMES_UID/HERMES_GID are provided AND differ from the
    # container's current hermes uid/gid — avoids pointless usermod/groupmod runs.
    if [ -n "$HERMES_UID" ] && [ "$HERMES_UID" != "$(id -u hermes)" ]; then
        echo "Changing hermes UID to $HERMES_UID"
        usermod -u "$HERMES_UID" hermes
    fi
    if [ -n "$HERMES_GID" ] && [ "$HERMES_GID" != "$(id -g hermes)" ]; then
        echo "Changing hermes GID to $HERMES_GID"
        groupmod -g "$HERMES_GID" hermes
    fi
    # Re-read the uid after the optional remap above, then fix the data
    # volume's ownership if its top-level owner doesn't match.
    # NOTE: chown -R can be slow on large volumes, but it only runs when the
    # top-level owner mismatches (stat errors are silenced; a missing dir
    # yields an empty string, which also triggers the fix-up).
    actual_hermes_uid=$(id -u hermes)
    if [ "$(stat -c %u "$HERMES_HOME" 2>/dev/null)" != "$actual_hermes_uid" ]; then
        echo "$HERMES_HOME is not owned by $actual_hermes_uid, fixing"
        chown -R hermes:hermes "$HERMES_HOME"
    fi
    echo "Dropping root privileges"
    # Re-exec this same script as the unprivileged hermes user via gosu,
    # preserving all original arguments. Everything below runs as hermes.
    exec gosu hermes "$0" "$@"
fi
# --- Running as hermes from here ---
source "${INSTALL_DIR}/.venv/bin/activate"
# Create essential directory structure. Cache and platform directories
# (cache/images, cache/audio, platforms/whatsapp, etc.) are created on
# demand by the application — don't pre-create them here so new installs

View file

@ -118,7 +118,7 @@ For executed migrations, the full report is saved to `~/.hermes/migration/opencl
## Troubleshooting
### "OpenClaw directory not found"
The migration looks for `~/.openclaw` by default, then tries `~/.clawdbot` and `~/.moldbot`. If your OpenClaw is installed elsewhere, use `--source`:
The migration looks for `~/.openclaw` by default, then tries `~/.clawdbot` and `~/.moltbot`. If your OpenClaw is installed elsewhere, use `--source`:
```bash
hermes claw migrate --source /path/to/.openclaw
```

View file

@ -0,0 +1,329 @@
# Container-Aware CLI Review Fixes Spec
**PR:** NousResearch/hermes-agent#7543
**Review:** cursor[bot] bugbot review (4094049442) + two prior rounds
**Date:** 2026-04-12
**Branch:** `feat/container-aware-cli-clean`
## Review Issues Summary
Six issues were raised across three bugbot review rounds. Three were fixed in intermediate commits (38277a6a, 726cf90f). This spec addresses remaining design concerns surfaced by those reviews and simplifies the implementation based on interview decisions.
| # | Issue | Severity | Status |
|---|-------|----------|--------|
| 1 | `os.execvp` retry loop unreachable | Medium | Fixed in 79e8cd12 (switched to subprocess.run) |
| 2 | Redundant `shutil.which("sudo")` | Medium | Fixed in 38277a6a (reuses `sudo` var) |
| 3 | Missing `chown -h` on symlink update | Low | Fixed in 38277a6a |
| 4 | Container routing after `parse_args()` | High | Fixed in 726cf90f |
| 5 | Hardcoded `/home/${user}` | Medium | Fixed in 726cf90f |
| 6 | Group membership not gated on `container.enable` | Low | Fixed in 726cf90f |
The mechanical fixes are in place but the overall design needs revision. The retry loop, error swallowing, and process model have deeper issues than what the bugbot flagged.
---
## Spec: Revised `_exec_in_container`
### Design Principles
1. **Let it crash.** No silent fallbacks. If `.container-mode` exists but something goes wrong, the error propagates naturally (Python traceback). The only case where container routing is skipped is when `.container-mode` doesn't exist or `HERMES_DEV=1`.
2. **No retries.** Probe once for sudo, exec once. If it fails, docker/podman's stderr reaches the user verbatim.
3. **Completely transparent.** No error wrapping, no prefixes, no spinners. Docker's output goes straight through.
4. **`os.execvp` on the happy path.** Replace the Python process entirely so there's no idle parent during interactive sessions. Note: `execvp` never returns on success (process is replaced) and raises `OSError` on failure (it does not return a value). The container process's exit code becomes the process exit code by definition — no explicit propagation needed.
5. **One human-readable exception to "let it crash".** `subprocess.TimeoutExpired` from the sudo probe gets a specific catch with a readable message, since a raw traceback for "your Docker daemon is slow" is confusing. All other exceptions propagate naturally.
### Execution Flow
```
1. get_container_exec_info()
- HERMES_DEV=1 → return None (skip routing)
- Inside container → return None (skip routing)
- .container-mode doesn't exist → return None (skip routing)
- .container-mode exists → parse and return dict
- .container-mode exists but malformed/unreadable → LET IT CRASH (no try/except)
2. _exec_in_container(container_info, sys.argv[1:])
a. shutil.which(backend) → if None, print "{backend} not found on PATH" and sys.exit(1)
b. Sudo probe: subprocess.run([runtime, "inspect", "--format", "ok", container_name], timeout=15)
- If succeeds → needs_sudo = False
- If fails → try subprocess.run([sudo, "-n", runtime, "inspect", ...], timeout=15)
- If succeeds → needs_sudo = True
- If fails → print error with sudoers hint (including why -n is required) and sys.exit(1)
- If TimeoutExpired → catch specifically, print human-readable message about slow daemon
c. Build exec_cmd: [sudo? + runtime, "exec", tty_flags, "-u", exec_user, env_flags, container, hermes_bin, *cli_args]
d. os.execvp(exec_cmd[0], exec_cmd)
- On success: process is replaced — Python is gone, container exit code IS the process exit code
- On OSError: let it crash (natural traceback)
```
### Changes to `hermes_cli/main.py`
#### `_exec_in_container` — rewrite
Remove:
- The entire retry loop (`max_retries`, `for attempt in range(...)`)
- Spinner logic (`"Waiting for container..."`, dots)
- Exit code classification (125/126/127 handling)
- `subprocess.run` for the exec call (keep it only for the sudo probe)
- Special TTY vs non-TTY retry counts
- The `time` import (no longer needed)
Change:
- Use `os.execvp(exec_cmd[0], exec_cmd)` as the final call
- Keep the `subprocess` import only for the sudo probe
- Keep TTY detection for the `-it` vs `-i` flag
- Keep env var forwarding (TERM, COLORTERM, LANG, LC_ALL)
- Keep the sudo probe as-is (it's the one "smart" part)
- Bump probe `timeout` from 5s to 15s — cold podman on a loaded machine needs headroom
- Catch `subprocess.TimeoutExpired` specifically on both probe calls — print a readable message about the daemon being unresponsive instead of a raw traceback
- Expand the sudoers hint error message to explain *why* `-n` (non-interactive) is required: a password prompt would hang the CLI or break piped commands
The function becomes roughly:
```python
def _exec_in_container(container_info: dict, cli_args: list):
"""Replace the current process with a command inside the managed container.
Probes whether sudo is needed (rootful containers), then os.execvp
into the container. If exec fails, the OS error propagates naturally.
"""
import shutil
import subprocess
backend = container_info["backend"]
container_name = container_info["container_name"]
exec_user = container_info["exec_user"]
hermes_bin = container_info["hermes_bin"]
runtime = shutil.which(backend)
if not runtime:
print(f"Error: {backend} not found on PATH. Cannot route to container.",
file=sys.stderr)
sys.exit(1)
# Probe whether we need sudo to see the rootful container.
# Timeout is 15s — cold podman on a loaded machine can take a while.
# TimeoutExpired is caught specifically for a human-readable message;
# all other exceptions propagate naturally.
needs_sudo = False
sudo = None
try:
probe = subprocess.run(
[runtime, "inspect", "--format", "ok", container_name],
capture_output=True, text=True, timeout=15,
)
except subprocess.TimeoutExpired:
print(
f"Error: timed out waiting for {backend} to respond.\n"
f"The {backend} daemon may be unresponsive or starting up.",
file=sys.stderr,
)
sys.exit(1)
if probe.returncode != 0:
sudo = shutil.which("sudo")
if sudo:
try:
probe2 = subprocess.run(
[sudo, "-n", runtime, "inspect", "--format", "ok", container_name],
capture_output=True, text=True, timeout=15,
)
except subprocess.TimeoutExpired:
print(
f"Error: timed out waiting for sudo {backend} to respond.",
file=sys.stderr,
)
sys.exit(1)
if probe2.returncode == 0:
needs_sudo = True
else:
print(
f"Error: container '{container_name}' not found via {backend}.\n"
f"\n"
f"The NixOS service runs the container as root. Your user cannot\n"
f"see it because {backend} uses per-user namespaces.\n"
f"\n"
f"Fix: grant passwordless sudo for {backend}. The -n (non-interactive)\n"
f"flag is required because the CLI calls sudo non-interactively —\n"
f"a password prompt would hang or break piped commands:\n"
f"\n"
f' security.sudo.extraRules = [{{\n'
f' users = [ "{os.getenv("USER", "your-user")}" ];\n'
f' commands = [{{ command = "{runtime}"; options = [ "NOPASSWD" ]; }}];\n'
f' }}];\n'
f"\n"
f"Or run: sudo hermes {' '.join(cli_args)}",
file=sys.stderr,
)
sys.exit(1)
else:
print(
f"Error: container '{container_name}' not found via {backend}.\n"
f"The container may be running under root. Try: sudo hermes {' '.join(cli_args)}",
file=sys.stderr,
)
sys.exit(1)
is_tty = sys.stdin.isatty()
tty_flags = ["-it"] if is_tty else ["-i"]
env_flags = []
for var in ("TERM", "COLORTERM", "LANG", "LC_ALL"):
val = os.environ.get(var)
if val:
env_flags.extend(["-e", f"{var}={val}"])
cmd_prefix = [sudo, "-n", runtime] if needs_sudo else [runtime]
exec_cmd = (
cmd_prefix + ["exec"]
+ tty_flags
+ ["-u", exec_user]
+ env_flags
+ [container_name, hermes_bin]
+ cli_args
)
# execvp replaces this process entirely — it never returns on success.
# On failure it raises OSError, which propagates naturally.
os.execvp(exec_cmd[0], exec_cmd)
```
#### Container routing call site in `main()` — remove try/except
Current:
```python
try:
from hermes_cli.config import get_container_exec_info
container_info = get_container_exec_info()
if container_info:
_exec_in_container(container_info, sys.argv[1:])
sys.exit(1) # exec failed if we reach here
except SystemExit:
raise
except Exception:
pass # Container routing unavailable, proceed locally
```
Revised:
```python
from hermes_cli.config import get_container_exec_info
container_info = get_container_exec_info()
if container_info:
_exec_in_container(container_info, sys.argv[1:])
# Unreachable: os.execvp never returns on success (process is replaced)
# and raises OSError on failure (which propagates as a traceback).
# This line exists only as a defensive assertion.
sys.exit(1)
```
No try/except. If `.container-mode` doesn't exist, `get_container_exec_info()` returns `None` and we skip routing. If it exists but is broken, the exception propagates with a natural traceback.
Note: `sys.exit(1)` after `_exec_in_container` is dead code in all paths — `os.execvp` either replaces the process or raises. It's kept as a belt-and-suspenders assertion with a comment marking it unreachable, not as actual error handling.
### Changes to `hermes_cli/config.py`
#### `get_container_exec_info` — remove inner try/except
Current code catches `(OSError, IOError)` and returns `None`. This silently hides permission errors, corrupt files, etc.
Change: Remove the try/except around file reading. Keep the early returns for `HERMES_DEV=1` and `_is_inside_container()`. The `FileNotFoundError` from `open()` when `.container-mode` doesn't exist should still return `None` (this is the "container mode not enabled" case). All other exceptions propagate.
```python
def get_container_exec_info() -> Optional[dict]:
if os.environ.get("HERMES_DEV") == "1":
return None
if _is_inside_container():
return None
container_mode_file = get_hermes_home() / ".container-mode"
try:
with open(container_mode_file, "r") as f:
# ... parse key=value lines ...
except FileNotFoundError:
return None
# All other exceptions (PermissionError, malformed data, etc.) propagate
return { ... }
```
---
## Spec: NixOS Module Changes
### Symlink creation — simplify to two branches
Current: 4 branches (symlink exists, directory exists, other file, doesn't exist).
Revised: 2 branches.
```bash
if [ -d "${symlinkPath}" ] && [ ! -L "${symlinkPath}" ]; then
# Real directory — back it up, then create symlink
_backup="${symlinkPath}.bak.$(date +%s)"
echo "hermes-agent: backing up existing ${symlinkPath} to $_backup"
mv "${symlinkPath}" "$_backup"
fi
# For everything else (symlink, doesn't exist, etc.) — just force-create
ln -sfn "${target}" "${symlinkPath}"
chown -h ${user}:${cfg.group} "${symlinkPath}"
```
`ln -sfn` handles: existing symlink (replaces), doesn't exist (creates), and after the `mv` above (creates). The only case that needs special handling is a real directory, because `ln -sfn` cannot atomically replace a directory.
Note: there is a theoretical race between the `[ -d ... ]` check and the `mv` (something could create/remove the directory in between). In practice this is a NixOS activation script running as root during `nixos-rebuild switch` — no other process should be touching `~/.hermes` at that moment. Not worth adding locking for.
### Sudoers — document, don't auto-configure
Do NOT add `security.sudo.extraRules` to the module. Document the sudoers requirement in the module's description/comments and in the error message the CLI prints when sudo probe fails.
### Group membership gating — keep as-is
The fix in 726cf90f (`cfg.container.enable && cfg.container.hostUsers != []`) is correct. Leftover group membership when container mode is disabled is harmless. No cleanup needed.
---
## Spec: Test Rewrite
The existing test file (`tests/hermes_cli/test_container_aware_cli.py`) has 16 tests. With the simplified exec model, several are obsolete.
### Tests to keep (update as needed)
- `test_is_inside_container_dockerenv` — unchanged
- `test_is_inside_container_containerenv` — unchanged
- `test_is_inside_container_cgroup_docker` — unchanged
- `test_is_inside_container_false_on_host` — unchanged
- `test_get_container_exec_info_returns_metadata` — unchanged
- `test_get_container_exec_info_none_inside_container` — unchanged
- `test_get_container_exec_info_none_without_file` — unchanged
- `test_get_container_exec_info_skipped_when_hermes_dev` — unchanged
- `test_get_container_exec_info_not_skipped_when_hermes_dev_zero` — unchanged
- `test_get_container_exec_info_defaults` — unchanged
- `test_get_container_exec_info_docker_backend` — unchanged
### Tests to add
- `test_get_container_exec_info_crashes_on_permission_error` — verify that `PermissionError` propagates (no silent `None` return)
- `test_exec_in_container_calls_execvp` — verify `os.execvp` is called with correct args (runtime, tty flags, user, env, container, binary, cli args)
- `test_exec_in_container_sudo_probe_sets_prefix` — verify that when first probe fails and sudo probe succeeds, `os.execvp` is called with `sudo -n` prefix
- `test_exec_in_container_no_runtime_hard_fails` — keep existing, verify `sys.exit(1)` when `shutil.which` returns None
- `test_exec_in_container_non_tty_uses_i_only` — update to check `os.execvp` args instead of `subprocess.run` args
- `test_exec_in_container_probe_timeout_prints_message` — verify that `subprocess.TimeoutExpired` from the probe produces a human-readable error and `sys.exit(1)`, not a raw traceback
- `test_exec_in_container_container_not_running_no_sudo` — verify the path where runtime exists (`shutil.which` returns a path) but probe returns non-zero and no sudo is available. Should print the "container may be running under root" error. This is distinct from `no_runtime_hard_fails` which covers `shutil.which` returning None.
### Tests to delete
- `test_exec_in_container_tty_retries_on_container_failure` — retry loop removed
- `test_exec_in_container_non_tty_retries_silently_exits_126` — retry loop removed
- `test_exec_in_container_propagates_hermes_exit_code` — no subprocess.run to check exit codes; execvp replaces the process. Note: exit code propagation still works correctly — when `os.execvp` succeeds, the container's process *becomes* this process, so its exit code is the process exit code by OS semantics. No application code needed, no test needed. A comment in the function docstring documents this intent for future readers.
---
## Out of Scope
- Auto-configuring sudoers rules in the NixOS module
- Any changes to `get_container_exec_info` parsing logic beyond the try/except narrowing
- Changes to `.container-mode` file format
- Changes to the `HERMES_DEV=1` bypass
- Changes to container detection logic (`_is_inside_container`)

View file

@ -63,6 +63,7 @@ class Platform(Enum):
WEBHOOK = "webhook"
FEISHU = "feishu"
WECOM = "wecom"
WECOM_CALLBACK = "wecom_callback"
WEIXIN = "weixin"
BLUEBUBBLES = "bluebubbles"
@ -291,9 +292,14 @@ class GatewayConfig:
# Feishu uses extra dict for app credentials
elif platform == Platform.FEISHU and config.extra.get("app_id"):
connected.append(platform)
# WeCom uses extra dict for bot credentials
# WeCom bot mode uses extra dict for bot credentials
elif platform == Platform.WECOM and config.extra.get("bot_id"):
connected.append(platform)
# WeCom callback mode uses corp_id or apps list
elif platform == Platform.WECOM_CALLBACK and (
config.extra.get("corp_id") or config.extra.get("apps")
):
connected.append(platform)
# BlueBubbles uses extra dict for local server config
elif platform == Platform.BLUEBUBBLES and config.extra.get("server_url") and config.extra.get("password"):
connected.append(platform)
@ -987,6 +993,23 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
name=os.getenv("WECOM_HOME_CHANNEL_NAME", "Home"),
)
# WeCom callback mode (self-built apps)
wecom_callback_corp_id = os.getenv("WECOM_CALLBACK_CORP_ID")
wecom_callback_corp_secret = os.getenv("WECOM_CALLBACK_CORP_SECRET")
if wecom_callback_corp_id and wecom_callback_corp_secret:
if Platform.WECOM_CALLBACK not in config.platforms:
config.platforms[Platform.WECOM_CALLBACK] = PlatformConfig()
config.platforms[Platform.WECOM_CALLBACK].enabled = True
config.platforms[Platform.WECOM_CALLBACK].extra.update({
"corp_id": wecom_callback_corp_id,
"corp_secret": wecom_callback_corp_secret,
"agent_id": os.getenv("WECOM_CALLBACK_AGENT_ID", ""),
"token": os.getenv("WECOM_CALLBACK_TOKEN", ""),
"encoding_aes_key": os.getenv("WECOM_CALLBACK_ENCODING_AES_KEY", ""),
"host": os.getenv("WECOM_CALLBACK_HOST", "0.0.0.0"),
"port": int(os.getenv("WECOM_CALLBACK_PORT", "8645")),
})
# Weixin (personal WeChat via iLink Bot API)
weixin_token = os.getenv("WEIXIN_TOKEN")
weixin_account_id = os.getenv("WEIXIN_ACCOUNT_ID")

206
gateway/display_config.py Normal file
View file

@ -0,0 +1,206 @@
"""Per-platform display/verbosity configuration resolver.
Provides ``resolve_display_setting()``, the single entry-point for reading
display settings with platform-specific overrides and sensible defaults.
Resolution order (first non-None wins):
1. ``display.platforms.<platform>.<key>`` explicit per-platform user override
2. ``display.<key>`` global user setting
3. ``_PLATFORM_DEFAULTS[<platform>][<key>]`` built-in sensible default
4. ``_GLOBAL_DEFAULTS[<key>]`` built-in global default
Backward compatibility: ``display.tool_progress_overrides`` is still read as a
fallback for ``tool_progress`` when no ``display.platforms`` entry exists. A
config migration (version bump) automatically moves the old format into the new
``display.platforms`` structure.
"""
from __future__ import annotations
from typing import Any
# ---------------------------------------------------------------------------
# Overrideable display settings and their global defaults
# ---------------------------------------------------------------------------
# These are the settings that can be configured per-platform.
# Other display settings (compact, personality, skin, etc.) are CLI-only
# and don't participate in per-platform resolution.
_GLOBAL_DEFAULTS: dict[str, Any] = {
    "tool_progress": "all",
    "show_reasoning": False,
    "tool_preview_length": 0,
    "streaming": None,  # None = follow top-level streaming config
}

# ---------------------------------------------------------------------------
# Sensible per-platform defaults — tiered by platform capability
# ---------------------------------------------------------------------------
# Tier 1 (high): Supports message editing, typically personal/team use
# Tier 2 (medium): Supports editing but often workspace/customer-facing
# Tier 3 (low): No edit support — each progress msg is permanent
# Tier 4 (minimal): Batch/non-interactive delivery
#
# NOTE: platforms in the same tier share the SAME dict object below —
# treat these tier dicts as read-only; copy before mutating
# (get_platform_defaults returns a copy for exactly this reason).
_TIER_HIGH = {
    "tool_progress": "all",
    "show_reasoning": False,
    "tool_preview_length": 40,
    "streaming": None,  # follow global
}

_TIER_MEDIUM = {
    "tool_progress": "new",
    "show_reasoning": False,
    "tool_preview_length": 40,
    "streaming": None,
}

_TIER_LOW = {
    "tool_progress": "off",
    "show_reasoning": False,
    "tool_preview_length": 40,
    "streaming": False,
}

_TIER_MINIMAL = {
    "tool_progress": "off",
    "show_reasoning": False,
    "tool_preview_length": 0,
    "streaming": False,
}

_PLATFORM_DEFAULTS: dict[str, dict[str, Any]] = {
    # Tier 1 — full edit support, personal/team use
    "telegram": _TIER_HIGH,
    "discord": _TIER_HIGH,
    # Tier 2 — edit support, often customer/workspace channels
    "slack": _TIER_MEDIUM,
    "mattermost": _TIER_MEDIUM,
    "matrix": _TIER_MEDIUM,
    "feishu": _TIER_MEDIUM,
    # Tier 3 — no edit support, progress messages are permanent
    "signal": _TIER_LOW,
    "whatsapp": _TIER_LOW,
    "bluebubbles": _TIER_LOW,
    "weixin": _TIER_LOW,
    "wecom": _TIER_LOW,
    "wecom_callback": _TIER_LOW,
    "dingtalk": _TIER_LOW,
    # Tier 4 — batch or non-interactive delivery
    "email": _TIER_MINIMAL,
    "sms": _TIER_MINIMAL,
    "webhook": _TIER_MINIMAL,
    "homeassistant": _TIER_MINIMAL,
    # api_server: tier-1 defaults, but with tool previews disabled.
    "api_server": {**_TIER_HIGH, "tool_preview_length": 0},
}

# Canonical set of per-platform overrideable keys (for validation).
# Derived from _GLOBAL_DEFAULTS so the two can never drift apart.
OVERRIDEABLE_KEYS = frozenset(_GLOBAL_DEFAULTS.keys())
def resolve_display_setting(
    user_config: dict,
    platform_key: str,
    setting: str,
    fallback: Any = None,
) -> Any:
    """Resolve a display setting, honouring per-platform overrides.

    Lookup order (first non-None hit wins):

    1. ``display.platforms.<platform_key>.<setting>`` — explicit user override
    1b. legacy ``display.tool_progress_overrides.<platform_key>``
        (consulted only for ``tool_progress``)
    2. ``display.<setting>`` — global user setting
    3. built-in per-platform default
    4. built-in global default

    User-supplied values (steps 1-2) are passed through ``_normalise`` to
    smooth over YAML quirks; built-in defaults are returned as-is.

    Parameters
    ----------
    user_config : dict
        The full parsed config.yaml dict.
    platform_key : str
        Platform config key (e.g. ``"telegram"``, ``"slack"``). Use
        ``_platform_config_key(source.platform)`` from gateway/run.py.
    setting : str
        Display setting name (e.g. ``"tool_progress"``, ``"show_reasoning"``).
    fallback : Any
        Returned when the setting isn't found anywhere.

    Returns
    -------
    The resolved value, or *fallback* if nothing is configured.
    """
    display_cfg = user_config.get("display") or {}

    # 1. Explicit per-platform user override (display.platforms.<platform>.<key>)
    per_platform = (display_cfg.get("platforms") or {}).get(platform_key)
    if isinstance(per_platform, dict) and per_platform.get(setting) is not None:
        return _normalise(setting, per_platform[setting])

    # 1b. Backward compat: old display.tool_progress_overrides.<platform> map.
    if setting == "tool_progress":
        legacy = display_cfg.get("tool_progress_overrides")
        if isinstance(legacy, dict) and legacy.get(platform_key) is not None:
            return _normalise(setting, legacy[platform_key])

    # 2. Global user setting (display.<key>)
    if display_cfg.get(setting) is not None:
        return _normalise(setting, display_cfg[setting])

    # 3/4. Built-in defaults: platform-specific first, then global.
    for defaults in (_PLATFORM_DEFAULTS.get(platform_key), _GLOBAL_DEFAULTS):
        if defaults:
            value = defaults.get(setting)
            if value is not None:
                return value

    return fallback
def get_platform_defaults(platform_key: str) -> dict[str, Any]:
    """Return the built-in default display settings for *platform_key*.

    Unknown platforms fall back to ``_GLOBAL_DEFAULTS``. A shallow copy is
    returned, so callers may mutate the result without affecting the shared
    tier dicts.
    """
    defaults = _PLATFORM_DEFAULTS.get(platform_key)
    if defaults is None:
        defaults = _GLOBAL_DEFAULTS
    return dict(defaults)
def get_effective_display(user_config: dict, platform_key: str) -> dict[str, Any]:
    """Return the fully-resolved display settings for *platform_key*.

    Resolves every overrideable key through ``resolve_display_setting``;
    useful for status commands that want to show all effective settings.
    """
    effective: dict[str, Any] = {}
    for key in OVERRIDEABLE_KEYS:
        effective[key] = resolve_display_setting(user_config, platform_key, key)
    return effective
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _normalise(setting: str, value: Any) -> Any:
"""Normalise YAML quirks (bare ``off`` → False in YAML 1.1)."""
if setting == "tool_progress":
if value is False:
return "off"
if value is True:
return "all"
return str(value).lower()
if setting in ("show_reasoning", "streaming"):
if isinstance(value, str):
return value.lower() in ("true", "1", "yes", "on")
return bool(value)
if setting == "tool_preview_length":
try:
return int(value)
except (TypeError, ValueError):
return 0
return value

View file

@ -456,6 +456,7 @@ class DiscordAdapter(BasePlatformAdapter):
# show the standard typing gateway event for bots)
self._typing_tasks: Dict[str, asyncio.Task] = {}
self._bot_task: Optional[asyncio.Task] = None
self._post_connect_task: Optional[asyncio.Task] = None
# Dedup cache: prevents duplicate bot responses when Discord
# RESUME replays events after reconnects.
self._dedup = MessageDeduplicator()
@ -545,15 +546,14 @@ class DiscordAdapter(BasePlatformAdapter):
# Resolve any usernames in the allowed list to numeric IDs
await adapter_self._resolve_allowed_usernames()
# Sync slash commands with Discord
try:
synced = await adapter_self._client.tree.sync()
logger.info("[%s] Synced %d slash command(s)", adapter_self.name, len(synced))
except Exception as e: # pragma: no cover - defensive logging
logger.warning("[%s] Slash command sync failed: %s", adapter_self.name, e, exc_info=True)
adapter_self._ready_event.set()
if adapter_self._post_connect_task and not adapter_self._post_connect_task.done():
adapter_self._post_connect_task.cancel()
adapter_self._post_connect_task = asyncio.create_task(
adapter_self._run_post_connect_initialization()
)
@self._client.event
async def on_message(message: DiscordMessage):
# Dedup: Discord RESUME replays events after reconnects (#4777)
@ -686,14 +686,36 @@ class DiscordAdapter(BasePlatformAdapter):
except Exception as e: # pragma: no cover - defensive logging
logger.warning("[%s] Error during disconnect: %s", self.name, e, exc_info=True)
if self._post_connect_task and not self._post_connect_task.done():
self._post_connect_task.cancel()
try:
await self._post_connect_task
except asyncio.CancelledError:
pass
self._running = False
self._client = None
self._ready_event.clear()
self._post_connect_task = None
self._release_platform_lock()
logger.info("[%s] Disconnected", self.name)
async def _run_post_connect_initialization(self) -> None:
"""Finish non-critical startup work after Discord is connected."""
if not self._client:
return
try:
synced = await asyncio.wait_for(self._client.tree.sync(), timeout=30)
logger.info("[%s] Synced %d slash command(s)", self.name, len(synced))
except asyncio.TimeoutError:
logger.warning("[%s] Slash command sync timed out after 30s", self.name)
except asyncio.CancelledError:
raise
except Exception as e: # pragma: no cover - defensive logging
logger.warning("[%s] Slash command sync failed: %s", self.name, e, exc_info=True)
async def _add_reaction(self, message: Any, emoji: str) -> bool:
"""Add an emoji reaction to a Discord message."""
if not message or not hasattr(message, "add_reaction"):

View file

@ -18,6 +18,7 @@ Environment variables:
MATRIX_REQUIRE_MENTION Require @mention in rooms (default: true)
MATRIX_FREE_RESPONSE_ROOMS Comma-separated room IDs exempt from mention requirement
MATRIX_AUTO_THREAD Auto-create threads for room messages (default: true)
MATRIX_RECOVERY_KEY Recovery key for cross-signing verification after device key rotation
MATRIX_DM_MENTION_THREADS Create a thread when bot is @mentioned in a DM (default: false)
"""
@ -104,7 +105,7 @@ MAX_MESSAGE_LENGTH = 4000
# Uses get_hermes_home() so each profile gets its own Matrix store.
from hermes_constants import get_hermes_dir as _get_hermes_dir
_STORE_DIR = _get_hermes_dir("platforms/matrix/store", "matrix/store")
_CRYPTO_PICKLE_PATH = _STORE_DIR / "crypto_store.pickle"
_CRYPTO_DB_PATH = _STORE_DIR / "crypto.db"
# Grace period: ignore messages older than this many seconds before startup.
_STARTUP_GRACE_SECONDS = 5
@ -165,6 +166,33 @@ def check_matrix_requirements() -> bool:
return True
class _CryptoStateStore:
"""Adapter that satisfies the mautrix crypto StateStore interface.
OlmMachine requires a StateStore with ``is_encrypted``,
``get_encryption_info``, and ``find_shared_rooms``. The basic
``MemoryStateStore`` from ``mautrix.client`` doesn't implement these,
so we provide simple implementations that consult the client's room
state.
"""
def __init__(self, client_state_store: Any, joined_rooms: set):
self._ss = client_state_store
self._joined_rooms = joined_rooms
async def is_encrypted(self, room_id: str) -> bool:
return (await self.get_encryption_info(room_id)) is not None
async def get_encryption_info(self, room_id: str):
if hasattr(self._ss, "get_encryption_info"):
return await self._ss.get_encryption_info(room_id)
return None
async def find_shared_rooms(self, user_id: str) -> list:
# Return all joined rooms — simple but correct for a single-user bot.
return list(self._joined_rooms)
class MatrixAdapter(BasePlatformAdapter):
"""Gateway adapter for Matrix (any homeserver)."""
@ -199,6 +227,7 @@ class MatrixAdapter(BasePlatformAdapter):
)
self._client: Any = None # mautrix.client.Client
self._crypto_db: Any = None # mautrix.util.async_db.Database
self._sync_task: Optional[asyncio.Task] = None
self._closing = False
self._startup_ts: float = 0.0
@ -252,6 +281,92 @@ class MatrixAdapter(BasePlatformAdapter):
self._processed_events_set.add(event_id)
return False
# ------------------------------------------------------------------
# E2EE helpers
# ------------------------------------------------------------------
async def _verify_device_keys_on_server(self, client: Any, olm: Any) -> bool:
"""Verify our device keys are on the homeserver after loading crypto state.
Returns True if keys are valid or were successfully re-uploaded.
Returns False if verification fails (caller should refuse E2EE).
"""
try:
resp = await client.query_keys({client.mxid: [client.device_id]})
except Exception as exc:
logger.error(
"Matrix: cannot verify device keys on server: %s — refusing E2EE", exc,
)
return False
# query_keys returns typed objects (QueryKeysResponse, DeviceKeys
# with KeyID keys). Normalise to plain strings for comparison.
device_keys_map = getattr(resp, "device_keys", {}) or {}
our_user_devices = device_keys_map.get(str(client.mxid)) or {}
our_keys = our_user_devices.get(str(client.device_id))
if not our_keys:
logger.warning("Matrix: device keys missing from server — re-uploading")
olm.account.shared = False
try:
await olm.share_keys()
except Exception as exc:
logger.error("Matrix: failed to re-upload device keys: %s", exc)
return False
return True
# DeviceKeys.keys is a dict[KeyID, str]. Iterate to find the
# ed25519 key rather than constructing a KeyID for lookup.
server_ed25519 = None
keys_dict = getattr(our_keys, "keys", {}) or {}
for key_id, key_value in keys_dict.items():
if str(key_id).startswith("ed25519:"):
server_ed25519 = str(key_value)
break
local_ed25519 = olm.account.identity_keys.get("ed25519")
if server_ed25519 != local_ed25519:
if olm.account.shared:
# Restored account from DB but server has different keys — corrupted state.
logger.error(
"Matrix: server has different identity keys for device %s"
"local crypto state is stale. Delete %s and restart.",
client.device_id,
_CRYPTO_DB_PATH,
)
return False
# Fresh account (never uploaded). Server has stale keys from a
# previous installation. Try to delete the old device and re-upload.
logger.warning(
"Matrix: server has stale keys for device %s — attempting re-upload",
client.device_id,
)
try:
await client.api.request(
client.api.Method.DELETE
if hasattr(client.api, "Method")
else "DELETE",
f"/_matrix/client/v3/devices/{client.device_id}",
)
logger.info("Matrix: deleted stale device %s from server", client.device_id)
except Exception:
# Device deletion often requires UIA or may simply not be
# permitted — that's fine, share_keys will try to overwrite.
pass
try:
await olm.share_keys()
except Exception as exc:
logger.error(
"Matrix: cannot upload device keys for %s: %s. "
"Try generating a new access token to get a fresh device.",
client.device_id,
exc,
)
return False
return True
# ------------------------------------------------------------------
# Required overrides
# ------------------------------------------------------------------
@ -350,54 +465,67 @@ class MatrixAdapter(BasePlatformAdapter):
return False
try:
from mautrix.crypto import OlmMachine
from mautrix.crypto.store import MemoryCryptoStore
from mautrix.crypto.store.asyncpg import PgCryptoStore
from mautrix.util.async_db import Database
_STORE_DIR.mkdir(parents=True, exist_ok=True)
# Remove legacy pickle file from pre-SQLite era.
legacy_pickle = _STORE_DIR / "crypto_store.pickle"
if legacy_pickle.exists():
logger.info("Matrix: removing legacy crypto_store.pickle (migrated to SQLite)")
legacy_pickle.unlink()
# Open SQLite-backed crypto store.
crypto_db = Database.create(
f"sqlite:///{_CRYPTO_DB_PATH}",
upgrade_table=PgCryptoStore.upgrade_table,
)
await crypto_db.start()
self._crypto_db = crypto_db
# account_id and pickle_key are required by mautrix ≥0.21.
# Use the Matrix user ID as account_id for stable identity.
# pickle_key secures in-memory serialisation; derive from
# the same user_id:device_id pair used for the on-disk HMAC.
_acct_id = self._user_id or "hermes"
_pickle_key = f"{_acct_id}:{self._device_id}"
crypto_store = MemoryCryptoStore(
_pickle_key = f"{_acct_id}:{self._device_id or 'default'}"
crypto_store = PgCryptoStore(
account_id=_acct_id,
pickle_key=_pickle_key,
db=crypto_db,
)
await crypto_store.open()
# Restore persisted crypto state from a previous run.
# Uses HMAC to verify integrity before unpickling.
pickle_path = _CRYPTO_PICKLE_PATH
if pickle_path.exists():
try:
import hashlib, hmac, pickle
raw = pickle_path.read_bytes()
# Format: 32-byte HMAC-SHA256 signature + pickle data.
if len(raw) > 32:
sig, payload = raw[:32], raw[32:]
# Key is derived from the device_id + user_id (stable per install).
hmac_key = f"{self._user_id}:{self._device_id}".encode()
expected = hmac.new(hmac_key, payload, hashlib.sha256).digest()
if hmac.compare_digest(sig, expected):
saved = pickle.loads(payload) # noqa: S301
if isinstance(saved, MemoryCryptoStore):
crypto_store = saved
logger.info("Matrix: restored E2EE crypto store from %s", pickle_path)
else:
logger.warning("Matrix: crypto store HMAC mismatch — ignoring stale/tampered file")
except Exception as exc:
logger.warning("Matrix: could not restore crypto store: %s", exc)
crypto_state = _CryptoStateStore(state_store, self._joined_rooms)
olm = OlmMachine(client, crypto_store, crypto_state)
olm = OlmMachine(client, crypto_store, state_store)
# Set trust policy: accept unverified devices so senders
# share Megolm session keys with us automatically.
# Accept unverified devices so senders share Megolm
# session keys with us automatically.
olm.share_keys_min_trust = TrustState.UNVERIFIED
olm.send_keys_min_trust = TrustState.UNVERIFIED
await olm.load()
# Verify our device keys are still on the homeserver.
if not await self._verify_device_keys_on_server(client, olm):
await crypto_db.stop()
await api.session.close()
return False
# Import cross-signing private keys from SSSS and self-sign
# the current device. Required after any device-key rotation
# (fresh crypto.db, share_keys re-upload) — otherwise the
# device's self-signing signature is stale and peers refuse
# to share Megolm sessions with the rotated device.
recovery_key = os.getenv("MATRIX_RECOVERY_KEY", "").strip()
if recovery_key:
try:
await olm.verify_with_recovery_key(recovery_key)
logger.info("Matrix: cross-signing verified via recovery key")
except Exception as exc:
logger.warning("Matrix: recovery key verification failed: %s", exc)
client.crypto = olm
logger.info(
"Matrix: E2EE enabled (store: %s%s)",
str(_STORE_DIR),
str(_CRYPTO_DB_PATH),
f", device_id={client.device_id}" if client.device_id else "",
)
except Exception as exc:
@ -438,6 +566,15 @@ class MatrixAdapter(BasePlatformAdapter):
)
# Build DM room cache from m.direct account data.
await self._refresh_dm_cache()
# Dispatch events from the initial sync so the OlmMachine
# receives to-device key shares queued while we were offline.
try:
tasks = client.handle_sync(sync_data)
if tasks:
await asyncio.gather(*tasks)
except Exception as exc:
logger.warning("Matrix: initial sync event dispatch error: %s", exc)
else:
logger.warning("Matrix: initial sync returned unexpected type %s", type(sync_data).__name__)
except Exception as exc:
@ -466,21 +603,12 @@ class MatrixAdapter(BasePlatformAdapter):
except (asyncio.CancelledError, Exception):
pass
# Persist E2EE crypto store before closing so the next restart
# can decrypt events using sessions from this run.
if self._client and self._encryption and getattr(self._client, "crypto", None):
# Close the SQLite crypto store database.
if hasattr(self, "_crypto_db") and self._crypto_db:
try:
import hashlib, hmac, pickle
crypto_store = self._client.crypto.crypto_store
_STORE_DIR.mkdir(parents=True, exist_ok=True)
pickle_path = _CRYPTO_PICKLE_PATH
payload = pickle.dumps(crypto_store)
hmac_key = f"{self._user_id}:{self._device_id}".encode()
sig = hmac.new(hmac_key, payload, hashlib.sha256).digest()
pickle_path.write_bytes(sig + payload)
logger.info("Matrix: persisted E2EE crypto store to %s", pickle_path)
await self._crypto_db.stop()
except Exception as exc:
logger.debug("Matrix: could not persist crypto store on disconnect: %s", exc)
logger.debug("Matrix: could not close crypto DB on disconnect: %s", exc)
if self._client:
try:
@ -853,13 +981,6 @@ class MatrixAdapter(BasePlatformAdapter):
except Exception as exc:
logger.warning("Matrix: sync event dispatch error: %s", exc)
# Share keys periodically if E2EE is enabled.
if self._encryption and getattr(client, "crypto", None):
try:
await client.crypto.share_keys()
except Exception as exc:
logger.warning("Matrix: E2EE key share failed: %s", exc)
# Retry any buffered undecrypted events.
if self._pending_megolm:
await self._retry_pending_decryptions()

View file

@ -201,6 +201,7 @@ class WebhookAdapter(BasePlatformAdapter):
"dingtalk",
"feishu",
"wecom",
"wecom_callback",
"weixin",
"bluebubbles",
):

View file

@ -0,0 +1,387 @@
"""WeCom callback-mode adapter for self-built enterprise applications.
Unlike the bot/websocket adapter in ``wecom.py``, this handles the standard
WeCom callback flow: WeCom POSTs encrypted XML to an HTTP endpoint, the
adapter decrypts it, queues the message for the agent, and immediately
acknowledges. The agent's reply is delivered later via the proactive
``message/send`` API using an access-token.
Supports multiple self-built apps under one gateway instance, scoped by
``corp_id:user_id`` to avoid cross-corp collisions.
"""
from __future__ import annotations
import asyncio
import logging
import socket as _socket
import time
from typing import Any, Dict, List, Optional
from xml.etree import ElementTree as ET
try:
from aiohttp import web
AIOHTTP_AVAILABLE = True
except ImportError:
web = None # type: ignore[assignment]
AIOHTTP_AVAILABLE = False
try:
import httpx
HTTPX_AVAILABLE = True
except ImportError:
httpx = None # type: ignore[assignment]
HTTPX_AVAILABLE = False
from gateway.config import Platform, PlatformConfig
from gateway.platforms.base import BasePlatformAdapter, MessageEvent, MessageType, SendResult
from gateway.platforms.wecom_crypto import WXBizMsgCrypt, WeComCryptoError
logger = logging.getLogger(__name__)
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = 8645
DEFAULT_PATH = "/wecom/callback"
ACCESS_TOKEN_TTL_SECONDS = 7200
MESSAGE_DEDUP_TTL_SECONDS = 300
def check_wecom_callback_requirements() -> bool:
    """Callback mode needs both aiohttp (server) and httpx (outbound API)."""
    if not AIOHTTP_AVAILABLE:
        return False
    return HTTPX_AVAILABLE
class WecomCallbackAdapter(BasePlatformAdapter):
    """Callback-mode WeCom adapter for self-built enterprise applications.

    WeCom POSTs encrypted XML to the embedded aiohttp endpoint; the adapter
    decrypts it, queues the message for the agent, and acknowledges
    immediately. Replies are delivered later via the proactive
    ``message/send`` API using a per-app access token. Multiple self-built
    apps are supported under one gateway instance; chats are scoped by
    ``corp_id:user_id`` to avoid cross-corp collisions.
    """

    def __init__(self, config: PlatformConfig):
        super().__init__(config, Platform.WECOM_CALLBACK)
        extra = config.extra or {}
        # HTTP listener settings for the callback endpoint.
        self._host = str(extra.get("host") or DEFAULT_HOST)
        self._port = int(extra.get("port") or DEFAULT_PORT)
        self._path = str(extra.get("path") or DEFAULT_PATH)
        self._apps: List[Dict[str, Any]] = self._normalize_apps(extra)
        self._runner: Optional[web.AppRunner] = None
        self._site: Optional[web.TCPSite] = None
        self._app: Optional[web.Application] = None
        self._http_client: Optional[httpx.AsyncClient] = None
        self._message_queue: asyncio.Queue[MessageEvent] = asyncio.Queue()
        self._poll_task: Optional[asyncio.Task] = None
        # msg_id -> receipt time; reserved for dedup (not consulted yet).
        self._seen_messages: Dict[str, float] = {}
        # "corp_id:user_id" -> app name, learned from inbound callbacks.
        self._user_app_map: Dict[str, str] = {}
        # app name -> {"token": str, "expires_at": float}
        self._access_tokens: Dict[str, Dict[str, Any]] = {}

    # ------------------------------------------------------------------
    # App normalisation
    # ------------------------------------------------------------------
    @staticmethod
    def _user_app_key(corp_id: str, user_id: str) -> str:
        """Scope *user_id* by corp so identical ids across corps don't collide."""
        return f"{corp_id}:{user_id}" if corp_id else user_id

    @staticmethod
    def _normalize_apps(extra: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Return the configured apps in canonical form.

        Accepts either an explicit ``apps`` list or legacy single-app keys
        at the top level of *extra*.

        FIX: every returned entry is now guaranteed to carry a ``name``
        key. ``_handle_callback`` and ``_refresh_access_token`` index
        ``app["name"]`` unconditionally, and entries from a user-supplied
        ``apps`` list may omit it — previously that raised KeyError on the
        first inbound message.
        """
        apps = extra.get("apps")
        if isinstance(apps, list) and apps:
            normalized: List[Dict[str, Any]] = []
            for index, app in enumerate(apps):
                if not isinstance(app, dict):
                    continue
                entry = dict(app)
                if not entry.get("name"):
                    # Prefer the corp id as a stable fallback name.
                    entry["name"] = str(entry.get("corp_id") or f"app{index}")
                normalized.append(entry)
            return normalized
        if extra.get("corp_id"):
            return [
                {
                    "name": extra.get("name") or "default",
                    "corp_id": extra.get("corp_id", ""),
                    "corp_secret": extra.get("corp_secret", ""),
                    "agent_id": str(extra.get("agent_id", "")),
                    "token": extra.get("token", ""),
                    "encoding_aes_key": extra.get("encoding_aes_key", ""),
                }
            ]
        return []

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------
    async def connect(self) -> bool:
        """Start the callback HTTP server and warm up access tokens.

        Returns False (without raising) when configuration, dependencies,
        or the listen port make startup impossible.
        """
        if not self._apps:
            logger.warning("[WecomCallback] No callback apps configured")
            return False
        if not check_wecom_callback_requirements():
            logger.warning("[WecomCallback] aiohttp/httpx not installed")
            return False
        # Quick port-in-use check: if something accepts on the port,
        # bail out early with a clear error instead of failing in aiohttp.
        try:
            with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as sock:
                sock.settimeout(1)
                sock.connect(("127.0.0.1", self._port))
                logger.error("[WecomCallback] Port %d already in use", self._port)
                return False
        except (ConnectionRefusedError, OSError):
            pass  # nothing listening — good
        try:
            self._http_client = httpx.AsyncClient(timeout=20.0)
            self._app = web.Application()
            self._app.router.add_get("/health", self._handle_health)
            self._app.router.add_get(self._path, self._handle_verify)
            self._app.router.add_post(self._path, self._handle_callback)
            self._runner = web.AppRunner(self._app)
            await self._runner.setup()
            self._site = web.TCPSite(self._runner, self._host, self._port)
            await self._site.start()
            self._poll_task = asyncio.create_task(self._poll_loop())
            self._mark_connected()
            logger.info(
                "[WecomCallback] HTTP server listening on %s:%s%s",
                self._host, self._port, self._path,
            )
            # Warm the token cache; failure is non-fatal (retried on send).
            for app in self._apps:
                try:
                    await self._refresh_access_token(app)
                except Exception as exc:
                    logger.warning(
                        "[WecomCallback] Initial token refresh failed for app '%s': %s",
                        app.get("name", "default"), exc,
                    )
            return True
        except Exception:
            await self._cleanup()
            logger.exception("[WecomCallback] Failed to start")
            return False

    async def disconnect(self) -> None:
        """Stop the dispatcher task and tear down the HTTP server/client."""
        self._running = False
        if self._poll_task:
            self._poll_task.cancel()
            try:
                await self._poll_task
            except asyncio.CancelledError:
                pass
            self._poll_task = None
        await self._cleanup()
        self._mark_disconnected()
        logger.info("[WecomCallback] Disconnected")

    async def _cleanup(self) -> None:
        """Release server and HTTP-client resources (idempotent)."""
        self._site = None
        if self._runner:
            await self._runner.cleanup()
            self._runner = None
        self._app = None
        if self._http_client:
            await self._http_client.aclose()
            self._http_client = None

    # ------------------------------------------------------------------
    # Outbound: proactive send via access-token API
    # ------------------------------------------------------------------
    async def send(
        self,
        chat_id: str,
        content: str,
        reply_to: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> SendResult:
        """Deliver *content* to *chat_id* through WeCom's message/send API.

        *chat_id* may be a scoped ``corp_id:user_id`` key or a legacy bare
        user id. Content is truncated to WeCom's 2048-char text limit.
        """
        app = self._resolve_app_for_chat(chat_id)
        # Strip the corp scope — the API wants the bare user id.
        touser = chat_id.split(":", 1)[1] if ":" in chat_id else chat_id
        try:
            token = await self._get_access_token(app)
            payload = {
                "touser": touser,
                "msgtype": "text",
                "agentid": int(str(app.get("agent_id") or 0)),
                "text": {"content": content[:2048]},
                "safe": 0,
            }
            resp = await self._http_client.post(
                f"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}",
                json=payload,
            )
            data = resp.json()
            if data.get("errcode") != 0:
                return SendResult(success=False, error=str(data))
            return SendResult(
                success=True,
                message_id=str(data.get("msgid", "")),
                raw_response=data,
            )
        except Exception as exc:
            return SendResult(success=False, error=str(exc))

    def _resolve_app_for_chat(self, chat_id: str) -> Dict[str, Any]:
        """Pick the app associated with *chat_id*, falling back sensibly."""
        app_name = self._user_app_map.get(chat_id)
        if not app_name and ":" not in chat_id:
            # Legacy bare user_id — try to find a unique match.
            matching = [k for k in self._user_app_map if k.endswith(f":{chat_id}")]
            if len(matching) == 1:
                app_name = self._user_app_map.get(matching[0])
        app = self._get_app_by_name(app_name) if app_name else None
        # _apps is non-empty whenever the adapter is connected (see connect()).
        return app or self._apps[0]

    async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
        """Callback mode only sees direct user chats; report them as DMs."""
        return {"name": chat_id, "type": "dm"}

    # ------------------------------------------------------------------
    # Inbound: HTTP callback handlers
    # ------------------------------------------------------------------
    async def _handle_health(self, request: web.Request) -> web.Response:
        """Liveness probe endpoint."""
        return web.json_response({"status": "ok", "platform": "wecom_callback"})

    async def _handle_verify(self, request: web.Request) -> web.Response:
        """GET endpoint — WeCom URL verification handshake.

        We don't know which app the handshake is for, so try each app's
        crypto config until one validates the signature.
        """
        msg_signature = request.query.get("msg_signature", "")
        timestamp = request.query.get("timestamp", "")
        nonce = request.query.get("nonce", "")
        echostr = request.query.get("echostr", "")
        for app in self._apps:
            try:
                crypt = self._crypt_for_app(app)
                plain = crypt.verify_url(msg_signature, timestamp, nonce, echostr)
                return web.Response(text=plain, content_type="text/plain")
            except Exception:
                continue  # wrong app — try the next one
        return web.Response(status=403, text="signature verification failed")

    async def _handle_callback(self, request: web.Request) -> web.Response:
        """POST endpoint — receive an encrypted message callback."""
        msg_signature = request.query.get("msg_signature", "")
        timestamp = request.query.get("timestamp", "")
        nonce = request.query.get("nonce", "")
        body = await request.text()
        for app in self._apps:
            try:
                decrypted = self._decrypt_request(
                    app, body, msg_signature, timestamp, nonce,
                )
                event = self._build_event(app, decrypted)
                if event is not None:
                    # Record which app this user belongs to so replies go
                    # out through the right credentials.
                    if event.source and event.source.user_id:
                        map_key = self._user_app_key(
                            str(app.get("corp_id") or ""), event.source.user_id,
                        )
                        self._user_app_map[map_key] = app["name"]
                    await self._message_queue.put(event)
                # Immediately acknowledge — the agent's reply will arrive
                # later via the proactive message/send API.
                return web.Response(text="success", content_type="text/plain")
            except WeComCryptoError:
                continue  # signature/decrypt mismatch — try the next app
            except Exception:
                logger.exception("[WecomCallback] Error handling message")
                break
        return web.Response(status=400, text="invalid callback payload")

    async def _poll_loop(self) -> None:
        """Drain the message queue and dispatch to the gateway runner."""
        while True:
            event = await self._message_queue.get()
            try:
                # Dispatch concurrently; track the task so it isn't GC'd.
                task = asyncio.create_task(self.handle_message(event))
                self._background_tasks.add(task)
                task.add_done_callback(self._background_tasks.discard)
            except Exception:
                logger.exception("[WecomCallback] Failed to enqueue event")

    # ------------------------------------------------------------------
    # XML / crypto helpers
    # ------------------------------------------------------------------
    def _decrypt_request(
        self, app: Dict[str, Any], body: str,
        msg_signature: str, timestamp: str, nonce: str,
    ) -> str:
        """Extract and decrypt the <Encrypt> element of a callback body.

        Raises WeComCryptoError when the signature or key doesn't match.
        """
        root = ET.fromstring(body)
        encrypt = root.findtext("Encrypt", default="")
        crypt = self._crypt_for_app(app)
        return crypt.decrypt(msg_signature, timestamp, nonce, encrypt).decode("utf-8")

    def _build_event(self, app: Dict[str, Any], xml_text: str) -> Optional[MessageEvent]:
        """Translate decrypted WeCom XML into a MessageEvent (or None to ack silently)."""
        root = ET.fromstring(xml_text)
        msg_type = (root.findtext("MsgType") or "").lower()
        # Silently acknowledge lifecycle events.
        if msg_type == "event":
            event_name = (root.findtext("Event") or "").lower()
            if event_name in {"enter_agent", "subscribe"}:
                return None
        if msg_type not in {"text", "event"}:
            return None
        user_id = root.findtext("FromUserName", default="")
        corp_id = root.findtext("ToUserName", default=app.get("corp_id", ""))
        scoped_chat_id = self._user_app_key(corp_id, user_id)
        content = root.findtext("Content", default="").strip()
        if not content and msg_type == "event":
            # Events carry no text; treat them as a conversation opener.
            content = "/start"
        # MsgId may be absent on events — synthesise a stable fallback.
        msg_id = (
            root.findtext("MsgId")
            or f"{user_id}:{root.findtext('CreateTime', default='0')}"
        )
        source = self.build_source(
            chat_id=scoped_chat_id,
            chat_name=user_id,
            chat_type="dm",
            user_id=user_id,
            user_name=user_id,
        )
        return MessageEvent(
            text=content,
            message_type=MessageType.TEXT,
            source=source,
            raw_message=xml_text,
            message_id=msg_id,
        )

    def _crypt_for_app(self, app: Dict[str, Any]) -> WXBizMsgCrypt:
        """Build a crypto helper from *app*'s token/AES-key/corp-id config."""
        return WXBizMsgCrypt(
            token=str(app.get("token") or ""),
            encoding_aes_key=str(app.get("encoding_aes_key") or ""),
            receive_id=str(app.get("corp_id") or ""),
        )

    def _get_app_by_name(self, name: Optional[str]) -> Optional[Dict[str, Any]]:
        """Look up a configured app by its ``name`` key."""
        if not name:
            return None
        for app in self._apps:
            if app.get("name") == name:
                return app
        return None

    # ------------------------------------------------------------------
    # Access-token management
    # ------------------------------------------------------------------
    async def _get_access_token(self, app: Dict[str, Any]) -> str:
        """Return a cached access token for *app*, refreshing near expiry."""
        cached = self._access_tokens.get(app["name"])
        now = time.time()
        # 60s safety margin so a token never expires mid-request.
        if cached and cached.get("expires_at", 0) > now + 60:
            return cached["token"]
        return await self._refresh_access_token(app)

    async def _refresh_access_token(self, app: Dict[str, Any]) -> str:
        """Fetch a fresh access token from WeCom and cache it.

        Raises RuntimeError when WeCom reports a non-zero errcode.
        """
        resp = await self._http_client.get(
            "https://qyapi.weixin.qq.com/cgi-bin/gettoken",
            params={
                "corpid": app.get("corp_id"),
                "corpsecret": app.get("corp_secret"),
            },
        )
        data = resp.json()
        if data.get("errcode") != 0:
            raise RuntimeError(f"WeCom token refresh failed: {data}")
        token = data["access_token"]
        expires_in = int(data.get("expires_in", ACCESS_TOKEN_TTL_SECONDS))
        self._access_tokens[app["name"]] = {
            "token": token,
            "expires_at": time.time() + expires_in,
        }
        logger.info(
            "[WecomCallback] Token refreshed for app '%s' (corp=%s), expires in %ss",
            app.get("name", "default"),
            app.get("corp_id", ""),
            expires_in,
        )
        return token

View file

@ -0,0 +1,142 @@
"""WeCom BizMsgCrypt-compatible AES-CBC encryption for callback mode.
Implements the same wire format as Tencent's official ``WXBizMsgCrypt``
SDK so that WeCom can verify, encrypt, and decrypt callback payloads.
"""
from __future__ import annotations
import base64
import hashlib
import hmac
import os
import secrets
import socket
import struct
import time
from typing import Optional
from xml.etree import ElementTree as ET

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
class WeComCryptoError(Exception):
    """Base class for all WeCom callback crypto failures."""
    pass
class SignatureError(WeComCryptoError):
    """SHA-1 callback signature did not match the request parameters."""
    pass
class DecryptError(WeComCryptoError):
    """Payload could not be base64-decoded, AES-decrypted, or validated."""
    pass
class EncryptError(WeComCryptoError):
    """Outbound payload could not be AES-encrypted/encoded."""
    pass
class PKCS7Encoder:
    """PKCS#7 padding helper using WeCom's 32-byte block size."""

    block_size = 32

    @classmethod
    def encode(cls, text: bytes) -> bytes:
        """Pad *text* up to a whole block; a full block is added when already aligned."""
        pad_len = cls.block_size - (len(text) % cls.block_size)
        if pad_len == 0:
            pad_len = cls.block_size
        return text + bytes([pad_len] * pad_len)

    @classmethod
    def decode(cls, decrypted: bytes) -> bytes:
        """Strip and validate PKCS#7 padding, raising DecryptError when malformed."""
        if not decrypted:
            raise DecryptError("empty decrypted payload")
        pad_len = decrypted[-1]
        if not 1 <= pad_len <= cls.block_size:
            raise DecryptError("invalid PKCS7 padding")
        if not decrypted.endswith(bytes([pad_len]) * pad_len):
            raise DecryptError("malformed PKCS7 padding")
        return decrypted[:-pad_len]
def _sha1_signature(token: str, timestamp: str, nonce: str, encrypt: str) -> str:
parts = sorted([token, timestamp, nonce, encrypt])
return hashlib.sha1("".join(parts).encode("utf-8")).hexdigest()
class WXBizMsgCrypt:
    """Minimal WeCom callback crypto helper compatible with BizMsgCrypt semantics.

    Wire format (inside AES-CBC, PKCS#7 padded): 16 random bytes,
    a 4-byte network-order length, the XML payload, then the receive_id.
    """

    def __init__(self, token: str, encoding_aes_key: str, receive_id: str):
        """Validate config and derive the 32-byte AES key / 16-byte IV.

        Raises ValueError when any field is missing or the AES key is not
        the 43-char base64 form WeCom issues.
        """
        if not token:
            raise ValueError("token is required")
        if not encoding_aes_key:
            raise ValueError("encoding_aes_key is required")
        if len(encoding_aes_key) != 43:
            raise ValueError("encoding_aes_key must be 43 chars")
        if not receive_id:
            raise ValueError("receive_id is required")
        self.token = token
        self.receive_id = receive_id
        # WeCom distributes the key without its trailing '=' padding.
        self.key = base64.b64decode(encoding_aes_key + "=")
        self.iv = self.key[:16]

    def verify_url(self, msg_signature: str, timestamp: str, nonce: str, echostr: str) -> str:
        """URL-verification handshake: decrypt echostr and return it as text."""
        plain = self.decrypt(msg_signature, timestamp, nonce, echostr)
        return plain.decode("utf-8")

    def decrypt(self, msg_signature: str, timestamp: str, nonce: str, encrypt: str) -> bytes:
        """Verify the signature and return the decrypted XML payload bytes.

        Raises SignatureError on signature mismatch, DecryptError on any
        decoding/decryption/receive_id failure.
        """
        expected = _sha1_signature(self.token, timestamp, nonce, encrypt)
        # FIX: msg_signature is attacker-controlled request input — compare
        # in constant time instead of with != to avoid timing side channels.
        if not hmac.compare_digest(expected, msg_signature):
            raise SignatureError("signature mismatch")
        try:
            cipher_text = base64.b64decode(encrypt)
        except Exception as exc:
            raise DecryptError(f"invalid base64 payload: {exc}") from exc
        try:
            cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.iv), backend=default_backend())
            decryptor = cipher.decryptor()
            padded = decryptor.update(cipher_text) + decryptor.finalize()
            plain = PKCS7Encoder.decode(padded)
            content = plain[16:]  # skip 16-byte random prefix
            xml_length = socket.ntohl(struct.unpack("I", content[:4])[0])
            xml_content = content[4:4 + xml_length]
            receive_id = content[4 + xml_length:].decode("utf-8")
        except WeComCryptoError:
            raise
        except Exception as exc:
            raise DecryptError(f"decrypt failed: {exc}") from exc
        if receive_id != self.receive_id:
            raise DecryptError("receive_id mismatch")
        return xml_content

    def encrypt(self, plaintext: str, nonce: Optional[str] = None, timestamp: Optional[str] = None) -> str:
        """Encrypt *plaintext* and wrap it in the signed <xml> envelope WeCom expects."""
        nonce = nonce or self._random_nonce()
        # FIX: use the module-level time import instead of the inline
        # __import__("time") hack.
        timestamp = timestamp or str(int(time.time()))
        encrypt = self._encrypt_bytes(plaintext.encode("utf-8"))
        signature = _sha1_signature(self.token, timestamp, nonce, encrypt)
        root = ET.Element("xml")
        ET.SubElement(root, "Encrypt").text = encrypt
        ET.SubElement(root, "MsgSignature").text = signature
        ET.SubElement(root, "TimeStamp").text = timestamp
        ET.SubElement(root, "Nonce").text = nonce
        return ET.tostring(root, encoding="unicode")

    def _encrypt_bytes(self, raw: bytes) -> str:
        """AES-CBC encrypt the framed payload; returns base64 text.

        Raises EncryptError on any failure.
        """
        try:
            random_prefix = os.urandom(16)
            msg_len = struct.pack("I", socket.htonl(len(raw)))
            payload = random_prefix + msg_len + raw + self.receive_id.encode("utf-8")
            padded = PKCS7Encoder.encode(payload)
            cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.iv), backend=default_backend())
            encryptor = cipher.encryptor()
            encrypted = encryptor.update(padded) + encryptor.finalize()
            return base64.b64encode(encrypted).decode("utf-8")
        except Exception as exc:
            raise EncryptError(f"encrypt failed: {exc}") from exc

    @staticmethod
    def _random_nonce(length: int = 10) -> str:
        """Cryptographically random alphanumeric nonce of *length* chars."""
        alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        return "".join(secrets.choice(alphabet) for _ in range(length))

View file

@ -734,6 +734,42 @@ def _split_delivery_units_for_weixin(content: str) -> List[str]:
return [unit for unit in units if unit]
def _looks_like_chatty_line_for_weixin(line: str) -> bool:
"""Return True when a line looks like a standalone chat utterance."""
stripped = line.strip()
if not stripped:
return False
if len(stripped) > 48:
return False
if line.startswith((" ", "\t")):
return False
if stripped.startswith((">", "-", "*", "")):
return False
if re.match(r"^\*\*[^*]+\*\*$", stripped):
return False
if re.match(r"^\d+\.\s", stripped):
return False
return True
def _looks_like_heading_line_for_weixin(line: str) -> bool:
"""Return True when a short line behaves like a plain-text heading."""
stripped = line.strip()
if not stripped:
return False
return len(stripped) <= 24 and stripped.endswith((":", ""))
def _should_split_short_chat_block_for_weixin(block: str) -> bool:
    """Split only chat-like multiline blocks into separate bubbles.

    True iff the block has 2-6 non-empty lines, the first line is not a
    heading, and every line reads like a standalone chat utterance.
    """
    candidates = [item for item in block.splitlines() if item.strip()]
    if len(candidates) < 2 or len(candidates) > 6:
        return False
    if _looks_like_heading_line_for_weixin(candidates[0]):
        return False
    for candidate in candidates:
        if not _looks_like_chatty_line_for_weixin(candidate):
            return False
    return True
def _pack_markdown_blocks_for_weixin(content: str, max_length: int) -> List[str]:
if len(content) <= max_length:
return [content]
@ -787,9 +823,15 @@ def _split_text_for_weixin_delivery(
chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length))
return chunks or [content]
# Compact (default): single message when under the limit.
# Compact (default): single message when under the limit — unless the
# content looks like a short chatty exchange, in which case split into
# separate bubbles for a more natural chat feel.
if len(content) <= max_length:
return [content]
return (
_split_delivery_units_for_weixin(content)
if _should_split_short_chat_block_for_weixin(content)
else [content]
)
return _pack_markdown_blocks_for_weixin(content, max_length) or [content]

View file

@ -76,7 +76,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent))
# Resolve Hermes home directory (respects HERMES_HOME override)
from hermes_constants import get_hermes_home
from utils import atomic_yaml_write
from utils import atomic_yaml_write, is_truthy_value
_hermes_home = get_hermes_home()
# Load environment variables from ~/.hermes/.env first.
@ -206,6 +206,15 @@ if _config_path.exists():
except Exception:
pass # Non-fatal; gateway can still run with .env values
# Apply IPv4 preference if configured (before any HTTP clients are created).
try:
from hermes_constants import apply_ipv4_preference
_network_cfg = (_cfg if '_cfg' in dir() else {}).get("network", {})
if isinstance(_network_cfg, dict) and _network_cfg.get("force_ipv4"):
apply_ipv4_preference(force=True)
except Exception:
pass
# Validate config structure early — log warnings so gateway operators see problems
try:
from hermes_cli.config import print_config_warnings
@ -867,13 +876,47 @@ class GatewayRunner:
"api_mode": override.get("api_mode"),
}
if override_runtime.get("api_key"):
logger.debug(
"Session model override (fast): session=%s config_model=%s -> override_model=%s provider=%s",
(resolved_session_key or "")[:30], model, override_model,
override_runtime.get("provider"),
)
return override_model, override_runtime
# Override exists but has no api_key — fall through to env-based
# resolution and apply model/provider from the override on top.
logger.debug(
"Session model override (no api_key, fallback): session=%s config_model=%s override_model=%s",
(resolved_session_key or "")[:30], model, override_model,
)
else:
logger.debug(
"No session model override: session=%s config_model=%s override_keys=%s",
(resolved_session_key or "")[:30], model,
list(self._session_model_overrides.keys())[:5] if self._session_model_overrides else "[]",
)
runtime_kwargs = _resolve_runtime_agent_kwargs()
if override and resolved_session_key:
model, runtime_kwargs = self._apply_session_model_override(
resolved_session_key, model, runtime_kwargs
)
# When the config has no model.default but a provider was resolved
# (e.g. user ran `hermes auth add openai-codex` without `hermes model`),
# fall back to the provider's first catalog model so the API call
# doesn't fail with "model must be a non-empty string".
if not model and runtime_kwargs.get("provider"):
try:
from hermes_cli.models import get_default_model_for_provider
model = get_default_model_for_provider(runtime_kwargs["provider"])
if model:
logger.info(
"No model configured — defaulting to %s for provider %s",
model, runtime_kwargs["provider"],
)
except Exception:
pass
return model, runtime_kwargs
def _resolve_turn_agent_config(self, user_message: str, model: str, runtime_kwargs: dict) -> dict:
@ -916,6 +959,12 @@ class GatewayRunner:
adapter.fatal_error_code or "unknown",
adapter.fatal_error_message or "unknown error",
)
self._update_platform_runtime_status(
adapter.platform.value,
platform_state="retrying" if adapter.fatal_error_retryable else "fatal",
error_code=adapter.fatal_error_code,
error_message=adapter.fatal_error_message,
)
existing = self.adapters.get(adapter.platform)
if existing is adapter:
@ -993,6 +1042,25 @@ class GatewayRunner:
)
except Exception:
pass
def _update_platform_runtime_status(
self,
platform: str,
*,
platform_state: Optional[str] = None,
error_code: Optional[str] = None,
error_message: Optional[str] = None,
) -> None:
try:
from gateway.status import write_runtime_status
write_runtime_status(
platform=platform,
platform_state=platform_state,
error_code=error_code,
error_message=error_message,
)
except Exception:
pass
@staticmethod
def _load_prefill_messages() -> List[Dict[str, Any]]:
@ -1426,6 +1494,7 @@ class GatewayRunner:
"MATRIX_ALLOWED_USERS", "DINGTALK_ALLOWED_USERS",
"FEISHU_ALLOWED_USERS",
"WECOM_ALLOWED_USERS",
"WECOM_CALLBACK_ALLOWED_USERS",
"WEIXIN_ALLOWED_USERS",
"BLUEBUBBLES_ALLOWED_USERS",
"GATEWAY_ALLOWED_USERS")
@ -1439,6 +1508,7 @@ class GatewayRunner:
"MATRIX_ALLOW_ALL_USERS", "DINGTALK_ALLOW_ALL_USERS",
"FEISHU_ALLOW_ALL_USERS",
"WECOM_ALLOW_ALL_USERS",
"WECOM_CALLBACK_ALLOW_ALL_USERS",
"WEIXIN_ALLOW_ALL_USERS",
"BLUEBUBBLES_ALLOW_ALL_USERS")
)
@ -1465,12 +1535,25 @@ class GatewayRunner:
# This prevents stuck sessions from being blindly resumed on restart,
# which can create an unrecoverable loop (#7536). Suspended sessions
# auto-reset on the next incoming message, giving the user a clean start.
try:
suspended = self.session_store.suspend_recently_active()
if suspended:
logger.info("Suspended %d in-flight session(s) from previous run", suspended)
except Exception as e:
logger.warning("Session suspension on startup failed: %s", e)
#
# SKIP suspension after a clean (graceful) shutdown — the previous
# process already drained active agents, so sessions aren't stuck.
# This prevents unwanted auto-resets after `hermes update`,
# `hermes gateway restart`, or `/restart`.
_clean_marker = _hermes_home / ".clean_shutdown"
if _clean_marker.exists():
logger.info("Previous gateway exited cleanly — skipping session suspension")
try:
_clean_marker.unlink()
except Exception:
pass
else:
try:
suspended = self.session_store.suspend_recently_active()
if suspended:
logger.info("Suspended %d in-flight session(s) from previous run", suspended)
except Exception as e:
logger.warning("Session suspension on startup failed: %s", e)
connected_count = 0
enabled_platform_count = 0
@ -1496,16 +1579,34 @@ class GatewayRunner:
# Try to connect
logger.info("Connecting to %s...", platform.value)
self._update_platform_runtime_status(
platform.value,
platform_state="connecting",
error_code=None,
error_message=None,
)
try:
success = await adapter.connect()
if success:
self.adapters[platform] = adapter
self._sync_voice_mode_state_to_adapter(adapter)
connected_count += 1
self._update_platform_runtime_status(
platform.value,
platform_state="connected",
error_code=None,
error_message=None,
)
logger.info("%s connected", platform.value)
else:
logger.warning("%s failed to connect", platform.value)
if adapter.has_fatal_error:
self._update_platform_runtime_status(
platform.value,
platform_state="retrying" if adapter.fatal_error_retryable else "fatal",
error_code=adapter.fatal_error_code,
error_message=adapter.fatal_error_message,
)
target = (
startup_retryable_errors
if adapter.fatal_error_retryable
@ -1522,6 +1623,12 @@ class GatewayRunner:
"next_retry": time.monotonic() + 30,
}
else:
self._update_platform_runtime_status(
platform.value,
platform_state="retrying",
error_code=None,
error_message="failed to connect",
)
startup_retryable_errors.append(
f"{platform.value}: failed to connect"
)
@ -1533,6 +1640,12 @@ class GatewayRunner:
}
except Exception as e:
logger.error("%s error: %s", platform.value, e)
self._update_platform_runtime_status(
platform.value,
platform_state="retrying",
error_code=None,
error_message=str(e),
)
startup_retryable_errors.append(f"{platform.value}: {e}")
# Unexpected exceptions are typically transient — queue for retry
self._failed_platforms[platform] = {
@ -1811,6 +1924,12 @@ class GatewayRunner:
self._sync_voice_mode_state_to_adapter(adapter)
self.delivery_router.adapters = self.adapters
del self._failed_platforms[platform]
self._update_platform_runtime_status(
platform.value,
platform_state="connected",
error_code=None,
error_message=None,
)
logger.info("%s reconnected successfully", platform.value)
# Rebuild channel directory with the new adapter
@ -1822,12 +1941,24 @@ class GatewayRunner:
else:
# Check if the failure is non-retryable
if adapter.has_fatal_error and not adapter.fatal_error_retryable:
self._update_platform_runtime_status(
platform.value,
platform_state="fatal",
error_code=adapter.fatal_error_code,
error_message=adapter.fatal_error_message,
)
logger.warning(
"Reconnect %s: non-retryable error (%s), removing from retry queue",
platform.value, adapter.fatal_error_message,
)
del self._failed_platforms[platform]
else:
self._update_platform_runtime_status(
platform.value,
platform_state="retrying",
error_code=adapter.fatal_error_code,
error_message=adapter.fatal_error_message or "failed to reconnect",
)
backoff = min(30 * (2 ** (attempt - 1)), _BACKOFF_CAP)
info["attempts"] = attempt
info["next_retry"] = time.monotonic() + backoff
@ -1836,6 +1967,12 @@ class GatewayRunner:
platform.value, backoff,
)
except Exception as e:
self._update_platform_runtime_status(
platform.value,
platform_state="retrying",
error_code=None,
error_message=str(e),
)
backoff = min(30 * (2 ** (attempt - 1)), _BACKOFF_CAP)
info["attempts"] = attempt
info["next_retry"] = time.monotonic() + backoff
@ -1942,6 +2079,15 @@ class GatewayRunner:
from gateway.status import remove_pid_file
remove_pid_file()
# Write a clean-shutdown marker so the next startup knows this
# wasn't a crash. suspend_recently_active() only needs to run
# after unexpected exits — graceful shutdowns already drain
# active agents, so there's no stuck-session risk.
try:
(_hermes_home / ".clean_shutdown").touch()
except Exception:
pass
if self._restart_requested and self._restart_via_service:
self._exit_code = GATEWAY_SERVICE_RESTART_EXIT_CODE
self._exit_reason = self._exit_reason or "Gateway restart requested"
@ -2043,6 +2189,16 @@ class GatewayRunner:
return None
return FeishuAdapter(config)
elif platform == Platform.WECOM_CALLBACK:
from gateway.platforms.wecom_callback import (
WecomCallbackAdapter,
check_wecom_callback_requirements,
)
if not check_wecom_callback_requirements():
logger.warning("WeComCallback: aiohttp/httpx not installed")
return None
return WecomCallbackAdapter(config)
elif platform == Platform.WECOM:
from gateway.platforms.wecom import WeComAdapter, check_wecom_requirements
if not check_wecom_requirements():
@ -2132,6 +2288,7 @@ class GatewayRunner:
Platform.DINGTALK: "DINGTALK_ALLOWED_USERS",
Platform.FEISHU: "FEISHU_ALLOWED_USERS",
Platform.WECOM: "WECOM_ALLOWED_USERS",
Platform.WECOM_CALLBACK: "WECOM_CALLBACK_ALLOWED_USERS",
Platform.WEIXIN: "WEIXIN_ALLOWED_USERS",
Platform.BLUEBUBBLES: "BLUEBUBBLES_ALLOWED_USERS",
}
@ -2148,6 +2305,7 @@ class GatewayRunner:
Platform.DINGTALK: "DINGTALK_ALLOW_ALL_USERS",
Platform.FEISHU: "FEISHU_ALLOW_ALL_USERS",
Platform.WECOM: "WECOM_ALLOW_ALL_USERS",
Platform.WECOM_CALLBACK: "WECOM_CALLBACK_ALLOW_ALL_USERS",
Platform.WEIXIN: "WEIXIN_ALLOW_ALL_USERS",
Platform.BLUEBUBBLES: "BLUEBUBBLES_ALLOW_ALL_USERS",
}
@ -3454,8 +3612,18 @@ class GatewayRunner:
if agent_result.get("session_id") and agent_result["session_id"] != session_entry.session_id:
session_entry.session_id = agent_result["session_id"]
# Prepend reasoning/thinking if display is enabled
if getattr(self, "_show_reasoning", False) and response:
# Prepend reasoning/thinking if display is enabled (per-platform)
try:
from gateway.display_config import resolve_display_setting as _rds
_show_reasoning_effective = _rds(
_load_gateway_config(),
_platform_config_key(source.platform),
"show_reasoning",
getattr(self, "_show_reasoning", False),
)
except Exception:
_show_reasoning_effective = getattr(self, "_show_reasoning", False)
if _show_reasoning_effective and response:
last_reasoning = agent_result.get("last_reasoning")
if last_reasoning:
# Collapse long reasoning to keep messages readable
@ -3860,9 +4028,16 @@ class GatewayRunner:
except Exception:
pass
# Append a random tip to the reset message
try:
from hermes_cli.tips import get_random_tip
_tip_line = f"\n✦ Tip: {get_random_tip()}"
except Exception:
_tip_line = ""
if session_info:
return f"{header}\n\n{session_info}"
return header
return f"{header}\n\n{session_info}{_tip_line}"
return f"{header}{_tip_line}"
async def _handle_profile_command(self, event: MessageEvent) -> str:
"""Handle /profile — show active profile name and home directory."""
@ -4282,6 +4457,11 @@ class GatewayRunner:
"api_mode": result.api_mode,
}
# Evict cached agent so the next turn creates a fresh
# agent from the override rather than relying on the
# stale cache signature to trigger a rebuild.
_self._evict_cached_agent(_session_key)
# Build confirmation text
plabel = result.provider_label or result.target_provider
lines = [f"Model switched to `{result.new_model}`"]
@ -4395,6 +4575,10 @@ class GatewayRunner:
"api_mode": result.api_mode,
}
# Evict cached agent so the next turn creates a fresh agent from the
# override rather than relying on cache signature mismatch detection.
self._evict_cached_agent(session_key)
# Persist to config if --global
if persist_global:
try:
@ -5531,16 +5715,20 @@ class GatewayRunner:
"_Usage:_ `/reasoning <none|minimal|low|medium|high|xhigh|show|hide>`"
)
# Display toggle
# Display toggle (per-platform)
platform_key = _platform_config_key(event.source.platform)
if args in ("show", "on"):
self._show_reasoning = True
_save_config_key("display.show_reasoning", True)
return "🧠 ✓ Reasoning display: **ON**\nModel thinking will be shown before each response."
_save_config_key(f"display.platforms.{platform_key}.show_reasoning", True)
return (
"🧠 ✓ Reasoning display: **ON**\n"
f"Model thinking will be shown before each response on **{platform_key}**."
)
if args in ("hide", "off"):
self._show_reasoning = False
_save_config_key("display.show_reasoning", False)
return "🧠 ✓ Reasoning display: **OFF**"
_save_config_key(f"display.platforms.{platform_key}.show_reasoning", False)
return f"🧠 ✓ Reasoning display: **OFF** for **{platform_key}**"
# Effort level change
effort = args.strip()
@ -5643,11 +5831,14 @@ class GatewayRunner:
Gated by ``display.tool_progress_command`` in config.yaml (default off).
When enabled, cycles the tool progress mode through off → new → all →
verbose → off, same as the CLI.
verbose → off for the *current platform*. The setting is saved to
``display.platforms.<platform>.tool_progress`` so each channel can
have its own verbosity level independently.
"""
import yaml
config_path = _hermes_home / "config.yaml"
platform_key = _platform_config_key(event.source.platform)
# --- check config gate ------------------------------------------------
try:
@ -5666,7 +5857,7 @@ class GatewayRunner:
"display:\n tool_progress_command: true\n```"
)
# --- cycle mode -------------------------------------------------------
# --- cycle mode (per-platform) ----------------------------------------
cycle = ["off", "new", "all", "verbose"]
descriptions = {
"off": "⚙️ Tool progress: **OFF** — no tool activity shown.",
@ -5675,32 +5866,40 @@ class GatewayRunner:
"verbose": "⚙️ Tool progress: **VERBOSE** — every tool call with full arguments.",
}
raw_progress = user_config.get("display", {}).get("tool_progress", "all")
# YAML 1.1 parses bare "off" as boolean False — normalise back
if raw_progress is False:
current = "off"
elif raw_progress is True:
current = "all"
else:
current = str(raw_progress).lower()
# Read current effective mode for this platform via the resolver
from gateway.display_config import resolve_display_setting
current = resolve_display_setting(user_config, platform_key, "tool_progress", "all")
if current not in cycle:
current = "all"
idx = (cycle.index(current) + 1) % len(cycle)
new_mode = cycle[idx]
# Save to config.yaml
# Save to display.platforms.<platform>.tool_progress
try:
if "display" not in user_config or not isinstance(user_config.get("display"), dict):
user_config["display"] = {}
user_config["display"]["tool_progress"] = new_mode
display = user_config["display"]
if "platforms" not in display or not isinstance(display.get("platforms"), dict):
display["platforms"] = {}
if platform_key not in display["platforms"] or not isinstance(display["platforms"].get(platform_key), dict):
display["platforms"][platform_key] = {}
display["platforms"][platform_key]["tool_progress"] = new_mode
atomic_yaml_write(config_path, user_config)
return f"{descriptions[new_mode]}\n_(saved to config — takes effect on next message)_"
return (
f"{descriptions[new_mode]}\n"
f"_(saved for **{platform_key}** — takes effect on next message)_"
)
except Exception as e:
logger.warning("Failed to save tool_progress mode: %s", e)
return f"{descriptions[new_mode]}\n_(could not save to config: {e})_"
async def _handle_compress_command(self, event: MessageEvent) -> str:
"""Handle /compress command -- manually compress conversation context."""
"""Handle /compress command -- manually compress conversation context.
Accepts an optional focus topic: ``/compress <focus>`` guides the
summariser to preserve information related to *focus* while being
more aggressive about discarding everything else.
"""
source = event.source
session_entry = self.session_store.get_or_create_session(source)
history = self.session_store.load_transcript(session_entry.session_id)
@ -5708,6 +5907,9 @@ class GatewayRunner:
if not history or len(history) < 4:
return "Not enough conversation to compress (need at least 4 messages)."
# Extract optional focus topic from command args
focus_topic = (event.get_command_args() or "").strip() or None
try:
from run_agent import AIAgent
from agent.manual_compression_feedback import summarize_manual_compression
@ -5749,7 +5951,7 @@ class GatewayRunner:
loop = asyncio.get_event_loop()
compressed, _ = await loop.run_in_executor(
None,
lambda: tmp_agent._compress_context(msgs, "", approx_tokens=approx_tokens)
lambda: tmp_agent._compress_context(msgs, "", approx_tokens=approx_tokens, focus_topic=focus_topic)
)
# _compress_context already calls end_session() on the old session
@ -5773,7 +5975,10 @@ class GatewayRunner:
approx_tokens,
new_tokens,
)
lines = [f"🗜️ {summary['headline']}", summary["token_line"]]
lines = [f"🗜️ {summary['headline']}"]
if focus_topic:
lines.append(f"Focus: \"{focus_topic}\"")
lines.append(summary["token_line"])
if summary["note"]:
lines.append(summary["note"])
return "\n".join(lines)
@ -6315,7 +6520,7 @@ class GatewayRunner:
Platform.TELEGRAM, Platform.DISCORD, Platform.SLACK, Platform.WHATSAPP,
Platform.SIGNAL, Platform.MATTERMOST, Platform.MATRIX,
Platform.HOMEASSISTANT, Platform.EMAIL, Platform.SMS, Platform.DINGTALK,
Platform.FEISHU, Platform.WECOM, Platform.WEIXIN, Platform.BLUEBUBBLES, Platform.LOCAL,
Platform.FEISHU, Platform.WECOM, Platform.WECOM_CALLBACK, Platform.WEIXIN, Platform.BLUEBUBBLES, Platform.LOCAL,
})
async def _handle_update_command(self, event: MessageEvent) -> str:
@ -6560,8 +6765,12 @@ class GatewayRunner:
if buffer.strip() and (loop.time() - last_stream_time) >= stream_interval:
await _flush_buffer()
# Check for prompts
if prompt_path.exists() and session_key:
# Check for prompts — only forward if we haven't already sent
# one that's still awaiting a response. Without this guard the
# watcher would re-read the same .update_prompt.json every poll
# cycle and spam the user with duplicate prompt messages.
if (prompt_path.exists() and session_key
and not self._update_prompt_pending.get(session_key)):
try:
prompt_data = json.loads(prompt_path.read_text())
prompt_text = prompt_data.get("prompt", "")
@ -6593,6 +6802,11 @@ class GatewayRunner:
f"or type your answer directly."
)
self._update_prompt_pending[session_key] = True
# Remove the prompt file so it isn't re-read on the
# next poll cycle. The update process only needs
# .update_response to continue — it doesn't re-check
# .update_prompt.json while waiting.
prompt_path.unlink(missing_ok=True)
logger.info("Forwarded update prompt to %s: %s", session_key, prompt_text[:80])
except (json.JSONDecodeError, OSError) as e:
logger.debug("Failed to read update prompt: %s", e)
@ -6720,6 +6934,7 @@ class GatewayRunner:
thread_id=str(context.source.thread_id) if context.source.thread_id else "",
user_id=str(context.source.user_id) if context.source.user_id else "",
user_name=str(context.source.user_name) if context.source.user_name else "",
session_key=context.session_key,
)
def _clear_session_env(self, tokens: list) -> None:
@ -6965,7 +7180,9 @@ class GatewayRunner:
if session.exited:
# --- Agent-triggered completion: inject synthetic message ---
if agent_notify:
# Skip if the agent already consumed the result via wait/poll/log
from tools.process_registry import process_registry as _pr_check
if agent_notify and not _pr_check.is_completion_consumed(session_id):
from tools.ansi_strip import strip_ansi
_out = strip_ansi(session.output_buffer[-2000:]) if session.output_buffer else ""
synth_text = (
@ -7161,31 +7378,27 @@ class GatewayRunner:
from hermes_cli.tools_config import _get_platform_tools
enabled_toolsets = sorted(_get_platform_tools(user_config, platform_key))
display_config = user_config.get("display", {})
if not isinstance(display_config, dict):
display_config = {}
# Per-platform display settings — resolve via display_config module
# which checks display.platforms.<platform>.<key> first, then
# display.<key> global, then built-in platform defaults.
from gateway.display_config import resolve_display_setting
# Apply tool preview length config (0 = no limit)
try:
from agent.display import set_tool_preview_max_len
_tpl = user_config.get("display", {}).get("tool_preview_length", 0)
_tpl = resolve_display_setting(user_config, platform_key, "tool_preview_length", 0)
set_tool_preview_max_len(int(_tpl) if _tpl else 0)
except Exception:
pass
# Tool progress mode from config.yaml: "all", "new", "verbose", "off"
# Falls back to env vars for backward compatibility.
# YAML 1.1 parses bare `off` as boolean False — normalise before
# the `or` chain so it doesn't silently fall through to "all".
#
# Per-platform overrides (display.tool_progress_overrides) take
# priority over the global setting — e.g. Signal users can set
# tool_progress to "off" while keeping Telegram on "all".
_display_cfg = user_config.get("display", {})
_overrides = _display_cfg.get("tool_progress_overrides", {})
_raw_tp = _overrides.get(platform_key)
if _raw_tp is None:
_raw_tp = _display_cfg.get("tool_progress")
if _raw_tp is False:
_raw_tp = "off"
# Tool progress mode — resolved per-platform with env var fallback
_resolved_tp = resolve_display_setting(user_config, platform_key, "tool_progress")
progress_mode = (
_raw_tp
_resolved_tp
or os.getenv("HERMES_TOOL_PROGRESS_MODE")
or "all"
)
@ -7193,6 +7406,16 @@ class GatewayRunner:
# so each progress line would be sent as a separate message.
from gateway.config import Platform
tool_progress_enabled = progress_mode != "off" and source.platform != Platform.WEBHOOK
# Natural assistant status messages are intentionally independent from
# tool progress and token streaming. Users can keep tool_progress quiet
# in chat platforms while opting into concise mid-turn updates.
interim_assistant_messages_enabled = (
source.platform != Platform.WEBHOOK
and is_truthy_value(
display_config.get("interim_assistant_messages"),
default=True,
)
)
# Queue for progress messages (thread-safe)
progress_queue = queue.Queue() if tool_progress_enabled else None
@ -7462,8 +7685,8 @@ class GatewayRunner:
# `_resolve_turn_agent_config(message, …)`.
nonlocal message
# Pass session_key to process registry via env var so background
# processes can be mapped back to this gateway session
# session_key is now set via contextvars in _set_session_env()
# (concurrency-safe). Keep os.environ as fallback for CLI/cron.
os.environ["HERMES_SESSION_KEY"] = session_key or ""
# Read from env var or use default (same as CLI)
@ -7493,6 +7716,10 @@ class GatewayRunner:
session_key=session_key,
user_config=user_config,
)
logger.debug(
"run_agent resolved: model=%s provider=%s session=%s",
model, runtime_kwargs.get("provider"), (session_key or "")[:30],
)
except Exception as exc:
return {
"final_response": f"⚠️ Provider authentication failed: {exc}",
@ -7505,7 +7732,7 @@ class GatewayRunner:
reasoning_config = self._load_reasoning_config()
self._reasoning_config = reasoning_config
self._service_tier = self._load_service_tier()
# Set up streaming consumer if enabled
# Set up stream consumer for token streaming or interim commentary.
_stream_consumer = None
_stream_delta_cb = None
_scfg = getattr(getattr(self, 'config', None), 'streaming', None)
@ -7513,7 +7740,22 @@ class GatewayRunner:
from gateway.config import StreamingConfig
_scfg = StreamingConfig()
if _scfg.enabled and _scfg.transport != "off":
# Per-platform streaming gate: display.platforms.<plat>.streaming
# can disable streaming for specific platforms even when the global
# streaming config is enabled.
_plat_streaming = resolve_display_setting(
user_config, platform_key, "streaming"
)
# None = no per-platform override → follow global config
_streaming_enabled = (
_scfg.enabled and _scfg.transport != "off"
if _plat_streaming is None
else bool(_plat_streaming)
)
_want_stream_deltas = _streaming_enabled
_want_interim_messages = interim_assistant_messages_enabled
_want_interim_consumer = _want_interim_messages
if _want_stream_deltas or _want_interim_consumer:
try:
from gateway.stream_consumer import GatewayStreamConsumer, StreamConsumerConfig
_adapter = self.adapters.get(source.platform)
@ -7529,11 +7771,33 @@ class GatewayRunner:
config=_consumer_cfg,
metadata={"thread_id": _progress_thread_id} if _progress_thread_id else None,
)
_stream_delta_cb = _stream_consumer.on_delta
if _want_stream_deltas:
_stream_delta_cb = _stream_consumer.on_delta
stream_consumer_holder[0] = _stream_consumer
except Exception as _sc_err:
logger.debug("Could not set up stream consumer: %s", _sc_err)
def _interim_assistant_cb(text: str, *, already_streamed: bool = False) -> None:
    # Deliver a mid-turn "interim" assistant message to the user.
    #
    # NOTE(review): this is a closure — `_stream_consumer`, `_status_adapter`,
    # `_status_chat_id`, `_status_thread_metadata` and `_loop_for_step` are
    # captured from the enclosing turn handler, not passed in.
    #
    # Path 1: a stream consumer is active — route through it so interim text
    # participates in the streaming transport. `already_streamed=True` means
    # the text was already emitted as deltas, so only a segment break is
    # needed; otherwise forward the text as commentary.
    if _stream_consumer is not None:
        if already_streamed:
            _stream_consumer.on_segment_break()
        else:
            _stream_consumer.on_commentary(text)
        return
    # Path 2: no stream consumer — send directly via the platform adapter.
    # Skip when the text was already streamed, no adapter is available, or
    # the text is empty/whitespace-only.
    if already_streamed or not _status_adapter or not str(text or "").strip():
        return
    try:
        # run_coroutine_threadsafe schedules the async send onto the gateway
        # event loop; safe even if this callback fires off-loop (presumably
        # from the agent's worker thread — confirm against caller).
        asyncio.run_coroutine_threadsafe(
            _status_adapter.send(
                _status_chat_id,
                text,
                metadata=_status_thread_metadata,
            ),
            _loop_for_step,
        )
    except Exception as _e:
        # Best-effort: interim commentary must never break the main turn.
        logger.debug("interim_assistant_callback error: %s", _e)
turn_route = self._resolve_turn_agent_config(message, model, runtime_kwargs)
# Check agent cache — reuse the AIAgent from the previous message
@ -7591,6 +7855,7 @@ class GatewayRunner:
agent.tool_progress_callback = progress_callback if tool_progress_enabled else None
agent.step_callback = _step_callback_sync if _hooks_ref.loaded_hooks else None
agent.stream_delta_callback = _stream_delta_cb
agent.interim_assistant_callback = _interim_assistant_cb if _want_interim_messages else None
agent.status_callback = _status_callback_sync
agent.reasoning_config = reasoning_config
agent.service_tier = self._service_tier
@ -7894,6 +8159,7 @@ class GatewayRunner:
"output_tokens": _output_toks,
"model": _resolved_model,
"session_id": effective_session_id,
"response_previewed": result.get("response_previewed", False),
}
# Start progress message sender if enabled
@ -7928,26 +8194,51 @@ class GatewayRunner:
tracking_task = asyncio.create_task(track_agent())
# Monitor for interrupts from the adapter (new messages arriving)
# Monitor for interrupts from the adapter (new messages arriving).
# This is the PRIMARY interrupt path for regular text messages —
# Level 1 (base.py) catches them before _handle_message() is reached,
# so the Level 2 running_agent.interrupt() path never fires.
# The inactivity poll loop below has a BACKUP check in case this
# task dies (no error handling = silent death = lost interrupts).
_interrupt_detected = asyncio.Event() # shared with backup check
async def monitor_for_interrupt():
adapter = self.adapters.get(source.platform)
if not adapter or not session_key:
if not session_key:
return
while True:
await asyncio.sleep(0.2) # Check every 200ms
# Check if adapter has a pending interrupt for this session.
# Must use session_key (build_session_key output) — NOT
# source.chat_id — because the adapter stores interrupt events
# under the full session key.
if hasattr(adapter, 'has_pending_interrupt') and adapter.has_pending_interrupt(session_key):
agent = agent_holder[0]
if agent:
pending_event = adapter.get_pending_message(session_key)
pending_text = pending_event.text if pending_event else None
logger.debug("Interrupt detected from adapter, signaling agent...")
agent.interrupt(pending_text)
break
try:
# Re-resolve adapter each iteration so reconnects don't
# leave us holding a stale reference.
_adapter = self.adapters.get(source.platform)
if not _adapter:
continue
# Check if adapter has a pending interrupt for this session.
# Must use session_key (build_session_key output) — NOT
# source.chat_id — because the adapter stores interrupt events
# under the full session key.
if hasattr(_adapter, 'has_pending_interrupt') and _adapter.has_pending_interrupt(session_key):
agent = agent_holder[0]
if agent:
# Peek at the pending message text WITHOUT consuming it.
# The message must remain in _pending_messages so the
# post-run dequeue at _dequeue_pending_event() can
# retrieve the full MessageEvent (with media metadata).
# If we pop here, a race exists: the agent may finish
# before checking _interrupt_requested, and the message
# is lost — neither the interrupt path nor the dequeue
# path finds it.
_peek_event = _adapter._pending_messages.get(session_key)
pending_text = _peek_event.text if _peek_event else None
logger.debug("Interrupt detected from adapter, signaling agent...")
agent.interrupt(pending_text)
_interrupt_detected.set()
break
except asyncio.CancelledError:
raise
except Exception as _mon_err:
logger.debug("monitor_for_interrupt error (will retry): %s", _mon_err)
interrupt_monitor = asyncio.create_task(monitor_for_interrupt())
@ -8012,8 +8303,34 @@ class GatewayRunner:
_POLL_INTERVAL = 5.0
if _agent_timeout is None:
# Unlimited — just await the result.
response = await _executor_task
# Unlimited — still poll periodically for backup interrupt
# detection in case monitor_for_interrupt() silently died.
response = None
while True:
done, _ = await asyncio.wait(
{_executor_task}, timeout=_POLL_INTERVAL
)
if done:
response = _executor_task.result()
break
# Backup interrupt check: if the monitor task died or
# missed the interrupt, catch it here.
if not _interrupt_detected.is_set() and session_key:
_backup_adapter = self.adapters.get(source.platform)
_backup_agent = agent_holder[0]
if (_backup_adapter and _backup_agent
and hasattr(_backup_adapter, 'has_pending_interrupt')
and _backup_adapter.has_pending_interrupt(session_key)):
_bp_event = _backup_adapter._pending_messages.get(session_key)
_bp_text = _bp_event.text if _bp_event else None
logger.info(
"Backup interrupt detected for session %s "
"(monitor task state: %s)",
session_key[:20],
"done" if interrupt_monitor.done() else "running",
)
_backup_agent.interrupt(_bp_text)
_interrupt_detected.set()
else:
# Poll loop: check the agent's built-in activity tracker
# (updated by _touch_activity() on every tool call, API
@ -8057,6 +8374,23 @@ class GatewayRunner:
if _idle_secs >= _agent_timeout:
_inactivity_timeout = True
break
# Backup interrupt check (same as unlimited path).
if not _interrupt_detected.is_set() and session_key:
_backup_adapter = self.adapters.get(source.platform)
_backup_agent = agent_holder[0]
if (_backup_adapter and _backup_agent
and hasattr(_backup_adapter, 'has_pending_interrupt')
and _backup_adapter.has_pending_interrupt(session_key)):
_bp_event = _backup_adapter._pending_messages.get(session_key)
_bp_text = _bp_event.text if _bp_event else None
logger.info(
"Backup interrupt detected for session %s "
"(monitor task state: %s)",
session_key[:20],
"done" if interrupt_monitor.done() else "running",
)
_backup_agent.interrupt(_bp_text)
_interrupt_detected.set()
if _inactivity_timeout:
# Build a diagnostic summary from the agent's activity tracker.
@ -8216,12 +8550,36 @@ class GatewayRunner:
# response before processing the queued follow-up.
# Skip if streaming already delivered it.
_sc = stream_consumer_holder[0]
_already_streamed = _sc and getattr(_sc, "already_sent", False)
if _sc and stream_task:
try:
await asyncio.wait_for(stream_task, timeout=5.0)
except (asyncio.TimeoutError, asyncio.CancelledError):
stream_task.cancel()
try:
await stream_task
except asyncio.CancelledError:
pass
except Exception as e:
logger.debug("Stream consumer wait before queued message failed: %s", e)
_response_previewed = bool(result.get("response_previewed"))
_already_streamed = bool(
_sc
and (
getattr(_sc, "final_response_sent", False)
or (
_response_previewed
and getattr(_sc, "already_sent", False)
)
)
)
first_response = result.get("final_response", "")
if first_response and not _already_streamed:
try:
await adapter.send(source.chat_id, first_response,
metadata={"thread_id": source.thread_id} if source.thread_id else None)
await adapter.send(
source.chat_id,
first_response,
metadata=_status_thread_metadata,
)
except Exception as e:
logger.warning("Failed to send first response before queued message: %s", e)
# else: interrupted — discard the interrupted response ("Operation
@ -8294,8 +8652,15 @@ class GatewayRunner:
# message is new content the user hasn't seen, and it must reach
# them even if streaming had sent earlier partial output.
_sc = stream_consumer_holder[0]
if _sc and _sc.already_sent and isinstance(response, dict):
if not response.get("failed"):
if _sc and isinstance(response, dict) and not response.get("failed"):
_response_previewed = bool(response.get("response_previewed"))
if (
getattr(_sc, "final_response_sent", False)
or (
_response_previewed
and getattr(_sc, "already_sent", False)
)
):
response["already_sent"] = True
return response
@ -8444,23 +8809,11 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
except Exception:
pass
# Centralized logging — agent.log (INFO+) and errors.log (WARNING+).
# Centralized logging — agent.log (INFO+), errors.log (WARNING+),
# and gateway.log (INFO+, gateway-component records only).
# Idempotent, so repeated calls from AIAgent.__init__ won't duplicate.
from hermes_logging import setup_logging
log_dir = setup_logging(hermes_home=_hermes_home, mode="gateway")
# Gateway-specific rotating log — captures all gateway-level messages
# (session management, platform adapters, slash commands, etc.).
from agent.redact import RedactingFormatter
from hermes_logging import _add_rotating_handler
_add_rotating_handler(
logging.getLogger(),
log_dir / 'gateway.log',
level=logging.INFO,
max_bytes=5 * 1024 * 1024,
backup_count=3,
formatter=RedactingFormatter('%(asctime)s %(levelname)s %(name)s: %(message)s'),
)
setup_logging(hermes_home=_hermes_home, mode="gateway")
# Optional stderr handler — level driven by -v/-q flags on the CLI.
# verbosity=None (-q/--quiet): no stderr output
@ -8468,6 +8821,8 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
# verbosity=1 (-v): INFO and above
# verbosity=2+ (-vv/-vvv): DEBUG
if verbosity is not None:
from agent.redact import RedactingFormatter
_stderr_level = {0: logging.WARNING, 1: logging.INFO}.get(verbosity, logging.DEBUG)
_stderr_handler = logging.StreamHandler()
_stderr_handler.setLevel(_stderr_level)

View file

@ -807,9 +807,9 @@ class SessionStore:
to avoid resetting long-idle sessions that are harmless to resume.
Returns the number of sessions that were suspended.
"""
import time as _time
from datetime import timedelta
cutoff = _time.time() - max_age_seconds
cutoff = _now() - timedelta(seconds=max_age_seconds)
count = 0
with self._lock:
self._ensure_loaded_locked()

View file

@ -48,6 +48,7 @@ _SESSION_CHAT_NAME: ContextVar[str] = ContextVar("HERMES_SESSION_CHAT_NAME", def
_SESSION_THREAD_ID: ContextVar[str] = ContextVar("HERMES_SESSION_THREAD_ID", default="")
_SESSION_USER_ID: ContextVar[str] = ContextVar("HERMES_SESSION_USER_ID", default="")
_SESSION_USER_NAME: ContextVar[str] = ContextVar("HERMES_SESSION_USER_NAME", default="")
_SESSION_KEY: ContextVar[str] = ContextVar("HERMES_SESSION_KEY", default="")
_VAR_MAP = {
"HERMES_SESSION_PLATFORM": _SESSION_PLATFORM,
@ -56,6 +57,7 @@ _VAR_MAP = {
"HERMES_SESSION_THREAD_ID": _SESSION_THREAD_ID,
"HERMES_SESSION_USER_ID": _SESSION_USER_ID,
"HERMES_SESSION_USER_NAME": _SESSION_USER_NAME,
"HERMES_SESSION_KEY": _SESSION_KEY,
}
@ -66,6 +68,7 @@ def set_session_vars(
thread_id: str = "",
user_id: str = "",
user_name: str = "",
session_key: str = "",
) -> list:
"""Set all session context variables and return reset tokens.
@ -82,6 +85,7 @@ def set_session_vars(
_SESSION_THREAD_ID.set(thread_id),
_SESSION_USER_ID.set(user_id),
_SESSION_USER_NAME.set(user_name),
_SESSION_KEY.set(session_key),
]
return tokens
@ -97,6 +101,7 @@ def clear_session_vars(tokens: list) -> None:
_SESSION_THREAD_ID,
_SESSION_USER_ID,
_SESSION_USER_NAME,
_SESSION_KEY,
]
for var, token in zip(vars_in_order, tokens):
var.reset(token)

View file

@ -26,6 +26,7 @@ _GATEWAY_KIND = "hermes-gateway"
_RUNTIME_STATUS_FILE = "gateway_state.json"
_LOCKS_DIRNAME = "gateway-locks"
_IS_WINDOWS = sys.platform == "win32"
_UNSET = object()
def _get_pid_path() -> Path:
@ -218,14 +219,14 @@ def write_pid_file() -> None:
def write_runtime_status(
*,
gateway_state: Optional[str] = None,
exit_reason: Optional[str] = None,
restart_requested: Optional[bool] = None,
active_agents: Optional[int] = None,
platform: Optional[str] = None,
platform_state: Optional[str] = None,
error_code: Optional[str] = None,
error_message: Optional[str] = None,
gateway_state: Any = _UNSET,
exit_reason: Any = _UNSET,
restart_requested: Any = _UNSET,
active_agents: Any = _UNSET,
platform: Any = _UNSET,
platform_state: Any = _UNSET,
error_code: Any = _UNSET,
error_message: Any = _UNSET,
) -> None:
"""Persist gateway runtime health information for diagnostics/status."""
path = _get_runtime_status_path()
@ -236,22 +237,22 @@ def write_runtime_status(
payload["start_time"] = _get_process_start_time(os.getpid())
payload["updated_at"] = _utc_now_iso()
if gateway_state is not None:
if gateway_state is not _UNSET:
payload["gateway_state"] = gateway_state
if exit_reason is not None:
if exit_reason is not _UNSET:
payload["exit_reason"] = exit_reason
if restart_requested is not None:
if restart_requested is not _UNSET:
payload["restart_requested"] = bool(restart_requested)
if active_agents is not None:
if active_agents is not _UNSET:
payload["active_agents"] = max(0, int(active_agents))
if platform is not None:
if platform is not _UNSET:
platform_payload = payload["platforms"].get(platform, {})
if platform_state is not None:
if platform_state is not _UNSET:
platform_payload["state"] = platform_state
if error_code is not None:
if error_code is not _UNSET:
platform_payload["error_code"] = error_code
if error_message is not None:
if error_message is not _UNSET:
platform_payload["error_message"] = error_message
platform_payload["updated_at"] = _utc_now_iso()
payload["platforms"][platform] = platform_payload

View file

@ -32,6 +32,10 @@ _DONE = object()
# new one so that subsequent text appears below tool progress messages.
_NEW_SEGMENT = object()
# Queue marker for a completed assistant commentary message emitted between
# API/tool iterations (for example: "I'll inspect the repo first.").
_COMMENTARY = object()
@dataclass
class StreamConsumerConfig:
@ -75,20 +79,43 @@ class GatewayStreamConsumer:
self._accumulated = ""
self._message_id: Optional[str] = None
self._already_sent = False
self._edit_supported = True # Disabled on first edit failure (Signal/Email/HA)
self._edit_supported = True # Disabled when progressive edits are no longer usable
self._last_edit_time = 0.0
self._last_sent_text = "" # Track last-sent text to skip redundant edits
self._fallback_final_send = False
self._fallback_prefix = ""
self._flood_strikes = 0 # Consecutive flood-control edit failures
self._current_edit_interval = self.cfg.edit_interval # Adaptive backoff
self._final_response_sent = False
@property
def already_sent(self) -> bool:
"""True if at least one message was sent/edited — signals the base
adapter to skip re-sending the final response."""
"""True if at least one message was sent or edited during the run."""
return self._already_sent
@property
def final_response_sent(self) -> bool:
"""True when the stream consumer delivered the final assistant reply."""
return self._final_response_sent
def on_segment_break(self) -> None:
"""Finalize the current stream segment and start a fresh message."""
self._queue.put(_NEW_SEGMENT)
def on_commentary(self, text: str) -> None:
"""Queue a completed interim assistant commentary message."""
if text:
self._queue.put((_COMMENTARY, text))
def _reset_segment_state(self, *, preserve_no_edit: bool = False) -> None:
if preserve_no_edit and self._message_id == "__no_edit__":
return
self._message_id = None
self._accumulated = ""
self._last_sent_text = ""
self._fallback_final_send = False
self._fallback_prefix = ""
def on_delta(self, text: str) -> None:
"""Thread-safe callback — called from the agent's worker thread.
@ -99,7 +126,7 @@ class GatewayStreamConsumer:
if text:
self._queue.put(text)
elif text is None:
self._queue.put(_NEW_SEGMENT)
self.on_segment_break()
def finish(self) -> None:
"""Signal that the stream is complete."""
@ -116,6 +143,7 @@ class GatewayStreamConsumer:
# Drain all available items from the queue
got_done = False
got_segment_break = False
commentary_text = None
while True:
try:
item = self._queue.get_nowait()
@ -125,6 +153,9 @@ class GatewayStreamConsumer:
if item is _NEW_SEGMENT:
got_segment_break = True
break
if isinstance(item, tuple) and len(item) == 2 and item[0] is _COMMENTARY:
commentary_text = item[1]
break
self._accumulated += item
except queue.Empty:
break
@ -135,11 +166,13 @@ class GatewayStreamConsumer:
should_edit = (
got_done
or got_segment_break
or commentary_text is not None
or (elapsed >= self._current_edit_interval
and self._accumulated)
or len(self._accumulated) >= self.cfg.buffer_threshold
)
current_update_visible = False
if should_edit and self._accumulated:
# Split overflow: if accumulated text exceeds the platform
# limit, split into properly sized chunks.
@ -161,6 +194,7 @@ class GatewayStreamConsumer:
self._last_sent_text = ""
self._last_edit_time = time.monotonic()
if got_done:
self._final_response_sent = self._already_sent
return
if got_segment_break:
self._message_id = None
@ -192,10 +226,10 @@ class GatewayStreamConsumer:
self._last_sent_text = ""
display_text = self._accumulated
if not got_done and not got_segment_break:
if not got_done and not got_segment_break and commentary_text is None:
display_text += self.cfg.cursor
await self._send_or_edit(display_text)
current_update_visible = await self._send_or_edit(display_text)
self._last_edit_time = time.monotonic()
if got_done:
@ -206,12 +240,20 @@ class GatewayStreamConsumer:
if self._accumulated:
if self._fallback_final_send:
await self._send_fallback_final(self._accumulated)
elif current_update_visible:
self._final_response_sent = True
elif self._message_id:
await self._send_or_edit(self._accumulated)
self._final_response_sent = await self._send_or_edit(self._accumulated)
elif not self._already_sent:
await self._send_or_edit(self._accumulated)
self._final_response_sent = await self._send_or_edit(self._accumulated)
return
if commentary_text is not None:
self._reset_segment_state()
await self._send_commentary(commentary_text)
self._last_edit_time = time.monotonic()
self._reset_segment_state()
# Tool boundary: reset message state so the next text chunk
# creates a fresh message below any tool-progress messages.
#
@ -220,17 +262,14 @@ class GatewayStreamConsumer:
# github_comment delivery). Resetting to None would re-enter
# the "first send" path on every tool boundary and post one
# platform message per tool call — that is what caused 155
# comments under a single PR. Instead, keep all state so the
# full continuation is delivered once via _send_fallback_final.
# comments under a single PR. Instead, preserve the sentinel
# so the full continuation is delivered once via
# _send_fallback_final.
# (When editing fails mid-stream due to flood control the id is
# a real string like "msg_1", not "__no_edit__", so that case
# still resets and creates a fresh segment as intended.)
if got_segment_break and self._message_id != "__no_edit__":
self._message_id = None
self._accumulated = ""
self._last_sent_text = ""
self._fallback_final_send = False
self._fallback_prefix = ""
if got_segment_break:
self._reset_segment_state(preserve_no_edit=True)
await asyncio.sleep(0.05) # Small yield to not busy-loop
@ -339,6 +378,7 @@ class GatewayStreamConsumer:
if not continuation.strip():
# Nothing new to send — the visible partial already matches final text.
self._already_sent = True
self._final_response_sent = True
return
raw_limit = getattr(self.adapter, "MAX_MESSAGE_LENGTH", 4096)
@ -373,6 +413,7 @@ class GatewayStreamConsumer:
# the base gateway final-send path so we don't resend the
# full response and create another duplicate.
self._already_sent = True
self._final_response_sent = True
self._message_id = last_message_id
self._last_sent_text = last_successful_chunk
self._fallback_prefix = ""
@ -390,6 +431,7 @@ class GatewayStreamConsumer:
self._message_id = last_message_id
self._already_sent = True
self._final_response_sent = True
self._last_sent_text = chunks[-1]
self._fallback_prefix = ""
@ -420,6 +462,24 @@ class GatewayStreamConsumer:
except Exception:
pass # best-effort — don't let this block the fallback path
async def _send_commentary(self, text: str) -> bool:
    """Send a completed interim assistant commentary message.

    Returns True when the platform adapter accepted the message,
    False when the text is empty after cleaning, the send reported
    failure, or the adapter raised.
    """
    # Normalize streaming artifacts out of the text before display.
    text = self._clean_for_display(text)
    if not text.strip():
        # Nothing visible to send — treat as a no-op, not an error.
        return False
    try:
        result = await self.adapter.send(
            chat_id=self.chat_id,
            content=text,
            metadata=self.metadata,
        )
        if result.success:
            # Record that at least one message reached the chat so the
            # base adapter knows a send already happened this run.
            self._already_sent = True
            return True
    except Exception as e:
        # Best-effort: commentary is cosmetic, so log and continue.
        logger.error("Commentary send error: %s", e)
    return False
async def _send_or_edit(self, text: str) -> bool:
"""Send or edit the streaming message.
@ -501,23 +561,21 @@ class GatewayStreamConsumer:
content=text,
metadata=self.metadata,
)
if result.success and result.message_id:
self._message_id = result.message_id
if result.success:
if result.message_id:
self._message_id = result.message_id
else:
self._edit_supported = False
self._already_sent = True
self._last_sent_text = text
if not result.message_id:
self._fallback_prefix = self._visible_prefix()
self._fallback_final_send = True
# Sentinel prevents re-entering the first-send path on
# every delta/tool boundary when platforms accept a
# message but do not return an editable message id.
self._message_id = "__no_edit__"
return True
elif result.success:
# Platform accepted the message but returned no message_id
# (e.g. Signal). Can't edit without an ID — switch to
# fallback mode: suppress intermediate deltas, send only
# the missing tail once the final response is ready.
self._already_sent = True
self._edit_supported = False
self._fallback_prefix = self._clean_for_display(text)
self._fallback_final_send = True
# Sentinel prevents re-entering this branch on every delta
self._message_id = "__no_edit__"
return True # platform accepted, just can't edit
else:
# Initial send failed — disable streaming for this session
self._edit_supported = False

View file

@ -1303,6 +1303,49 @@ def _read_codex_tokens(*, _lock: bool = True) -> Dict[str, Any]:
}
def _write_codex_cli_tokens(
access_token: str,
refresh_token: str,
*,
last_refresh: Optional[str] = None,
) -> None:
"""Write refreshed tokens back to ~/.codex/auth.json.
OpenAI OAuth refresh tokens are single-use and rotate on every refresh.
When Hermes refreshes a token it consumes the old refresh_token; if we
don't write the new pair back, the Codex CLI (or VS Code extension) will
fail with ``refresh_token_reused`` on its next refresh attempt.
This mirrors the Anthropic write-back to ~/.claude/.credentials.json
via ``_write_claude_code_credentials()``.
"""
codex_home = os.getenv("CODEX_HOME", "").strip()
if not codex_home:
codex_home = str(Path.home() / ".codex")
auth_path = Path(codex_home).expanduser() / "auth.json"
try:
existing: Dict[str, Any] = {}
if auth_path.is_file():
existing = json.loads(auth_path.read_text(encoding="utf-8"))
if not isinstance(existing, dict):
existing = {}
tokens_dict = existing.get("tokens")
if not isinstance(tokens_dict, dict):
tokens_dict = {}
tokens_dict["access_token"] = access_token
tokens_dict["refresh_token"] = refresh_token
existing["tokens"] = tokens_dict
if last_refresh is not None:
existing["last_refresh"] = last_refresh
auth_path.parent.mkdir(parents=True, exist_ok=True)
auth_path.write_text(json.dumps(existing, indent=2), encoding="utf-8")
auth_path.chmod(0o600)
except (OSError, IOError) as exc:
logger.debug("Failed to write refreshed tokens to %s: %s", auth_path, exc)
def _save_codex_tokens(tokens: Dict[str, str], last_refresh: str = None) -> None:
"""Save Codex OAuth tokens to Hermes auth store (~/.hermes/auth.json)."""
if last_refresh is None:
@ -1425,6 +1468,12 @@ def _refresh_codex_auth_tokens(
updated_tokens["refresh_token"] = refreshed["refresh_token"]
_save_codex_tokens(updated_tokens)
# Write back to ~/.codex/auth.json so Codex CLI / VS Code stay in sync.
_write_codex_cli_tokens(
refreshed["access_token"],
refreshed["refresh_token"],
last_refresh=refreshed.get("last_refresh"),
)
return updated_tokens

399
hermes_cli/backup.py Normal file
View file

@ -0,0 +1,399 @@
"""
Backup and import commands for hermes CLI.
`hermes backup` creates a zip archive of the entire ~/.hermes/ directory
(excluding the hermes-agent repo and transient files).
`hermes import` restores from a backup zip, overlaying onto the current
HERMES_HOME root.
"""
import os
import sys
import time
import zipfile
from datetime import datetime
from pathlib import Path
from hermes_constants import get_default_hermes_root, display_hermes_home
# ---------------------------------------------------------------------------
# Exclusion rules
# ---------------------------------------------------------------------------
# Directory names to skip entirely (matched against each path component)
_EXCLUDED_DIRS = {
"hermes-agent", # the codebase repo — re-clone instead
"__pycache__", # bytecode caches — regenerated on import
".git", # nested git dirs (profiles shouldn't have these, but safety)
"node_modules", # js deps if website/ somehow leaks in
}
# File-name suffixes to skip
_EXCLUDED_SUFFIXES = (
".pyc",
".pyo",
)
# File names to skip (runtime state that's meaningless on another machine)
_EXCLUDED_NAMES = {
"gateway.pid",
"cron.pid",
}
def _should_exclude(rel_path: Path) -> bool:
"""Return True if *rel_path* (relative to hermes root) should be skipped."""
parts = rel_path.parts
# Any path component matches an excluded dir name
for part in parts:
if part in _EXCLUDED_DIRS:
return True
name = rel_path.name
if name in _EXCLUDED_NAMES:
return True
if name.endswith(_EXCLUDED_SUFFIXES):
return True
return False
# ---------------------------------------------------------------------------
# Backup
# ---------------------------------------------------------------------------
def _format_size(nbytes: int) -> str:
"""Human-readable file size."""
for unit in ("B", "KB", "MB", "GB"):
if nbytes < 1024:
return f"{nbytes:.1f} {unit}" if unit != "B" else f"{nbytes} {unit}"
nbytes /= 1024
return f"{nbytes:.1f} TB"
def run_backup(args) -> None:
    """Create a zip backup of the Hermes home directory.

    Walks the Hermes root, skipping excluded directories/files (see
    ``_should_exclude``), and writes a deflate-compressed zip. Prints a
    summary with file counts, sizes, timing, excluded directories, and any
    per-file errors. Exits with status 1 if the Hermes home is missing.

    ``args.output`` may be a file path or an existing directory; when
    omitted, the archive is written to the user's home directory with a
    timestamped name.
    """
    hermes_root = get_default_hermes_root()
    if not hermes_root.is_dir():
        print(f"Error: Hermes home directory not found at {hermes_root}")
        sys.exit(1)
    # Determine output path
    if args.output:
        out_path = Path(args.output).expanduser().resolve()
        # If user gave a directory, put the zip inside it
        if out_path.is_dir():
            stamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
            out_path = out_path / f"hermes-backup-{stamp}.zip"
    else:
        stamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
        out_path = Path.home() / f"hermes-backup-{stamp}.zip"
    # Ensure the suffix is .zip
    # (with_suffix replaces the last suffix, so passing "<old>.zip" appends:
    #  "backup.tar" -> "backup.tar.zip", "backup" -> "backup.zip")
    if out_path.suffix.lower() != ".zip":
        out_path = out_path.with_suffix(out_path.suffix + ".zip")
    # Ensure parent directory exists
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # Collect files
    print(f"Scanning {display_hermes_home()} ...")
    files_to_add: list[tuple[Path, Path]] = []  # (absolute, relative)
    skipped_dirs = set()
    # followlinks=False: never descend through symlinked directories.
    for dirpath, dirnames, filenames in os.walk(hermes_root, followlinks=False):
        dp = Path(dirpath)
        rel_dir = dp.relative_to(hermes_root)
        # Prune excluded directories in-place so os.walk doesn't descend
        orig_dirnames = dirnames[:]
        dirnames[:] = [
            d for d in dirnames
            if d not in _EXCLUDED_DIRS
        ]
        for removed in set(orig_dirnames) - set(dirnames):
            skipped_dirs.add(str(rel_dir / removed))
        for fname in filenames:
            fpath = dp / fname
            rel = fpath.relative_to(hermes_root)
            if _should_exclude(rel):
                continue
            # Skip the output zip itself if it happens to be inside hermes root
            try:
                if fpath.resolve() == out_path.resolve():
                    continue
            except (OSError, ValueError):
                pass  # resolution failures are not fatal; include the file
            files_to_add.append((fpath, rel))
    if not files_to_add:
        print("No files to back up.")
        return
    # Create the zip
    file_count = len(files_to_add)
    print(f"Backing up {file_count} files ...")
    total_bytes = 0
    errors = []
    t0 = time.monotonic()
    with zipfile.ZipFile(out_path, "w", zipfile.ZIP_DEFLATED, compresslevel=6) as zf:
        for i, (abs_path, rel_path) in enumerate(files_to_add, 1):
            try:
                zf.write(abs_path, arcname=str(rel_path))
                # NOTE(review): stat() after a successful write may still
                # raise if the file vanished mid-backup; caught below.
                total_bytes += abs_path.stat().st_size
            except (PermissionError, OSError) as exc:
                errors.append(f" {rel_path}: {exc}")
                continue
            # Progress every 500 files
            if i % 500 == 0:
                print(f" {i}/{file_count} files ...")
    elapsed = time.monotonic() - t0
    zip_size = out_path.stat().st_size
    # Summary
    print()
    print(f"Backup complete: {out_path}")
    print(f" Files: {file_count}")
    print(f" Original: {_format_size(total_bytes)}")
    print(f" Compressed: {_format_size(zip_size)}")
    print(f" Time: {elapsed:.1f}s")
    if skipped_dirs:
        print(f"\n Excluded directories:")
        for d in sorted(skipped_dirs):
            print(f" {d}/")
    if errors:
        print(f"\n Warnings ({len(errors)} files skipped):")
        for e in errors[:10]:
            print(e)
        if len(errors) > 10:
            print(f" ... and {len(errors) - 10} more")
    print(f"\nRestore with: hermes import {out_path.name}")
# ---------------------------------------------------------------------------
# Import
# ---------------------------------------------------------------------------
def _validate_backup_zip(zf: zipfile.ZipFile) -> tuple[bool, str]:
"""Check that a zip looks like a Hermes backup.
Returns (ok, reason).
"""
names = zf.namelist()
if not names:
return False, "zip archive is empty"
# Look for telltale files that a hermes home would have
markers = {"config.yaml", ".env", "hermes_state.db", "memory_store.db"}
found = set()
for n in names:
# Could be at the root or one level deep (if someone zipped the directory)
basename = Path(n).name
if basename in markers:
found.add(basename)
if not found:
return False, (
"zip does not appear to be a Hermes backup "
"(no config.yaml, .env, or state databases found)"
)
return True, ""
def _detect_prefix(zf: zipfile.ZipFile) -> str:
"""Detect if the zip has a common directory prefix wrapping all entries.
Some tools zip as `.hermes/config.yaml` instead of `config.yaml`.
Returns the prefix to strip (empty string if none).
"""
names = [n for n in zf.namelist() if not n.endswith("/")]
if not names:
return ""
# Find common prefix
parts_list = [Path(n).parts for n in names]
# Check if all entries share a common first directory
first_parts = {p[0] for p in parts_list if len(p) > 1}
if len(first_parts) == 1:
prefix = first_parts.pop()
# Only strip if it looks like a hermes dir name
if prefix in (".hermes", "hermes"):
return prefix + "/"
return ""
def run_import(args) -> None:
    """Restore a Hermes backup from a zip file.

    Validates the archive (see ``_validate_backup_zip``), strips a wrapping
    directory prefix if present (see ``_detect_prefix``), and overlays the
    contents onto the current Hermes root. Prompts before overwriting an
    existing installation unless ``args.force`` is set. Entries whose
    resolved path would escape the Hermes root are rejected. Afterwards,
    profile wrapper scripts are recreated on a best-effort basis.

    Exits with status 1 on a missing/invalid archive or an aborted prompt.
    """
    zip_path = Path(args.zipfile).expanduser().resolve()
    if not zip_path.is_file():
        print(f"Error: File not found: {zip_path}")
        sys.exit(1)
    if not zipfile.is_zipfile(zip_path):
        print(f"Error: Not a valid zip file: {zip_path}")
        sys.exit(1)
    hermes_root = get_default_hermes_root()
    with zipfile.ZipFile(zip_path, "r") as zf:
        # Validate
        ok, reason = _validate_backup_zip(zf)
        if not ok:
            print(f"Error: {reason}")
            sys.exit(1)
        prefix = _detect_prefix(zf)
        # Directory entries end with "/" and are skipped; parents are
        # created on demand during extraction.
        members = [n for n in zf.namelist() if not n.endswith("/")]
        file_count = len(members)
        print(f"Backup contains {file_count} files")
        print(f"Target: {display_hermes_home()}")
        if prefix:
            print(f"Detected archive prefix: {prefix!r} (will be stripped)")
        # Check for existing installation
        has_config = (hermes_root / "config.yaml").exists()
        has_env = (hermes_root / ".env").exists()
        if (has_config or has_env) and not args.force:
            print()
            print("Warning: Target directory already has Hermes configuration.")
            print("Importing will overwrite existing files with backup contents.")
            print()
            try:
                answer = input("Continue? [y/N] ").strip().lower()
            except (EOFError, KeyboardInterrupt):
                print("\nAborted.")
                sys.exit(1)
            if answer not in ("y", "yes"):
                print("Aborted.")
                return
        # Extract
        print(f"\nImporting {file_count} files ...")
        hermes_root.mkdir(parents=True, exist_ok=True)
        errors = []
        restored = 0
        t0 = time.monotonic()
        for member in members:
            # Strip prefix if detected
            if prefix and member.startswith(prefix):
                rel = member[len(prefix):]
            else:
                rel = member
            if not rel:
                continue
            target = hermes_root / rel
            # Security: reject absolute paths and traversals
            try:
                target.resolve().relative_to(hermes_root.resolve())
            except ValueError:
                errors.append(f" {rel}: path traversal blocked")
                continue
            try:
                target.parent.mkdir(parents=True, exist_ok=True)
                with zf.open(member) as src, open(target, "wb") as dst:
                    dst.write(src.read())
                restored += 1
            except (PermissionError, OSError) as exc:
                errors.append(f" {rel}: {exc}")
            # NOTE(review): when restored is still 0 (every file so far
            # errored) this prints " 0/N files ..." each iteration — confirm
            # whether the counter check should instead gate on restored > 0.
            if restored % 500 == 0:
                print(f" {restored}/{file_count} files ...")
    elapsed = time.monotonic() - t0
    # Summary
    print()
    print(f"Import complete: {restored} files restored in {elapsed:.1f}s")
    print(f" Target: {display_hermes_home()}")
    if errors:
        print(f"\n Warnings ({len(errors)} files skipped):")
        for e in errors[:10]:
            print(e)
        if len(errors) > 10:
            print(f" ... and {len(errors) - 10} more")
    # Post-import: restore profile wrapper scripts
    profiles_dir = hermes_root / "profiles"
    restored_profiles = []
    if profiles_dir.is_dir():
        try:
            # Imported lazily: on a fresh machine the CLI package may not be
            # importable yet; the except ImportError below handles that.
            from hermes_cli.profiles import (
                create_wrapper_script, check_alias_collision,
                _is_wrapper_dir_in_path, _get_wrapper_dir,
            )
            for entry in sorted(profiles_dir.iterdir()):
                if not entry.is_dir():
                    continue
                profile_name = entry.name
                # Only create wrappers for directories with config
                if not (entry / "config.yaml").exists() and not (entry / ".env").exists():
                    continue
                collision = check_alias_collision(profile_name)
                if collision:
                    print(f" Skipped alias '{profile_name}': {collision}")
                    restored_profiles.append((profile_name, False))
                else:
                    wrapper = create_wrapper_script(profile_name)
                    restored_profiles.append((profile_name, wrapper is not None))
            if restored_profiles:
                created = [n for n, ok in restored_profiles if ok]
                skipped = [n for n, ok in restored_profiles if not ok]
                if created:
                    print(f"\n Profile aliases restored: {', '.join(created)}")
                if skipped:
                    print(f" Profile aliases skipped: {', '.join(skipped)}")
            if not _is_wrapper_dir_in_path():
                print(f"\n Note: {_get_wrapper_dir()} is not in your PATH.")
                print(' Add to your shell config (~/.bashrc or ~/.zshrc):')
                print(' export PATH="$HOME/.local/bin:$PATH"')
        except ImportError:
            # hermes_cli.profiles might not be available (fresh install)
            if any(profiles_dir.iterdir()):
                print(f"\n Profiles detected but aliases could not be created.")
                print(f" Run: hermes profile list (after installing hermes)")
    # Guidance
    print()
    if not (hermes_root / "hermes-agent").is_dir():
        print("Note: The hermes-agent codebase was not included in the backup.")
        print(" If this is a fresh install, run: hermes update")
    if restored_profiles:
        gw_profiles = [n for n, _ in restored_profiles]
        print("\nTo re-enable gateway services for profiles:")
        for pname in gw_profiles:
            print(f" hermes -p {pname} gateway install")
    print("Done. Your Hermes configuration has been restored.")

View file

@ -50,7 +50,7 @@ _OPENCLAW_SCRIPT_INSTALLED = (
)
# Known OpenClaw directory names (current + legacy)
_OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moldbot")
_OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moltbot")
def _warn_if_gateway_running(auto_yes: bool) -> None:
"""Check if a Hermes gateway is running with connected platforms.
@ -87,8 +87,8 @@ def _warn_if_gateway_running(auto_yes: bool) -> None:
print_info("Migration cancelled. Stop the gateway and try again.")
sys.exit(0)
# State files commonly found in OpenClaw workspace directories that cause
# confusion after migration (the agent discovers them and writes to them)
# State files commonly found in OpenClaw workspace directories — listed
# during cleanup to help the user decide whether to archive
_WORKSPACE_STATE_GLOBS = (
"*/todo.json",
"*/sessions/*",
@ -133,7 +133,7 @@ def _find_openclaw_dirs() -> list[Path]:
def _scan_workspace_state(source_dir: Path) -> list[tuple[Path, str]]:
"""Scan an OpenClaw directory for workspace state files that cause confusion.
"""Scan an OpenClaw directory for workspace state files.
Returns a list of (path, description) tuples.
"""
@ -216,7 +216,7 @@ def _cmd_migrate(args):
source_dir = Path.home() / ".openclaw"
if not source_dir.is_dir():
# Try legacy directory names
for legacy in (".clawdbot", ".moldbot"):
for legacy in (".clawdbot", ".moltbot"):
candidate = Path.home() / legacy
if candidate.is_dir():
source_dir = candidate
@ -384,65 +384,16 @@ def _cmd_migrate(args):
# Print results
_print_migration_report(report, dry_run=False)
# After successful migration, offer to archive the source directory
if report.get("summary", {}).get("migrated", 0) > 0:
_offer_source_archival(source_dir, auto_yes)
def _offer_source_archival(source_dir: Path, auto_yes: bool = False):
"""After migration, offer to rename the source directory to prevent state fragmentation.
OpenClaw workspace directories contain state files (todo.json, sessions, etc.)
that the agent may discover and write to, causing confusion. Renaming the
directory prevents this.
"""
if not source_dir.is_dir():
return
# Scan for state files that could cause problems
state_files = _scan_workspace_state(source_dir)
print()
print_header("Post-Migration Cleanup")
print_info("The OpenClaw directory still exists and contains workspace state files")
print_info("that can confuse the agent (todo lists, sessions, logs).")
if state_files:
print()
print(color(" Found state files:", Colors.YELLOW))
# Show up to 10 most relevant findings
for path, desc in state_files[:10]:
print(f" {desc}")
if len(state_files) > 10:
print(f" ... and {len(state_files) - 10} more")
print()
print_info(f"Recommend: rename {source_dir.name}/ to {source_dir.name}.pre-migration/")
print_info("This prevents the agent from discovering old workspace directories.")
print_info("You can always rename it back if needed.")
print()
if not auto_yes and not sys.stdin.isatty():
print_info("Non-interactive session — skipping archival.")
print_info("Run later with: hermes claw cleanup")
return
if auto_yes or prompt_yes_no(f"Archive {source_dir} now?", default=True):
try:
archive_path = _archive_directory(source_dir)
print_success(f"Archived: {source_dir}{archive_path}")
print_info("The original directory has been renamed, not deleted.")
print_info(f"To undo: mv {archive_path} {source_dir}")
except OSError as e:
print_error(f"Could not archive: {e}")
print_info(f"You can do it manually: mv {source_dir} {source_dir}.pre-migration")
else:
print_info("Skipped. You can archive later with: hermes claw cleanup")
# Source directory is left untouched — archiving is not the migration
# tool's responsibility. Users who want to clean up can run
# 'hermes claw cleanup' separately.
def _cmd_cleanup(args):
"""Archive leftover OpenClaw directories after migration.
Scans for OpenClaw directories that still exist after migration and offers
to rename them to .pre-migration to prevent state fragmentation.
to rename them to .pre-migration to free disk space.
"""
dry_run = getattr(args, "dry_run", False)
auto_yes = getattr(args, "yes", False)
@ -517,7 +468,7 @@ def _cmd_cleanup(args):
if state_files:
print()
print(color(f" {len(state_files)} state file(s) that could cause confusion:", Colors.YELLOW))
print(color(f" {len(state_files)} state file(s) found:", Colors.YELLOW))
for path, desc in state_files[:8]:
print(f" {desc}")
if len(state_files) > 8:

View file

@ -69,7 +69,8 @@ COMMAND_REGISTRY: list[CommandDef] = [
args_hint="[name]"),
CommandDef("branch", "Branch the current session (explore a different path)", "Session",
aliases=("fork",), args_hint="[name]"),
CommandDef("compress", "Manually compress conversation context", "Session"),
CommandDef("compress", "Manually compress conversation context", "Session",
args_hint="[focus topic]"),
CommandDef("rollback", "List or restore filesystem checkpoints", "Session",
args_hint="[number]"),
CommandDef("stop", "Kill all running background processes", "Session"),

View file

@ -38,6 +38,9 @@ _EXTRA_ENV_KEYS = frozenset({
"DINGTALK_CLIENT_ID", "DINGTALK_CLIENT_SECRET",
"FEISHU_APP_ID", "FEISHU_APP_SECRET", "FEISHU_ENCRYPT_KEY", "FEISHU_VERIFICATION_TOKEN",
"WECOM_BOT_ID", "WECOM_SECRET",
"WECOM_CALLBACK_CORP_ID", "WECOM_CALLBACK_CORP_SECRET", "WECOM_CALLBACK_AGENT_ID",
"WECOM_CALLBACK_TOKEN", "WECOM_CALLBACK_ENCODING_AES_KEY",
"WECOM_CALLBACK_HOST", "WECOM_CALLBACK_PORT",
"WEIXIN_ACCOUNT_ID", "WEIXIN_TOKEN", "WEIXIN_BASE_URL", "WEIXIN_CDN_BASE_URL",
"WEIXIN_HOME_CHANNEL", "WEIXIN_HOME_CHANNEL_NAME", "WEIXIN_DM_POLICY", "WEIXIN_GROUP_POLICY",
"WEIXIN_ALLOWED_USERS", "WEIXIN_GROUP_ALLOWED_USERS", "WEIXIN_ALLOW_ALL_USERS",
@ -47,6 +50,7 @@ _EXTRA_ENV_KEYS = frozenset({
"MATTERMOST_HOME_CHANNEL", "MATTERMOST_REPLY_MODE",
"MATRIX_PASSWORD", "MATRIX_ENCRYPTION", "MATRIX_DEVICE_ID", "MATRIX_HOME_ROOM",
"MATRIX_REQUIRE_MENTION", "MATRIX_FREE_RESPONSE_ROOMS", "MATRIX_AUTO_THREAD",
"MATRIX_RECOVERY_KEY",
})
import yaml
@ -140,6 +144,73 @@ def managed_error(action: str = "modify configuration"):
print(format_managed_message(action), file=sys.stderr)
# =============================================================================
# Container-aware CLI (NixOS container mode)
# =============================================================================
def _is_inside_container() -> bool:
"""Detect if we're already running inside a Docker/Podman container."""
# Standard Docker/Podman indicators
if os.path.exists("/.dockerenv"):
return True
# Podman uses /run/.containerenv
if os.path.exists("/run/.containerenv"):
return True
# Check cgroup for container runtime evidence (works for both Docker & Podman)
try:
with open("/proc/1/cgroup", "r") as f:
cgroup = f.read()
if "docker" in cgroup or "podman" in cgroup or "/lxc/" in cgroup:
return True
except OSError:
pass
return False
def get_container_exec_info() -> Optional[dict]:
    """Read container mode metadata from HERMES_HOME/.container-mode.

    Returns a dict with keys: backend, container_name, exec_user, hermes_bin
    or None if container mode is not active, we're already inside the
    container, or HERMES_DEV=1 is set.

    The .container-mode file is written by the NixOS activation script when
    container.enable = true. It tells the host CLI to exec into the container
    instead of running locally.
    """
    if os.environ.get("HERMES_DEV") == "1":
        return None
    if _is_inside_container():
        return None
    mode_path = get_hermes_home() / ".container-mode"
    fields: dict = {}
    try:
        with open(mode_path, "r") as fh:
            for raw in fh:
                entry = raw.strip()
                # Skip comments and anything that isn't a key=value pair.
                if entry.startswith("#") or "=" not in entry:
                    continue
                key, _, value = entry.partition("=")
                fields[key.strip()] = value.strip()
    except FileNotFoundError:
        return None
    # All other exceptions (PermissionError, malformed data, etc.) propagate
    return {
        "backend": fields.get("backend", "docker"),
        "container_name": fields.get("container_name", "hermes-agent"),
        "exec_user": fields.get("exec_user", "hermes"),
        "hermes_bin": fields.get("hermes_bin", "/data/current-package/bin/hermes"),
    }
# =============================================================================
# Config paths
# =============================================================================
@ -445,9 +516,11 @@ DEFAULT_CONFIG = {
"inline_diffs": True, # Show inline diff previews for write actions (write_file, patch, skill_manage)
"show_cost": False, # Show $ cost in the status bar (off by default)
"skin": "default",
"interim_assistant_messages": True, # Gateway: show natural mid-turn assistant status messages
"tool_progress_command": False, # Enable /verbose command in messaging gateway
"tool_progress_overrides": {}, # Per-platform overrides: {"signal": "off", "telegram": "all"}
"tool_progress_overrides": {}, # DEPRECATED — use display.platforms instead
"tool_preview_length": 0, # Max chars for tool call previews (0 = no limit, show full paths/commands)
"platforms": {}, # Per-platform display overrides: {"telegram": {"tool_progress": "all"}, "slack": {"tool_progress": "off"}}
},
# Privacy settings
@ -634,8 +707,16 @@ DEFAULT_CONFIG = {
"backup_count": 3, # Number of rotated backup files to keep
},
# Network settings — workarounds for connectivity issues.
"network": {
# Force IPv4 connections. On servers with broken or unreachable IPv6,
# Python tries AAAA records first and hangs for the full TCP timeout
# before falling back to IPv4. Set to true to skip IPv6 entirely.
"force_ipv4": False,
},
# Config schema version - bump this when adding new required fields
"_config_version": 14,
"_config_version": 16,
}
# =============================================================================
@ -1213,6 +1294,14 @@ OPTIONAL_ENV_VARS = {
"category": "messaging",
"advanced": True,
},
"MATRIX_RECOVERY_KEY": {
"description": "Matrix recovery key for cross-signing verification after device key rotation (from Element: Settings → Security → Recovery Key)",
"prompt": "Matrix recovery key",
"url": None,
"password": True,
"category": "messaging",
"advanced": True,
},
"BLUEBUBBLES_SERVER_URL": {
"description": "BlueBubbles server URL for iMessage integration (e.g. http://192.168.1.10:1234)",
"prompt": "BlueBubbles server URL",
@ -1862,6 +1951,44 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A
if not quiet:
print(f" ✓ Migrated legacy stt.model to provider-specific config")
# ── Version 14 → 15: add explicit gateway interim-message gate ──
if current_ver < 15:
config = read_raw_config()
display = config.get("display", {})
if not isinstance(display, dict):
display = {}
if "interim_assistant_messages" not in display:
display["interim_assistant_messages"] = True
config["display"] = display
results["config_added"].append("display.interim_assistant_messages=true (default)")
save_config(config)
if not quiet:
print(" ✓ Added display.interim_assistant_messages=true")
# ── Version 15 → 16: migrate tool_progress_overrides into display.platforms ──
if current_ver < 16:
config = read_raw_config()
display = config.get("display", {})
if not isinstance(display, dict):
display = {}
old_overrides = display.get("tool_progress_overrides")
if isinstance(old_overrides, dict) and old_overrides:
platforms = display.get("platforms", {})
if not isinstance(platforms, dict):
platforms = {}
for plat, mode in old_overrides.items():
if plat not in platforms:
platforms[plat] = {}
if "tool_progress" not in platforms[plat]:
platforms[plat]["tool_progress"] = mode
display["platforms"] = platforms
config["display"] = display
save_config(config)
if not quiet:
migrated = ", ".join(f"{p}={m}" for p, m in old_overrides.items())
print(f" ✓ Migrated tool_progress_overrides → display.platforms: {migrated}")
results["config_added"].append("display.platforms (migrated from tool_progress_overrides)")
if current_ver < latest_ver and not quiet:
print(f"Config version: {current_ver}{latest_ver}")

View file

@ -287,6 +287,129 @@ def _radio_numbered_fallback(
return cancel_returns
def curses_single_select(
    title: str,
    items: List[str],
    default_index: int = 0,
    *,
    cancel_label: str = "Cancel",
) -> int | None:
    """Curses single-select menu. Returns selected index or None on cancel.

    Works inside prompt_toolkit because curses.wrapper() restores the terminal
    safely, unlike simple_term_menu which conflicts with /dev/tty.

    Parameters
    ----------
    title:
        Heading rendered on the first screen row.
    items:
        Selectable entries; the returned index refers to this list.
    default_index:
        Row highlighted when the menu opens (clamped to the last row).
    cancel_label:
        Label appended as the final entry; choosing it returns None.
    """
    # Curses needs a real terminal; without one, signal "no selection" so
    # the caller can fall back to another input method.
    if not sys.stdin.isatty():
        return None
    try:
        import curses
        # _draw() runs inside curses.wrapper() and cannot return a value to
        # us directly, so the chosen index is smuggled out via this holder.
        result_holder: list = [None]
        # Cancel is rendered as the trailing entry; its index (== len(items))
        # doubles as the sentinel for "cancelled".
        all_items = list(items) + [cancel_label]
        cancel_idx = len(items)
        def _draw(stdscr):
            curses.curs_set(0)  # hide the hardware cursor while the menu is up
            if curses.has_colors():
                curses.start_color()
                curses.use_default_colors()  # lets -1 mean "terminal default bg"
                curses.init_pair(1, curses.COLOR_GREEN, -1)   # highlighted row
                curses.init_pair(2, curses.COLOR_YELLOW, -1)  # title row
            cursor = min(default_index, len(all_items) - 1)
            scroll_offset = 0
            while True:
                stdscr.clear()
                max_y, max_x = stdscr.getmaxyx()
                # Header (title + key hints). addnstr raises curses.error on
                # very small terminals — ignore and keep rendering the rest.
                try:
                    hattr = curses.A_BOLD
                    if curses.has_colors():
                        hattr |= curses.color_pair(2)
                    stdscr.addnstr(0, 0, title, max_x - 1, hattr)
                    stdscr.addnstr(
                        1, 0,
                        " ↑↓ navigate ENTER confirm ESC/q cancel",
                        max_x - 1, curses.A_DIM,
                    )
                except curses.error:
                    pass
                # Keep the cursor row on screen by sliding the scroll window.
                # Three rows are reserved above the list (title, hints, spacer).
                visible_rows = max_y - 3
                if cursor < scroll_offset:
                    scroll_offset = cursor
                elif cursor >= scroll_offset + visible_rows:
                    scroll_offset = cursor - visible_rows + 1
                for draw_i, i in enumerate(
                    range(scroll_offset, min(len(all_items), scroll_offset + visible_rows))
                ):
                    y = draw_i + 3
                    if y >= max_y - 1:
                        break  # no more screen rows available
                    arrow = "" if i == cursor else " "
                    line = f" {arrow} {all_items[i]}"
                    attr = curses.A_NORMAL
                    if i == cursor:
                        attr = curses.A_BOLD
                        if curses.has_colors():
                            attr |= curses.color_pair(1)
                    try:
                        stdscr.addnstr(y, 0, line, max_x - 1, attr)
                    except curses.error:
                        pass  # row clipped mid-resize — safe to skip
                stdscr.refresh()
                key = stdscr.getch()
                # Vim-style j/k work alongside arrow keys; movement wraps.
                if key in (curses.KEY_UP, ord("k")):
                    cursor = (cursor - 1) % len(all_items)
                elif key in (curses.KEY_DOWN, ord("j")):
                    cursor = (cursor + 1) % len(all_items)
                elif key in (curses.KEY_ENTER, 10, 13):  # Enter / LF / CR
                    result_holder[0] = cursor
                    return
                elif key in (27, ord("q")):  # ESC or q cancels
                    result_holder[0] = None
                    return
        curses.wrapper(_draw)
        # Discard keypresses buffered while curses owned the terminal so they
        # don't leak into whatever prompt the caller shows next.
        flush_stdin()
        # Selecting the trailing cancel entry is reported as None.
        if result_holder[0] is not None and result_holder[0] >= cancel_idx:
            return None
        return result_holder[0]
    except Exception:
        # Any curses failure (import error, broken TERM, odd tty) degrades to
        # a plain numbered text prompt instead of crashing the CLI.
        all_items = list(items) + [cancel_label]
        cancel_idx = len(items)
        return _numbered_single_fallback(title, all_items, cancel_idx)
def _numbered_single_fallback(
title: str,
items: List[str],
cancel_idx: int,
) -> int | None:
"""Text-based numbered fallback for single-select."""
print(f"\n {title}\n")
for i, label in enumerate(items, 1):
print(f" {i}. {label}")
print()
try:
val = input(f" Choice [1-{len(items)}]: ").strip()
if not val:
return None
idx = int(val) - 1
if 0 <= idx < len(items) and idx < cancel_idx:
return idx
if idx == cancel_idx:
return None
except (ValueError, KeyboardInterrupt, EOFError):
pass
return None
def _numbered_fallback(
title: str,
items: List[str],

View file

@ -119,6 +119,7 @@ def _configured_platforms() -> list[str]:
"dingtalk": "DINGTALK_CLIENT_ID",
"feishu": "FEISHU_APP_ID",
"wecom": "WECOM_BOT_ID",
"wecom_callback": "WECOM_CALLBACK_CORP_ID",
"weixin": "WEIXIN_ACCOUNT_ID",
}
return [name for name, env in checks.items() if os.getenv(env)]

View file

@ -1821,6 +1821,37 @@ _PLATFORMS = [
"help": "Chat ID for scheduled results and notifications."},
],
},
{
"key": "wecom_callback",
"label": "WeCom Callback (Self-Built App)",
"emoji": "💬",
"token_var": "WECOM_CALLBACK_CORP_ID",
"setup_instructions": [
"1. Go to WeCom Admin Console → Applications → Create Self-Built App",
"2. Note the Corp ID (top of admin console) and create a Corp Secret",
"3. Under Receive Messages, configure the callback URL to point to your server",
"4. Copy the Token and EncodingAESKey from the callback configuration",
"5. The adapter runs an HTTP server — ensure the port is reachable from WeCom",
"6. Restrict access with WECOM_CALLBACK_ALLOWED_USERS for production use",
],
"vars": [
{"name": "WECOM_CALLBACK_CORP_ID", "prompt": "Corp ID", "password": False,
"help": "Your WeCom enterprise Corp ID."},
{"name": "WECOM_CALLBACK_CORP_SECRET", "prompt": "Corp Secret", "password": True,
"help": "The secret for your self-built application."},
{"name": "WECOM_CALLBACK_AGENT_ID", "prompt": "Agent ID", "password": False,
"help": "The Agent ID of your self-built application."},
{"name": "WECOM_CALLBACK_TOKEN", "prompt": "Callback Token", "password": True,
"help": "The Token from your WeCom callback configuration."},
{"name": "WECOM_CALLBACK_ENCODING_AES_KEY", "prompt": "Encoding AES Key", "password": True,
"help": "The EncodingAESKey from your WeCom callback configuration."},
{"name": "WECOM_CALLBACK_PORT", "prompt": "Callback server port (default: 8645)", "password": False,
"help": "Port for the HTTP callback server."},
{"name": "WECOM_CALLBACK_ALLOWED_USERS", "prompt": "Allowed user IDs (comma-separated, or empty)", "password": False,
"is_allowlist": True,
"help": "Restrict which WeCom users can interact with the app."},
],
},
{
"key": "weixin",
"label": "Weixin / WeChat",

View file

@ -1,16 +1,18 @@
"""``hermes logs`` — view and filter Hermes log files.
Supports tailing, following, session filtering, level filtering, and
relative time ranges. All log files live under ``~/.hermes/logs/``.
Supports tailing, following, session filtering, level filtering,
component filtering, and relative time ranges. All log files live
under ``~/.hermes/logs/``.
Usage examples::
hermes logs # last 50 lines of agent.log
hermes logs -f # follow agent.log in real time
hermes logs errors # last 50 lines of errors.log
hermes logs gateway -n 100 # last 100 lines of gateway.log
hermes logs gateway -n 100 # last 100 lines of gateway.log
hermes logs --level WARNING # only WARNING+ lines
hermes logs --session abc123 # filter by session ID substring
hermes logs --component tools # only tool-related lines
hermes logs --since 1h # lines from the last hour
hermes logs --since 30m -f # follow, starting 30 min ago
"""
@ -20,7 +22,7 @@ import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
from typing import Optional, Sequence
from hermes_constants import get_hermes_home, display_hermes_home
@ -38,6 +40,15 @@ _TS_RE = re.compile(r"^(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})")
# Level extraction — matches " INFO ", " WARNING ", " ERROR ", " DEBUG ", " CRITICAL "
_LEVEL_RE = re.compile(r"\s(DEBUG|INFO|WARNING|ERROR|CRITICAL)\s")
# Logger name extraction — after level and optional session tag, the next
# non-space token before ":" is the logger name.
# Matches: "INFO gateway.run:" or "INFO [sess_abc] tools.terminal_tool:"
_LOGGER_NAME_RE = re.compile(
r"\s(?:DEBUG|INFO|WARNING|ERROR|CRITICAL)" # level
r"(?:\s+\[.*?\])?" # optional session tag
r"\s+(\S+):" # logger name
)
# Level ordering for >= filtering
_LEVEL_ORDER = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3, "CRITICAL": 4}
@ -79,12 +90,27 @@ def _extract_level(line: str) -> Optional[str]:
return m.group(1) if m else None
def _extract_logger_name(line: str) -> Optional[str]:
    """Return the logger name embedded in *line*, or None when absent."""
    match = _LOGGER_NAME_RE.search(line)
    if match:
        return match.group(1)
    return None
def _line_matches_component(line: str, prefixes: Sequence[str]) -> bool:
    """Return True when the line's logger name starts with any of *prefixes*.

    Lines with no recognizable logger name never match.
    """
    logger_name = _extract_logger_name(line)
    if logger_name is None:
        return False
    return any(logger_name.startswith(prefix) for prefix in prefixes)
def _matches_filters(
line: str,
*,
min_level: Optional[str] = None,
session_filter: Optional[str] = None,
since: Optional[datetime] = None,
component_prefixes: Optional[Sequence[str]] = None,
) -> bool:
"""Check if a log line passes all active filters."""
if since is not None:
@ -102,6 +128,10 @@ def _matches_filters(
if session_filter not in line:
return False
if component_prefixes is not None:
if not _line_matches_component(line, component_prefixes):
return False
return True
@ -113,6 +143,7 @@ def tail_log(
level: Optional[str] = None,
session: Optional[str] = None,
since: Optional[str] = None,
component: Optional[str] = None,
) -> None:
"""Read and display log lines, optionally following in real time.
@ -130,6 +161,8 @@ def tail_log(
Session ID substring to filter on.
since
Relative time string (e.g. ``"1h"``, ``"30m"``).
component
Component name to filter by (e.g. ``"gateway"``, ``"tools"``).
"""
filename = LOG_FILES.get(log_name)
if filename is None:
@ -155,13 +188,29 @@ def tail_log(
print(f"Invalid --level: {level!r}. Use DEBUG, INFO, WARNING, ERROR, or CRITICAL.")
sys.exit(1)
has_filters = min_level is not None or session is not None or since_dt is not None
# Resolve component to logger name prefixes
component_prefixes = None
if component:
from hermes_logging import COMPONENT_PREFIXES
component_lower = component.lower()
if component_lower not in COMPONENT_PREFIXES:
available = ", ".join(sorted(COMPONENT_PREFIXES))
print(f"Unknown component: {component!r}. Available: {available}")
sys.exit(1)
component_prefixes = COMPONENT_PREFIXES[component_lower]
has_filters = (
min_level is not None
or session is not None
or since_dt is not None
or component_prefixes is not None
)
# Read and display the tail
try:
lines = _read_tail(log_path, num_lines, has_filters=has_filters,
min_level=min_level, session_filter=session,
since=since_dt)
since=since_dt, component_prefixes=component_prefixes)
except PermissionError:
print(f"Permission denied: {log_path}")
sys.exit(1)
@ -172,6 +221,8 @@ def tail_log(
filter_parts.append(f"level>={min_level}")
if session:
filter_parts.append(f"session={session}")
if component:
filter_parts.append(f"component={component}")
if since:
filter_parts.append(f"since={since}")
filter_desc = f" [{', '.join(filter_parts)}]" if filter_parts else ""
@ -190,7 +241,7 @@ def tail_log(
# Follow mode — poll for new content
try:
_follow_log(log_path, min_level=min_level, session_filter=session,
since=since_dt)
since=since_dt, component_prefixes=component_prefixes)
except KeyboardInterrupt:
print("\n--- stopped ---")
@ -203,6 +254,7 @@ def _read_tail(
min_level: Optional[str] = None,
session_filter: Optional[str] = None,
since: Optional[datetime] = None,
component_prefixes: Optional[Sequence[str]] = None,
) -> list:
"""Read the last *num_lines* matching lines from a log file.
@ -215,7 +267,8 @@ def _read_tail(
filtered = [
l for l in raw_lines
if _matches_filters(l, min_level=min_level,
session_filter=session_filter, since=since)
session_filter=session_filter, since=since,
component_prefixes=component_prefixes)
]
return filtered[-num_lines:]
else:
@ -284,6 +337,7 @@ def _follow_log(
min_level: Optional[str] = None,
session_filter: Optional[str] = None,
since: Optional[datetime] = None,
component_prefixes: Optional[Sequence[str]] = None,
) -> None:
"""Poll a log file for new content and print matching lines."""
with open(path, "r", encoding="utf-8", errors="replace") as f:
@ -293,7 +347,8 @@ def _follow_log(
line = f.readline()
if line:
if _matches_filters(line, min_level=min_level,
session_filter=session_filter, since=since):
session_filter=session_filter, since=since,
component_prefixes=component_prefixes):
print(line, end="")
sys.stdout.flush()
else:

View file

@ -152,6 +152,18 @@ try:
except Exception:
pass # best-effort — don't crash the CLI if logging setup fails
# Apply IPv4 preference early, before any HTTP clients are created.
# NOTE(review): this assumes apply_ipv4_preference changes resolution
# process-wide and must run before any module caches a client — confirm
# against hermes_constants.
try:
    from hermes_cli.config import load_config as _load_config_early
    from hermes_constants import apply_ipv4_preference as _apply_ipv4
    _early_cfg = _load_config_early()
    _net = _early_cfg.get("network", {})
    # Guard the type: a malformed config.yaml could make "network" a scalar.
    if isinstance(_net, dict) and _net.get("force_ipv4"):
        _apply_ipv4(force=True)
    # These temporaries are only needed at import time — keep the module
    # namespace clean.
    del _early_cfg, _net
except Exception:
    pass  # best-effort — don't crash if config isn't available yet
import logging
import time as _time
from datetime import datetime
@ -529,6 +541,113 @@ def _resolve_last_session(source: str = "cli") -> Optional[str]:
return None
def _probe_container(cmd: list, backend: str, via_sudo: bool = False):
"""Run a container inspect probe, returning the CompletedProcess.
Catches TimeoutExpired specifically for a human-readable message;
all other exceptions propagate naturally.
"""
try:
return subprocess.run(cmd, capture_output=True, text=True, timeout=15)
except subprocess.TimeoutExpired:
label = f"sudo {backend}" if via_sudo else backend
print(
f"Error: timed out waiting for {label} to respond.\n"
f"The {backend} daemon may be unresponsive or starting up.",
file=sys.stderr,
)
sys.exit(1)
def _exec_in_container(container_info: dict, cli_args: list):
    """Replace the current process with a command inside the managed container.

    Probes whether sudo is needed (rootful containers), then os.execvp
    into the container. On success the Python process is replaced entirely
    and the container's exit code becomes the process exit code (OS semantics).
    On failure, OSError propagates naturally.

    Args:
        container_info: dict with backend, container_name, exec_user, hermes_bin
        cli_args: the original CLI arguments (everything after 'hermes')
    """
    import shutil
    backend = container_info["backend"]
    container_name = container_info["container_name"]
    exec_user = container_info["exec_user"]
    hermes_bin = container_info["hermes_bin"]
    # The runtime binary (docker/podman) must be resolvable before exec.
    runtime = shutil.which(backend)
    if not runtime:
        print(f"Error: {backend} not found on PATH. Cannot route to container.",
              file=sys.stderr)
        sys.exit(1)
    # Rootful containers (NixOS systemd service) are invisible to unprivileged
    # users — Podman uses per-user namespaces, Docker needs group access.
    # Probe whether the runtime can see the container; if not, try via sudo.
    sudo_path = None
    probe = _probe_container(
        [runtime, "inspect", "--format", "ok", container_name], backend,
    )
    if probe.returncode != 0:
        # Direct probe failed — retry through non-interactive sudo (-n), so a
        # password prompt can never hang scripted or piped invocations.
        sudo_path = shutil.which("sudo")
        if sudo_path:
            probe2 = _probe_container(
                [sudo_path, "-n", runtime, "inspect", "--format", "ok", container_name],
                backend, via_sudo=True,
            )
            if probe2.returncode != 0:
                # Sudo also couldn't see it — explain how to grant access.
                print(
                    f"Error: container '{container_name}' not found via {backend}.\n"
                    f"\n"
                    f"The container is likely running as root. Your user cannot see it\n"
                    f"because {backend} uses per-user namespaces. Grant passwordless\n"
                    f"sudo for {backend} — the -n (non-interactive) flag is required\n"
                    f"because a password prompt would hang or break piped commands.\n"
                    f"\n"
                    f"On NixOS:\n"
                    f"\n"
                    f'  security.sudo.extraRules = [{{\n'
                    f'    users = [ "{os.getenv("USER", "your-user")}" ];\n'
                    f'    commands = [{{ command = "{runtime}"; options = [ "NOPASSWD" ]; }}];\n'
                    f'  }}];\n'
                    f"\n"
                    f"Or run: sudo hermes {' '.join(cli_args)}",
                    file=sys.stderr,
                )
                sys.exit(1)
        else:
            # No sudo available at all — suggest the manual escalation path.
            print(
                f"Error: container '{container_name}' not found via {backend}.\n"
                f"The container may be running under root. Try: sudo hermes {' '.join(cli_args)}",
                file=sys.stderr,
            )
            sys.exit(1)
    # Allocate a TTY only when stdin actually is one, so piped stdin keeps
    # working (-i alone keeps stdin open without a pseudo-terminal).
    is_tty = sys.stdin.isatty()
    tty_flags = ["-it"] if is_tty else ["-i"]
    # Forward terminal/locale env vars so colors and unicode render the same
    # inside the container as on the host.
    env_flags = []
    for var in ("TERM", "COLORTERM", "LANG", "LC_ALL"):
        val = os.environ.get(var)
        if val:
            env_flags.extend(["-e", f"{var}={val}"])
    # sudo_path is only set when the direct probe failed but the sudo probe
    # succeeded — reuse the same escalation for the actual exec.
    cmd_prefix = [sudo_path, "-n", runtime] if sudo_path else [runtime]
    exec_cmd = (
        cmd_prefix + ["exec"]
        + tty_flags
        + ["-u", exec_user]
        + env_flags
        + [container_name, hermes_bin]
        + cli_args
    )
    # execvp replaces this process image; nothing after this line runs on
    # success, and OSError propagates on failure.
    os.execvp(exec_cmd[0], exec_cmd)
def _resolve_session_by_name_or_id(name_or_id: str) -> Optional[str]:
"""Resolve a session name (title) or ID to a session ID.
@ -1202,6 +1321,7 @@ def select_provider_and_model(args=None):
"base_url": base_url,
"api_key": entry.get("api_key", ""),
"model": entry.get("model", ""),
"api_mode": entry.get("api_mode", ""),
}
return custom_provider_map
@ -2050,6 +2170,12 @@ def _model_flow_named_custom(config, provider_info):
model["base_url"] = base_url
if api_key:
model["api_key"] = api_key
# Apply api_mode from custom_providers entry, or clear stale value
custom_api_mode = provider_info.get("api_mode", "")
if custom_api_mode:
model["api_mode"] = custom_api_mode
else:
model.pop("api_mode", None) # let runtime auto-detect from URL
save_config(cfg)
deactivate_provider()
@ -2587,8 +2713,11 @@ def _model_flow_api_key_provider(config, provider_id, current_model=""):
print()
override = ""
if override and base_url_env:
save_env_value(base_url_env, override)
effective_base = override
if not override.startswith(("http://", "https://")):
print(" Invalid URL — must start with http:// or https://. Keeping current value.")
else:
save_env_value(base_url_env, override)
effective_base = override
# Model selection — resolution order:
# 1. models.dev registry (cached, filtered for agentic/tool-capable models)
@ -2925,6 +3054,18 @@ def cmd_config(args):
config_command(args)
def cmd_backup(args):
    """CLI entry point: archive the Hermes home directory into a zip file."""
    # Imported lazily so the heavy backup machinery isn't loaded for other
    # subcommands.
    from hermes_cli.backup import run_backup as _run_backup
    _run_backup(args)
def cmd_import(args):
    """CLI entry point: restore a previously created Hermes backup zip."""
    # Imported lazily so the restore machinery isn't loaded for other
    # subcommands.
    from hermes_cli.backup import run_import as _run_import
    _run_import(args)
def cmd_version(args):
    """Print the Hermes Agent version and release date to stdout."""
    # __version__ / __release_date__ are module-level metadata.
    banner = f"Hermes Agent v{__version__} ({__release_date__})"
    print(banner)
@ -4042,6 +4183,26 @@ def cmd_update(args):
print()
print("✓ Update complete!")
# Write exit code *before* the gateway restart attempt.
# When running as ``hermes update --gateway`` (spawned by the gateway's
# /update command), this process lives inside the gateway's systemd
# cgroup. ``systemctl restart hermes-gateway`` kills everything in the
# cgroup (KillMode=mixed → SIGKILL to remaining processes), including
# us and the wrapping bash shell. The shell never reaches its
# ``printf $status > .update_exit_code`` epilogue, so the exit-code
# marker file is never created. The new gateway's update watcher then
# polls for 30 minutes and sends a spurious timeout message.
#
# Writing the marker here — after git pull + pip install succeed but
# before we attempt the restart — ensures the new gateway sees it
# regardless of how we die.
if gateway_mode:
_exit_code_path = get_hermes_home() / ".update_exit_code"
try:
_exit_code_path.write_text("0")
except OSError:
pass
# Auto-restart ALL gateways after update.
# The code update (git pull) is shared across all profiles, so every
# running gateway needs restarting to pick up the new code.
@ -4475,6 +4636,7 @@ def cmd_logs(args):
level=getattr(args, "level", None),
session=getattr(args, "session", None),
since=getattr(args, "since", None),
component=getattr(args, "component", None),
)
@ -5066,7 +5228,43 @@ For more help on a command:
help="Show redacted API key prefixes (first/last 4 chars) instead of just set/not set"
)
dump_parser.set_defaults(func=cmd_dump)
# =========================================================================
# backup command
# =========================================================================
backup_parser = subparsers.add_parser(
"backup",
help="Back up Hermes home directory to a zip file",
description="Create a zip archive of your entire Hermes configuration, "
"skills, sessions, and data (excludes the hermes-agent codebase)"
)
backup_parser.add_argument(
"-o", "--output",
help="Output path for the zip file (default: ~/hermes-backup-<timestamp>.zip)"
)
backup_parser.set_defaults(func=cmd_backup)
# =========================================================================
# import command
# =========================================================================
import_parser = subparsers.add_parser(
"import",
help="Restore a Hermes backup from a zip file",
description="Extract a previously created Hermes backup into your "
"Hermes home directory, restoring configuration, skills, "
"sessions, and data"
)
import_parser.add_argument(
"zipfile",
help="Path to the backup zip file"
)
import_parser.add_argument(
"--force", "-f",
action="store_true",
help="Overwrite existing files without confirmation"
)
import_parser.set_defaults(func=cmd_import)
# =========================================================================
# config command
# =========================================================================
@ -5416,6 +5614,8 @@ For more help on a command:
mcp_add_p.add_argument("--command", help="Stdio command (e.g. npx)")
mcp_add_p.add_argument("--args", nargs="*", default=[], help="Arguments for stdio command")
mcp_add_p.add_argument("--auth", choices=["oauth", "header"], help="Auth method")
mcp_add_p.add_argument("--preset", help="Known MCP preset name")
mcp_add_p.add_argument("--env", nargs="*", default=[], help="Environment variables for stdio servers (KEY=VALUE)")
mcp_rm_p = mcp_sub.add_parser("remove", aliases=["rm"], help="Remove an MCP server")
mcp_rm_p.add_argument("name", help="Server name to remove")
@ -5898,6 +6098,7 @@ Examples:
hermes logs gateway -n 100 Show last 100 lines of gateway.log
hermes logs --level WARNING Only show WARNING and above
hermes logs --session abc123 Filter by session ID
hermes logs --component tools Only show tool-related lines
hermes logs --since 1h Lines from the last hour
hermes logs --since 30m -f Follow, starting from 30 min ago
hermes logs list List available log files with sizes
@ -5927,6 +6128,10 @@ Examples:
"--since", metavar="TIME",
help="Show lines since TIME ago (e.g. 1h, 30m, 2d)",
)
logs_parser.add_argument(
"--component", metavar="NAME",
help="Filter by component: gateway, agent, tools, cli, cron",
)
logs_parser.set_defaults(func=cmd_logs)
# =========================================================================
@ -5935,9 +6140,22 @@ Examples:
# Pre-process argv so unquoted multi-word session names after -c / -r
# are merged into a single token before argparse sees them.
# e.g. ``hermes -c Pokemon Agent Dev`` → ``hermes -c 'Pokemon Agent Dev'``
# ── Container-aware routing ────────────────────────────────────────
# When NixOS container mode is active, route ALL subcommands into
# the managed container. This MUST run before parse_args() so that
# --help, unrecognised flags, and every subcommand are forwarded
# transparently instead of being intercepted by argparse on the host.
from hermes_cli.config import get_container_exec_info
container_info = get_container_exec_info()
if container_info:
_exec_in_container(container_info, sys.argv[1:])
# Unreachable: os.execvp never returns on success (process is replaced)
# and raises OSError on failure (which propagates as a traceback).
sys.exit(1)
_processed_argv = _coalesce_session_name_args(sys.argv[1:])
args = parser.parse_args(_processed_argv)
# Handle --version flag
if args.version:
cmd_version(args)

View file

@ -9,7 +9,6 @@ configuration in ~/.hermes/config.yaml under the ``mcp_servers`` key.
"""
import asyncio
import getpass
import logging
import os
import re
@ -28,6 +27,11 @@ from hermes_constants import display_hermes_home
logger = logging.getLogger(__name__)
_ENV_VAR_NAME_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")
_MCP_PRESETS: Dict[str, Dict[str, Any]] = {}
# ─── UI Helpers ───────────────────────────────────────────────────────────────
@ -98,6 +102,59 @@ def _env_key_for_server(name: str) -> str:
return f"MCP_{name.upper().replace('-', '_')}_API_KEY"
def _parse_env_assignments(raw_env: Optional[List[str]]) -> Dict[str, str]:
"""Parse ``KEY=VALUE`` strings from CLI args into an env dict."""
parsed: Dict[str, str] = {}
for item in raw_env or []:
text = str(item or "").strip()
if not text:
continue
if "=" not in text:
raise ValueError(f"Invalid --env value '{text}' (expected KEY=VALUE)")
key, value = text.split("=", 1)
key = key.strip()
if not key:
raise ValueError(f"Invalid --env value '{text}' (missing variable name)")
if not _ENV_VAR_NAME_RE.match(key):
raise ValueError(f"Invalid --env variable name '{key}'")
parsed[key] = value
return parsed
def _apply_mcp_preset(
name: str,
*,
preset_name: Optional[str],
url: Optional[str],
command: Optional[str],
cmd_args: List[str],
server_config: Dict[str, Any],
) -> tuple[Optional[str], Optional[str], List[str], bool]:
"""Apply a known MCP preset when transport details were omitted."""
if not preset_name:
return url, command, cmd_args, False
preset = _MCP_PRESETS.get(preset_name)
if not preset:
raise ValueError(f"Unknown MCP preset: {preset_name}")
if url or command:
return url, command, cmd_args, False
url = preset.get("url")
command = preset.get("command")
cmd_args = list(preset.get("args") or [])
if url:
server_config["url"] = url
if command:
server_config["command"] = command
if cmd_args:
server_config["args"] = cmd_args
return url, command, cmd_args, True
# ─── Discovery (temporary connect) ───────────────────────────────────────────
def _probe_single_server(
@ -166,13 +223,35 @@ def cmd_mcp_add(args):
command = getattr(args, "command", None)
cmd_args = getattr(args, "args", None) or []
auth_type = getattr(args, "auth", None)
preset_name = getattr(args, "preset", None)
raw_env = getattr(args, "env", None)
server_config: Dict[str, Any] = {}
try:
explicit_env = _parse_env_assignments(raw_env)
url, command, cmd_args, _preset_applied = _apply_mcp_preset(
name,
preset_name=preset_name,
url=url,
command=command,
cmd_args=list(cmd_args),
server_config=server_config,
)
except ValueError as exc:
_error(str(exc))
return
if url and explicit_env:
_error("--env is only supported for stdio MCP servers (--command or stdio presets)")
return
# Validate transport
if not url and not command:
_error("Must specify --url <endpoint> or --command <cmd>")
_error("Must specify --url <endpoint>, --command <cmd>, or --preset <name>")
_info("Examples:")
_info(' hermes mcp add ink --url "https://mcp.ml.ink/mcp"')
_info(' hermes mcp add github --command npx --args @modelcontextprotocol/server-github')
_info(' hermes mcp add myserver --preset mypreset')
return
# Check if server already exists
@ -183,13 +262,15 @@ def cmd_mcp_add(args):
return
# Build initial config
server_config: Dict[str, Any] = {}
if url:
server_config["url"] = url
else:
server_config["command"] = command
if cmd_args:
server_config["args"] = cmd_args
if explicit_env:
server_config["env"] = explicit_env
# ── Authentication ────────────────────────────────────────────────
@ -627,6 +708,7 @@ def mcp_command(args):
_info("hermes mcp serve Run as MCP server")
_info("hermes mcp add <name> --url <endpoint> Add an MCP server")
_info("hermes mcp add <name> --command <cmd> Add a stdio server")
_info("hermes mcp add <name> --preset <preset> Add from a known preset")
_info("hermes mcp remove <name> Remove a server")
_info("hermes mcp list List servers")
_info("hermes mcp test <name> Test connection")

View file

@ -74,13 +74,13 @@ _DOT_TO_HYPHEN_PROVIDERS: frozenset[str] = frozenset({
_STRIP_VENDOR_ONLY_PROVIDERS: frozenset[str] = frozenset({
"copilot",
"copilot-acp",
"openai-codex",
})
# Providers whose native naming is authoritative -- pass through unchanged.
_AUTHORITATIVE_NATIVE_PROVIDERS: frozenset[str] = frozenset({
"gemini",
"huggingface",
"openai-codex",
})
# Direct providers that accept bare native names but should repair a matching
@ -360,7 +360,11 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str:
# --- Copilot: strip matching provider prefix, keep dots ---
if provider in _STRIP_VENDOR_ONLY_PROVIDERS:
return _strip_matching_provider_prefix(name, provider)
stripped = _strip_matching_provider_prefix(name, provider)
if stripped == name and name.startswith("openai/"):
# openai-codex maps openai/gpt-5.4 -> gpt-5.4
return name.split("/", 1)[1]
return stripped
# --- DeepSeek: map to one of two canonical names ---
if provider == "deepseek":

View file

@ -839,8 +839,11 @@ def list_authenticated_providers(
if any(os.environ.get(ev) for ev in pcfg.api_key_env_vars):
has_creds = True
break
if not has_creds and overlay.auth_type in ("oauth_device_code", "oauth_external", "external_process"):
# These use auth stores, not env vars — check for auth.json entries
# Check auth store and credential pool for non-env-var credentials.
# This applies to OAuth providers AND api_key providers that also
# support OAuth (e.g. anthropic supports both API key and Claude Code
# OAuth via external credential files).
if not has_creds:
try:
from hermes_cli.auth import _load_auth_store
store = _load_auth_store()
@ -853,6 +856,38 @@ def list_authenticated_providers(
has_creds = True
except Exception as exc:
logger.debug("Auth store check failed for %s: %s", pid, exc)
# Fallback: check the credential pool with full auto-seeding.
# This catches credentials that exist in external stores (e.g.
# Codex CLI ~/.codex/auth.json) which _seed_from_singletons()
# imports on demand but aren't in the raw auth.json yet.
if not has_creds:
try:
from agent.credential_pool import load_pool
pool = load_pool(hermes_slug)
if pool.has_credentials():
has_creds = True
except Exception as exc:
logger.debug("Credential pool check failed for %s: %s", hermes_slug, exc)
# Fallback: check external credential files directly.
# The credential pool gates anthropic behind
# is_provider_explicitly_configured() to prevent auxiliary tasks
# from silently consuming Claude Code tokens (PR #4210).
# But the /model picker is discovery-oriented — we WANT to show
# providers the user can switch to, even if they aren't currently
# configured.
if not has_creds and hermes_slug == "anthropic":
try:
from agent.anthropic_adapter import (
read_claude_code_credentials,
read_hermes_oauth_credentials,
)
hermes_creds = read_hermes_oauth_credentials()
cc_creds = read_claude_code_credentials()
if (hermes_creds and hermes_creds.get("accessToken")) or \
(cc_creds and cc_creds.get("accessToken")):
has_creds = True
except Exception as exc:
logger.debug("Anthropic external creds check failed: %s", exc)
if not has_creds:
continue

View file

@ -546,6 +546,20 @@ _PROVIDER_ALIASES = {
}
def get_default_model_for_provider(provider: str) -> str:
    """Return the default model for a provider, or empty string if unknown.

    The first entry in ``_PROVIDER_MODELS`` serves as the default — the
    model a user would be offered first in the ``hermes model`` picker.
    Used as a fallback when the user has configured a provider but never
    selected a model (e.g. ``hermes auth add openai-codex`` without
    ``hermes model``).
    """
    candidates = _PROVIDER_MODELS.get(provider)
    if not candidates:
        # Unknown provider, or a provider with an empty model list.
        return ""
    return candidates[0]
def _openrouter_model_is_free(pricing: Any) -> bool:
"""Return True when both prompt and completion pricing are zero."""
if not isinstance(pricing, dict):
@ -1809,6 +1823,35 @@ def validate_requested_model(
"message": message,
}
# OpenAI Codex has its own catalog path; /v1/models probing is not the right validation path.
if normalized == "openai-codex":
try:
codex_models = provider_model_ids("openai-codex")
except Exception:
codex_models = []
if codex_models:
if requested_for_lookup in set(codex_models):
return {
"accepted": True,
"persist": True,
"recognized": True,
"message": None,
}
suggestions = get_close_matches(requested_for_lookup, codex_models, n=3, cutoff=0.5)
suggestion_text = ""
if suggestions:
suggestion_text = "\n Similar models: " + ", ".join(f"`{s}`" for s in suggestions)
return {
"accepted": True,
"persist": True,
"recognized": False,
"message": (
f"Note: `{requested}` was not found in the OpenAI Codex model listing. "
f"It may still work if your account has access to it."
f"{suggestion_text}"
),
}
# Probe the live API to check if the model actually exists
api_models = fetch_api_models(api_key, base_url)

View file

@ -33,6 +33,7 @@ PLATFORMS: OrderedDict[str, PlatformInfo] = OrderedDict([
("dingtalk", PlatformInfo(label="💬 DingTalk", default_toolset="hermes-dingtalk")),
("feishu", PlatformInfo(label="🪽 Feishu", default_toolset="hermes-feishu")),
("wecom", PlatformInfo(label="💬 WeCom", default_toolset="hermes-wecom")),
("wecom_callback", PlatformInfo(label="💬 WeCom Callback", default_toolset="hermes-wecom-callback")),
("weixin", PlatformInfo(label="💬 Weixin", default_toolset="hermes-weixin")),
("webhook", PlatformInfo(label="🔗 Webhook", default_toolset="hermes-webhook")),
("api_server", PlatformInfo(label="🌐 API Server", default_toolset="hermes-api-server")),

View file

@ -2005,6 +2005,12 @@ def _setup_wecom():
_gateway_setup_wecom()
def _setup_wecom_callback():
    """Configure WeCom Callback (self-built app) via gateway setup."""
    # Delegate to the gateway module's setup routine of the same name.
    from hermes_cli.gateway import _setup_wecom_callback as _delegate
    _delegate()
def _setup_bluebubbles():
"""Configure BlueBubbles iMessage gateway."""
print_header("BlueBubbles (iMessage)")
@ -2130,6 +2136,7 @@ _GATEWAY_PLATFORMS = [
("DingTalk", "DINGTALK_CLIENT_ID", _setup_dingtalk),
("Feishu / Lark", "FEISHU_APP_ID", _setup_feishu),
("WeCom (Enterprise WeChat)", "WECOM_BOT_ID", _setup_wecom),
("WeCom Callback (Self-Built App)", "WECOM_CALLBACK_CORP_ID", _setup_wecom_callback),
("Weixin (WeChat)", "WEIXIN_ACCOUNT_ID", _setup_weixin),
("BlueBubbles (iMessage)", "BLUEBUBBLES_SERVER_URL", _setup_bluebubbles),
("Webhooks (GitHub, GitLab, etc.)", "WEBHOOK_ENABLED", _setup_webhooks),

View file

@ -302,6 +302,7 @@ def show_status(args):
"DingTalk": ("DINGTALK_CLIENT_ID", None),
"Feishu": ("FEISHU_APP_ID", "FEISHU_HOME_CHANNEL"),
"WeCom": ("WECOM_BOT_ID", "WECOM_HOME_CHANNEL"),
"WeCom Callback": ("WECOM_CALLBACK_CORP_ID", None),
"Weixin": ("WEIXIN_ACCOUNT_ID", "WEIXIN_HOME_CHANNEL"),
"BlueBubbles": ("BLUEBUBBLES_SERVER_URL", "BLUEBUBBLES_HOME_CHANNEL"),
}

351
hermes_cli/tips.py Normal file
View file

@ -0,0 +1,351 @@
"""Random tips shown at CLI session start to help users discover features."""
import random
from typing import Optional
# ---------------------------------------------------------------------------
# Tip corpus — one-liners covering slash commands, CLI flags, config,
# keybindings, tools, gateway, skills, profiles, and workflow tricks.
# ---------------------------------------------------------------------------
TIPS = [
# --- Slash Commands ---
"/btw <question> asks a quick side question without tools or history — great for clarifications.",
"/background <prompt> runs a task in a separate session while your current one stays free.",
"/branch forks the current session so you can explore a different direction without losing progress.",
"/compress manually compresses conversation context when things get long.",
"/rollback lists filesystem checkpoints — restore files the agent modified to any prior state.",
"/rollback diff 2 previews what changed since checkpoint 2 without restoring anything.",
"/rollback 2 src/file.py restores a single file from a specific checkpoint.",
"/title \"my project\" names your session — resume it later with /resume or hermes -c.",
"/resume picks up where you left off in a previously named session.",
"/queue <prompt> queues a message for the next turn without interrupting the current one.",
"/undo removes the last user/assistant exchange from the conversation.",
"/retry resends your last message — useful when the agent's response wasn't quite right.",
"/verbose cycles tool progress display: off → new → all → verbose.",
"/reasoning high increases the model's thinking depth. /reasoning show displays the reasoning.",
"/fast toggles priority processing for faster API responses (provider-dependent).",
"/yolo skips all dangerous command approval prompts for the rest of the session.",
"/model lets you switch models mid-session — try /model sonnet or /model gpt-5.",
"/model --global changes your default model permanently.",
"/personality pirate sets a fun personality — 14 built-in options from kawaii to shakespeare.",
"/skin changes the CLI theme — try ares, mono, slate, poseidon, or charizard.",
"/statusbar toggles a persistent bar showing model, tokens, context fill %, cost, and duration.",
"/tools disable browser temporarily removes browser tools for the current session.",
"/browser connect attaches browser tools to your running Chrome instance via CDP.",
"/plugins lists installed plugins and their status.",
"/cron manages scheduled tasks — set up recurring prompts with delivery to any platform.",
"/reload-mcp hot-reloads MCP server configuration without restarting.",
"/usage shows token usage, cost breakdown, and session duration.",
"/insights shows usage analytics for the last 30 days.",
"/paste checks your clipboard for an image and attaches it to your next message.",
"/profile shows which profile is active and its home directory.",
"/config shows your current configuration at a glance.",
"/stop kills all running background processes spawned by the agent.",
# --- @ Context References ---
"@file:path/to/file.py injects file contents directly into your message.",
"@file:main.py:10-50 injects only lines 10-50 of a file.",
"@folder:src/ injects a directory tree listing.",
"@diff injects your unstaged git changes into the message.",
"@staged injects your staged git changes (git diff --staged).",
"@git:5 injects the last 5 commits with full patches.",
"@url:https://example.com fetches and injects a web page's content.",
"Typing @ triggers filesystem path completion — navigate to any file interactively.",
"Combine multiple references: \"Review @file:main.py and @file:test.py for consistency.\"",
# --- Keybindings ---
"Alt+Enter (or Ctrl+J) inserts a newline for multi-line input.",
"Ctrl+C interrupts the agent. Double-press within 2 seconds to force exit.",
"Ctrl+Z suspends Hermes to the background — run fg in your shell to resume.",
"Tab accepts auto-suggestion ghost text or autocompletes slash commands.",
"Type a new message while the agent is working to interrupt and redirect it.",
"Alt+V pastes an image from your clipboard into the conversation.",
"Pasting 5+ lines auto-saves to a file and inserts a compact reference instead.",
# --- CLI Flags ---
"hermes -c resumes your most recent CLI session. hermes -c \"project name\" resumes by title.",
"hermes -w creates an isolated git worktree — perfect for parallel agent workflows.",
"hermes -w -q \"Fix issue #42\" combines worktree isolation with a one-shot query.",
"hermes chat -t web,terminal enables only specific toolsets for a focused session.",
"hermes chat -s github-pr-workflow preloads a skill at launch.",
"hermes chat -q \"query\" runs a single non-interactive query and exits.",
"hermes chat --max-turns 200 overrides the default 90-iteration limit per turn.",
"hermes chat --checkpoints enables filesystem snapshots before every destructive file change.",
"hermes --yolo bypasses all dangerous command approval prompts for the entire session.",
"hermes chat --source telegram tags the session for filtering in hermes sessions list.",
"hermes -p work chat runs under a specific profile without changing your default.",
# --- CLI Subcommands ---
"hermes doctor --fix diagnoses and auto-repairs config and dependency issues.",
"hermes dump outputs a compact setup summary — great for bug reports.",
"hermes config set KEY VALUE auto-routes secrets to .env and everything else to config.yaml.",
"hermes config edit opens config.yaml in your default editor.",
"hermes config check scans for missing or stale configuration options.",
"hermes sessions browse opens an interactive session picker with search.",
"hermes sessions stats shows session counts by platform and database size.",
"hermes sessions prune --older-than 30 cleans up old sessions.",
"hermes skills search react --source skills-sh searches the skills.sh public directory.",
"hermes skills check scans installed hub skills for upstream updates.",
"hermes skills tap add myorg/skills-repo adds a custom GitHub skill source.",
"hermes skills snapshot export setup.json exports your skill configuration for backup or sharing.",
"hermes mcp add github --command npx adds MCP servers from the command line.",
"hermes mcp serve runs Hermes itself as an MCP server for other agents.",
"hermes auth add lets you add multiple API keys for credential pool rotation.",
"hermes completion bash >> ~/.bashrc enables tab completion for all commands and profiles.",
"hermes logs -f follows agent.log in real time. --level WARNING --since 1h filters output.",
"hermes backup creates a zip backup of your entire Hermes home directory.",
"hermes profile create coder creates an isolated profile that becomes its own command.",
"hermes profile create work --clone copies your current config and keys to a new profile.",
"hermes update syncs new bundled skills to ALL profiles automatically.",
"hermes gateway install sets up Hermes as a system service (systemd/launchd).",
"hermes memory setup lets you configure an external memory provider (Honcho, Mem0, etc.).",
"hermes webhook subscribe creates event-driven webhook routes with HMAC validation.",
# --- Configuration ---
"Set display.bell_on_complete: true in config.yaml to hear a bell when long tasks finish.",
"Set display.streaming: true to see tokens appear in real time as the model generates.",
"Set display.show_reasoning: true to watch the model's chain-of-thought reasoning.",
"Set display.compact: true to reduce whitespace in output for denser information.",
"Set display.busy_input_mode: queue to queue messages instead of interrupting the agent.",
"Set display.resume_display: minimal to skip the full conversation recap on session resume.",
"Set compression.threshold: 0.50 to control when auto-compression fires (default: 50% of context).",
"Set agent.max_turns: 200 to let the agent take more tool-calling steps per turn.",
"Set file_read_max_chars: 200000 to increase the max content per read_file call.",
"Set approvals.mode: smart to let an LLM auto-approve safe commands and auto-deny dangerous ones.",
"Set fallback_model in config.yaml to automatically fail over to a backup provider.",
"Set privacy.redact_pii: true to hash user IDs and phone numbers before sending to the LLM.",
"Set browser.record_sessions: true to auto-record browser sessions as WebM videos.",
"Set worktree: true in config.yaml to always create a git worktree (same as hermes -w).",
"Set security.website_blocklist.enabled: true to block specific domains from web tools.",
"Set cron.wrap_response: false to deliver raw agent output without the cron header/footer.",
"HERMES_TIMEZONE overrides the server timezone with any IANA timezone string.",
"Environment variable substitution works in config.yaml: use ${VAR_NAME} syntax.",
"Quick commands in config.yaml run shell commands instantly with zero token usage.",
"Custom personalities can be defined in config.yaml under agent.personalities.",
"provider_routing controls OpenRouter provider sorting, whitelisting, and blacklisting.",
# --- Tools & Capabilities ---
"execute_code runs Python scripts that call Hermes tools programmatically — results stay out of context.",
"delegate_task spawns up to 3 concurrent sub-agents with isolated contexts for parallel work.",
"web_extract works on PDF URLs — pass any PDF link and it converts to markdown.",
"search_files is ripgrep-backed and faster than grep — use it instead of terminal grep.",
"patch uses 9 fuzzy matching strategies so minor whitespace differences won't break edits.",
"patch supports V4A format for bulk multi-file edits in a single call.",
"read_file suggests similar filenames when a file isn't found.",
"read_file auto-deduplicates — re-reading an unchanged file returns a lightweight stub.",
"browser_vision takes a screenshot and analyzes it with AI — works for CAPTCHAs and visual content.",
"browser_console can evaluate JavaScript expressions in the page context.",
"image_generate creates images with FLUX 2 Pro and automatic 2x upscaling.",
"text_to_speech converts text to audio — plays as voice bubbles on Telegram.",
"send_message can reach any connected messaging platform from within a session.",
"The todo tool helps the agent track complex multi-step tasks during a session.",
"session_search performs full-text search across ALL past conversations.",
"The agent automatically saves preferences, corrections, and environment facts to memory.",
"mixture_of_agents routes hard problems through 4 frontier LLMs collaboratively.",
"Terminal commands support background mode with notify_on_complete for long-running tasks.",
"Terminal background processes support watch_patterns to alert on specific output lines.",
"The terminal tool supports 6 backends: local, Docker, SSH, Modal, Daytona, and Singularity.",
# --- Profiles ---
"Each profile gets its own config, API keys, memory, sessions, skills, and cron jobs.",
"Profile names become shell commands — 'hermes profile create coder' creates the 'coder' command.",
"hermes profile export coder -o backup.tar.gz creates a portable profile archive.",
"If two profiles accidentally share a bot token, the second gateway is blocked with a clear error.",
# --- Sessions ---
"Sessions auto-generate descriptive titles after the first exchange — no manual naming needed.",
"Session titles support lineage: \"my project\"\"my project #2\"\"my project #3\".",
"When exiting, Hermes prints a resume command with session ID and stats.",
"hermes sessions export backup.jsonl exports all sessions for backup or analysis.",
"hermes -r SESSION_ID resumes any specific past session by its ID.",
# --- Memory ---
"Memory is a frozen snapshot — changes appear in the system prompt only at next session start.",
"Memory entries are automatically scanned for prompt injection and exfiltration patterns.",
"The agent has two memory stores: personal notes (~2200 chars) and user profile (~1375 chars).",
"Corrections you give the agent (\"no, do it this way\") are often auto-saved to memory.",
# --- Skills ---
"Over 80 bundled skills covering github, creative, mlops, productivity, research, and more.",
"Every installed skill automatically becomes a slash command — type / to see them all.",
"hermes skills install official/security/1password installs optional skills from the repo.",
"Skills can restrict to specific OS platforms — some only load on macOS or Linux.",
"skills.external_dirs in config.yaml lets you load skills from custom directories.",
"The agent can create its own skills as procedural memory using skill_manage.",
"The plan skill saves markdown plans under .hermes/plans/ in the active workspace.",
# --- Cron & Scheduling ---
"Cron jobs can attach skills: hermes cron add --skill blogwatcher \"Check for new posts\".",
"Cron delivery targets include telegram, discord, slack, email, sms, and 12+ more platforms.",
"If a cron response starts with [SILENT], delivery is suppressed — useful for monitoring-only jobs.",
"Cron supports relative delays (30m), intervals (every 2h), cron expressions, and ISO timestamps.",
"Cron jobs run in completely fresh agent sessions — prompts must be self-contained.",
# --- Voice ---
"Voice mode works with zero API keys if faster-whisper is installed (free local speech-to-text).",
"Five TTS providers available: Edge TTS (free), ElevenLabs, OpenAI, NeuTTS (free local), MiniMax.",
"/voice on enables voice mode in the CLI. Ctrl+B toggles push-to-talk recording.",
"Streaming TTS plays sentences as they generate — you don't wait for the full response.",
"Voice messages on Telegram, Discord, WhatsApp, and Slack are auto-transcribed.",
# --- Gateway & Messaging ---
"Hermes runs on 18 platforms: Telegram, Discord, Slack, WhatsApp, Signal, Matrix, email, and more.",
"hermes gateway install sets it up as a system service that starts on boot.",
"DingTalk uses Stream Mode — no webhooks or public URL needed.",
"BlueBubbles brings iMessage to Hermes via a local macOS server.",
"Webhook routes support HMAC validation, rate limiting, and event filtering.",
"The API server exposes an OpenAI-compatible endpoint compatible with Open WebUI and LibreChat.",
"Discord voice channel mode: the bot joins VC, transcribes speech, and talks back.",
"group_sessions_per_user: true gives each person their own session in group chats.",
"/sethome marks a chat as the home channel for cron job deliveries.",
"The gateway supports inactivity-based timeouts — active agents can run indefinitely.",
# --- Security ---
"Dangerous command approval has 4 tiers: once, session, always (permanent allowlist), deny.",
"Smart approval mode uses an LLM to auto-approve safe commands and flag dangerous ones.",
"SSRF protection blocks private networks, loopback, link-local, and cloud metadata addresses.",
"Tirith pre-exec scanning detects homograph URL spoofing and pipe-to-interpreter patterns.",
"MCP subprocesses receive a filtered environment — only safe system vars pass through.",
"Context files (.hermes.md, AGENTS.md) are security-scanned for prompt injection before loading.",
"command_allowlist in config.yaml permanently approves specific shell command patterns.",
# --- Context & Compression ---
"Context auto-compresses when it reaches the threshold — memories are flushed and history summarized.",
"The status bar turns yellow, then orange, then red as context fills up.",
"SOUL.md at ~/.hermes/SOUL.md is the agent's primary identity — customize it to shape behavior.",
"Hermes loads project context from .hermes.md, AGENTS.md, CLAUDE.md, or .cursorrules (first match).",
"Subdirectory AGENTS.md files are discovered progressively as the agent navigates into folders.",
"Context files are capped at 20,000 characters with smart head/tail truncation.",
# --- Browser ---
"Five browser providers: local Chromium, Browserbase, Browser Use, Camofox, and Firecrawl.",
"Camofox is an anti-detection browser — Firefox fork with C++ fingerprint spoofing.",
"browser_navigate returns a page snapshot automatically — no need to call browser_snapshot after.",
"browser_vision with annotate=true overlays numbered labels on interactive elements.",
# --- MCP ---
"MCP servers are configured in config.yaml — both stdio and HTTP transports supported.",
"Per-server tool filtering: tools.include whitelists and tools.exclude blacklists specific tools.",
"MCP servers auto-generate toolsets at runtime — hermes tools can toggle them per platform.",
"MCP OAuth support: auth: oauth enables browser-based authorization with PKCE.",
# --- Checkpoints & Rollback ---
"Checkpoints have zero overhead when no files are modified — enabled by default.",
"A pre-rollback snapshot is saved automatically so you can undo the undo.",
"/rollback also undoes the conversation turn, so the agent doesn't remember rolled-back changes.",
"Checkpoints use shadow repos in ~/.hermes/checkpoints/ — your project's .git is never touched.",
# --- Batch & Data ---
"batch_runner.py processes hundreds of prompts in parallel for training data generation.",
"hermes chat -Q enables quiet mode for programmatic use — suppresses banner and spinner.",
"Trajectory saving (--save-trajectories) captures full tool-use traces for model training.",
# --- Plugins ---
"Three plugin types: general (tools/hooks), memory providers, and context engines.",
"hermes plugins install owner/repo installs plugins directly from GitHub.",
"8 external memory providers available: Honcho, OpenViking, Mem0, Hindsight, and more.",
"Plugin hooks include pre_tool_call, post_tool_call, pre_llm_call, and post_llm_call.",
# --- Miscellaneous ---
"Prompt caching (Anthropic) reduces costs by reusing cached system prompt prefixes.",
"The agent auto-generates session titles in a background thread — zero latency impact.",
"Smart model routing can auto-route simple queries to a cheaper model.",
"Slash commands support prefix matching: /h resolves to /help, /mod to /model.",
"Dragging a file path into the terminal auto-attaches images or sends as context.",
".worktreeinclude in your repo root lists gitignored files to copy into worktrees.",
"hermes acp runs Hermes as an ACP server for VS Code, Zed, and JetBrains integration.",
"Custom providers: save named endpoints in config.yaml under custom_providers.",
"HERMES_EPHEMERAL_SYSTEM_PROMPT injects a system prompt that's never persisted to history.",
"credential_pool_strategies supports fill_first, round_robin, least_used, and random rotation.",
"hermes login supports OAuth-based auth for Nous and OpenAI Codex providers.",
"The API server supports both Chat Completions and Responses API with server-side state.",
"tool_preview_length: 0 in config shows full file paths in the spinner's activity feed.",
"hermes status --deep runs deeper diagnostic checks across all components.",
# --- Hidden Gems & Power-User Tricks ---
"BOOT.md at ~/.hermes/BOOT.md runs automatically on every gateway start — use it for startup checks.",
"Cron jobs can attach a Python script (--script) whose stdout is injected into the prompt as context.",
"Cron scripts live in ~/.hermes/scripts/ and run before the agent — perfect for data collection pipelines.",
"prefill_messages_file in config.yaml injects few-shot examples into every API call, never saved to history.",
"SOUL.md completely replaces the agent's default identity — rewrite it to make Hermes your own.",
"SOUL.md is auto-seeded with a default personality on first run. Edit ~/.hermes/SOUL.md to customize.",
"/compress <focus topic> allocates 60-70% of the summary budget to your topic and aggressively trims the rest.",
"On second+ compression, the compressor updates the previous summary instead of starting from scratch.",
"Before a gateway session reset, Hermes auto-flushes important facts to memory in the background.",
"network.force_ipv4: true in config.yaml fixes hangs on servers with broken IPv6 — monkey-patches socket.",
"The terminal tool annotates common exit codes: grep returning 1 = 'No matches found (not an error)'.",
"Failed foreground terminal commands auto-retry up to 3 times with exponential backoff (2s, 4s, 8s).",
"Bare sudo commands are auto-rewritten to pipe SUDO_PASSWORD from .env — no interactive prompt needed.",
"execute_code has built-in helpers: json_parse() for tolerant parsing, shell_quote(), and retry() with backoff.",
"execute_code's 7 sandbox tools (web_search, terminal, read/write/search/patch) use RPC — never enter context.",
"Reading the same file region 3+ times triggers a warning. At 4+, it's hard-blocked to prevent loops.",
"write_file and patch detect if a file was externally modified since the last read and warn about staleness.",
"V4A patch format supports Add File, Delete File, and Move File directives — not just Update.",
"MCP servers can request LLM completions back via sampling — the agent becomes a tool for the server.",
"MCP servers send notifications/tools/list_changed to trigger automatic tool re-registration without restart.",
"delegate_task with acp_command: 'claude' spawns Claude Code as a child agent from any platform.",
"Delegation has a heartbeat thread — child activity propagates to the parent, preventing gateway timeouts.",
"When a provider returns HTTP 402 (payment required), the auxiliary client auto-falls back to the next one.",
"agent.tool_use_enforcement steers models that describe actions instead of calling tools — auto for GPT/Codex.",
"agent.restart_drain_timeout (default 60s) lets running agents finish before a gateway restart takes effect.",
"The gateway caches AIAgent instances per session — destroying this cache breaks Anthropic prompt caching.",
"Any website can expose skills via /.well-known/skills/index.json — the skills hub discovers them automatically.",
"The skills audit log at ~/.hermes/skills/.hub/audit.log tracks every install and removal operation.",
"Stale git worktrees are auto-cleaned: 24-72h old with no unpushed commits get pruned on startup.",
"Each profile gets its own subprocess HOME at HERMES_HOME/home/ — isolated git, ssh, npm, gh configs.",
"HERMES_HOME_MODE env var (octal, e.g. 0701) sets custom directory permissions for web server traversal.",
"Container mode: place .container-mode in HERMES_HOME and the host CLI auto-execs into the container.",
"Ctrl+C has 5 priority tiers: cancel recording → cancel prompts → cancel picker → interrupt agent → exit.",
"Every interrupt during an agent run is logged to ~/.hermes/interrupt_debug.log with timestamps.",
"BROWSER_CDP_URL connects browser tools to any running Chrome — accepts WebSocket, HTTP, or host:port.",
"BROWSERBASE_ADVANCED_STEALTH=true enables advanced anti-detection with custom Chromium (Scale Plan).",
"The CLI auto-switches to compact mode in terminals narrower than 80 columns.",
"Quick commands support two types: exec (run shell command directly) and alias (redirect to another command).",
"Per-task delegation model: delegation.model and delegation.provider in config route subagents to cheaper models.",
"delegation.reasoning_effort independently controls thinking depth for subagents.",
"display.platforms in config.yaml allows per-platform display overrides: {telegram: {tool_progress: all}}.",
"human_delay.mode in config simulates human typing speed — configurable min_ms/max_ms range.",
"Config version migrations run automatically on load — new config keys appear without manual intervention.",
"GPT and Codex models get special system prompt guidance for tool discipline and mandatory tool use.",
"Gemini models get tailored directives for absolute paths, parallel tool calls, and non-interactive commands.",
"context.engine in config.yaml can be set to a plugin name for alternative context management strategies.",
"Browser pages over 8000 tokens are auto-summarized by the auxiliary LLM before returning to the agent.",
"The compressor does a cheap pre-pass: tool outputs over 200 chars are replaced with placeholders before the LLM runs.",
"When compression fails, further attempts are paused for 10 minutes to avoid API hammering.",
"Long dangerous commands (>70 chars) get a 'view' option in the approval prompt to see the full text first.",
"Audio level visualization shows ▁▂▃▄▅▆▇ bars during voice recording based on microphone RMS levels.",
"Profile names cannot collide with existing PATH binaries — 'hermes profile create ls' would be rejected.",
"hermes profile create backup --clone-all copies everything (config, keys, SOUL.md, memories, skills, sessions).",
"The voice record key is configurable via voice.record_key in config.yaml — not just Ctrl+B.",
".cursorrules and .cursor/rules/*.mdc files are auto-detected and loaded as project context.",
"Context files support 10+ prompt injection patterns — invisible Unicode, 'ignore instructions', exfil attempts.",
"GPT-5 and Codex use 'developer' role instead of 'system' in the message format.",
"Per-task auxiliary overrides: auxiliary.vision.provider, auxiliary.compression.model, etc. in config.yaml.",
"The auxiliary client treats 'main' as a provider alias — resolves to your actual primary provider + model.",
"Smart routing can auto-route simple queries to a cheaper model — set smart_model_routing.enabled: true.",
"hermes claw migrate --dry-run previews OpenClaw migration without writing anything.",
"File paths pasted with quotes or escaped spaces are handled automatically — no manual cleanup needed.",
"Slash commands never trigger the large-paste collapse — /command with big arguments works correctly.",
"In interrupt mode, slash commands typed during agent execution bypass interrupt logic and run immediately.",
"HERMES_DEV=1 bypasses container mode detection for local development.",
"Each MCP server gets its own toolset (mcp-servername) that can be toggled independently via hermes tools.",
"MCP ${ENV_VAR} placeholders in config are resolved at server spawn — including vars from ~/.hermes/.env.",
"Skills from trusted repos (NousResearch) get a 'trusted' security level; community skills get extra scanning.",
"The skills quarantine at ~/.hermes/skills/.hub/quarantine/ holds skills pending security review.",
]
def get_random_tip(exclude_recent: int = 0) -> str:
    """Return one randomly selected tip from the corpus.

    Args:
        exclude_recent: currently unused; reserved for future
            deduplication across sessions.
    """
    # random.choice(seq) is equivalent to indexing with randrange(len(seq)).
    return TIPS[random.randrange(len(TIPS))]
def get_tip_count() -> int:
    """Return how many tips are currently in the corpus."""
    return len(TIPS)

View file

@ -216,6 +216,51 @@ def get_env_path() -> Path:
return get_hermes_home() / ".env"
# ─── Network Preferences ─────────────────────────────────────────────────────
def apply_ipv4_preference(force: bool = False) -> None:
    """Monkey-patch ``socket.getaddrinfo`` to prefer IPv4 connections.

    On servers with broken or unreachable IPv6, Python tries AAAA records
    first and hangs for the full TCP timeout before falling back to IPv4.
    This affects httpx, requests, urllib, the OpenAI SDK — everything that
    uses ``socket.getaddrinfo``.

    When *force* is True, patches ``getaddrinfo`` so that calls with
    ``family=AF_UNSPEC`` (the default) resolve as ``AF_INET`` instead,
    skipping IPv6 entirely. If no A record exists, falls back to the
    original unfiltered resolution so pure-IPv6 hosts still work.

    Safe to call multiple times — only patches once.

    Set ``network.force_ipv4: true`` in ``config.yaml`` to enable.
    """
    if not force:
        return

    import socket

    # Idempotence guard: a previous call already installed the wrapper.
    if getattr(socket.getaddrinfo, "_hermes_ipv4_patched", False):
        return

    _unpatched = socket.getaddrinfo

    def _prefer_ipv4(host, port, family=0, type=0, proto=0, flags=0):
        # A non-zero family means the caller asked for something specific
        # (AF_INET6, AF_UNIX, ...) — honor it untouched.
        if family != 0:
            return _unpatched(host, port, family, type, proto, flags)
        try:
            # AF_UNSPEC: resolve IPv4-only first to skip the AAAA attempt.
            return _unpatched(host, port, socket.AF_INET, type, proto, flags)
        except socket.gaierror:
            # No A record — fall back to full resolution (pure-IPv6 hosts).
            return _unpatched(host, port, family, type, proto, flags)

    _prefer_ipv4._hermes_ipv4_patched = True  # type: ignore[attr-defined]
    socket.getaddrinfo = _prefer_ipv4  # type: ignore[assignment]
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
OPENROUTER_MODELS_URL = f"{OPENROUTER_BASE_URL}/models"

View file

@ -7,16 +7,28 @@ gateway call early in their startup path. All log files live under
Log files produced:
    agent.log   — INFO+, all agent/tool/session activity (the main log)
    errors.log  — WARNING+, errors and warnings only (quick triage)
    gateway.log — INFO+, gateway-only events (created when mode="gateway")
Both files use ``RotatingFileHandler`` with ``RedactingFormatter`` so
All files use ``RotatingFileHandler`` with ``RedactingFormatter`` so
secrets are never written to disk.
Component separation:
gateway.log only receives records from ``gateway.*`` loggers
platform adapters, session management, slash commands, delivery.
agent.log remains the catch-all (everything goes there).
Session context:
Call ``set_session_context(session_id)`` at the start of a conversation
and ``clear_session_context()`` when done. All log lines emitted on
that thread will include ``[session_id]`` for filtering/correlation.
"""
import logging
import os
import threading
from logging.handlers import RotatingFileHandler
from pathlib import Path
from typing import Optional
from typing import Optional, Sequence
from hermes_constants import get_config_path, get_hermes_home
@ -25,9 +37,14 @@ from hermes_constants import get_config_path, get_hermes_home
# unless ``force=True``.
_logging_initialized = False
# Default log format — includes timestamp, level, logger name, and message.
_LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s: %(message)s"
_LOG_FORMAT_VERBOSE = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Thread-local storage for per-conversation session context.
_session_context = threading.local()
# Default log format — includes timestamp, level, optional session tag,
# logger name, and message. The ``%(session_tag)s`` field is guaranteed to
# exist on every LogRecord via _install_session_record_factory() below.
_LOG_FORMAT = "%(asctime)s %(levelname)s%(session_tag)s %(name)s: %(message)s"
_LOG_FORMAT_VERBOSE = "%(asctime)s - %(name)s - %(levelname)s%(session_tag)s - %(message)s"
# Third-party loggers that are noisy at DEBUG/INFO level.
_NOISY_LOGGERS = (
@ -48,6 +65,99 @@ _NOISY_LOGGERS = (
)
# ---------------------------------------------------------------------------
# Public session context API
# ---------------------------------------------------------------------------
def set_session_context(session_id: str) -> None:
    """Bind *session_id* to the current thread for log tagging.

    Every log record emitted on this thread afterwards carries
    ``[session_id]`` in the formatted output. Call this at the start of
    ``run_conversation()``.
    """
    setattr(_session_context, "session_id", session_id)
def clear_session_context() -> None:
    """Unbind the session ID from the current thread.

    Optional — ``set_session_context()`` overwrites the previous value,
    so explicit clearing only matters when the thread is reused for
    non-conversation work after ``run_conversation()`` returns.
    """
    _session_context.session_id = None
# ---------------------------------------------------------------------------
# Record factory — injects session_tag into every LogRecord at creation
# ---------------------------------------------------------------------------
def _install_session_record_factory() -> None:
    """Wrap the global LogRecord factory to inject a ``session_tag`` field.

    A record factory runs for EVERY record created in the process —
    including records propagated from child loggers and records handled
    by third-party handlers — unlike a ``logging.Filter`` attached to a
    single handler or logger. That guarantees ``%(session_tag)s`` is
    always present in format strings, so no handler can hit a KeyError
    by using our format without a dedicated session filter.

    Idempotent — a marker attribute on the installed factory prevents
    double-wrapping if this module is reloaded.
    """
    previous = logging.getLogRecordFactory()
    if getattr(previous, "_hermes_session_injector", False):
        # Already wrapped — nothing to do.
        return

    def _tagging_factory(*args, **kwargs):
        record = previous(*args, **kwargs)
        session_id = getattr(_session_context, "session_id", None)
        # Pre-formatted with a leading space so format strings can embed
        # %(session_tag)s unconditionally; empty when no session is bound.
        record.session_tag = f" [{session_id}]" if session_id else ""  # type: ignore[attr-defined]
        return record

    _tagging_factory._hermes_session_injector = True  # type: ignore[attr-defined]
    logging.setLogRecordFactory(_tagging_factory)


# Install immediately on import — session_tag is present on all records
# from this point forward, even before setup_logging() is called.
_install_session_record_factory()
# ---------------------------------------------------------------------------
# Filters
# ---------------------------------------------------------------------------
class _ComponentFilter(logging.Filter):
"""Only pass records whose logger name starts with one of *prefixes*.
Used to route gateway-specific records to ``gateway.log`` while
keeping ``agent.log`` as the catch-all.
"""
def __init__(self, prefixes: Sequence[str]) -> None:
super().__init__()
self._prefixes = tuple(prefixes)
def filter(self, record: logging.LogRecord) -> bool:
return record.name.startswith(self._prefixes)
# Logger name prefixes that belong to each component.
# Used by _ComponentFilter and exposed for ``hermes logs --component``.
# Keys are component names; values are tuples of logger-name prefixes,
# matched with str.startswith in _ComponentFilter.filter.
COMPONENT_PREFIXES = {
    "gateway": ("gateway",),
    "agent": ("agent", "run_agent", "model_tools", "batch_runner"),
    "tools": ("tools",),
    "cli": ("hermes_cli", "cli"),
    "cron": ("cron",),
}
# ---------------------------------------------------------------------------
# Main setup
# ---------------------------------------------------------------------------
def setup_logging(
*,
hermes_home: Optional[Path] = None,
@ -78,8 +188,9 @@ def setup_logging(
Number of rotated backup files to keep.
Defaults to 3 or the value from config.yaml ``logging.backup_count``.
mode
Hint for the caller context: ``"cli"``, ``"gateway"``, ``"cron"``.
Currently used only for log format tuning (gateway includes PID).
Caller context: ``"cli"``, ``"gateway"``, ``"cron"``.
When ``"gateway"``, an additional ``gateway.log`` file is created
that receives only gateway-component records.
force
Re-run setup even if it has already been called.
@ -130,6 +241,18 @@ def setup_logging(
formatter=RedactingFormatter(_LOG_FORMAT),
)
# --- gateway.log (INFO+, gateway component only) ------------------------
if mode == "gateway":
_add_rotating_handler(
root,
log_dir / "gateway.log",
level=logging.INFO,
max_bytes=5 * 1024 * 1024,
backup_count=3,
formatter=RedactingFormatter(_LOG_FORMAT),
log_filter=_ComponentFilter(COMPONENT_PREFIXES["gateway"]),
)
# Ensure root logger level is low enough for the handlers to fire.
if root.level == logging.NOTSET or root.level > level:
root.setLevel(level)
@ -218,9 +341,16 @@ def _add_rotating_handler(
max_bytes: int,
backup_count: int,
formatter: logging.Formatter,
log_filter: Optional[logging.Filter] = None,
) -> None:
"""Add a ``RotatingFileHandler`` to *logger*, skipping if one already
exists for the same resolved file path (idempotent).
Parameters
----------
log_filter
Optional filter to attach to the handler (e.g. ``_ComponentFilter``
for gateway.log).
"""
resolved = path.resolve()
for existing in logger.handlers:
@ -236,6 +366,8 @@ def _add_rotating_handler(
)
handler.setLevel(level)
handler.setFormatter(formatter)
if log_filter is not None:
handler.addFilter(log_filter)
logger.addHandler(handler)

View file

@ -499,6 +499,16 @@
default = "ubuntu:24.04";
description = "OCI container image. The container pulls this at runtime via Docker/Podman.";
};
hostUsers = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Interactive users who get a ~/.hermes symlink to the service
stateDir. These users are automatically added to the hermes group.
'';
example = [ "sidbin" ];
};
};
};
@ -557,6 +567,25 @@
environment.variables.HERMES_HOME = "${cfg.stateDir}/.hermes";
})
# ── Host user group membership ─────────────────────────────────────
(lib.mkIf (cfg.container.enable && cfg.container.hostUsers != []) {
users.users = lib.genAttrs cfg.container.hostUsers (user: {
extraGroups = [ cfg.group ];
});
})
# ── Warnings ──────────────────────────────────────────────────────
(lib.mkIf (cfg.container.enable && !cfg.addToSystemPackages && cfg.container.hostUsers != []) {
warnings = [
''
services.hermes-agent: container.enable is true and container.hostUsers
is set, but addToSystemPackages is false. Without a host-installed hermes
binary, container routing will not work for interactive users.
Set addToSystemPackages = true or ensure hermes is on PATH.
''
];
})
# ── Directories ───────────────────────────────────────────────────
{
systemd.tmpfiles.rules = [
@ -611,6 +640,59 @@
chown ${cfg.user}:${cfg.group} ${cfg.stateDir}/.hermes/.managed
chmod 0644 ${cfg.stateDir}/.hermes/.managed
# Container mode metadata — tells the host CLI to exec into the
# container instead of running locally. Removed when container mode
# is disabled so the host CLI falls back to native execution.
${if cfg.container.enable then ''
cat > ${cfg.stateDir}/.hermes/.container-mode <<'HERMES_CONTAINER_MODE_EOF'
# Written by NixOS activation script. Do not edit manually.
backend=${cfg.container.backend}
container_name=${containerName}
exec_user=${cfg.user}
hermes_bin=${containerDataDir}/current-package/bin/hermes
HERMES_CONTAINER_MODE_EOF
chown ${cfg.user}:${cfg.group} ${cfg.stateDir}/.hermes/.container-mode
chmod 0644 ${cfg.stateDir}/.hermes/.container-mode
'' else ''
rm -f ${cfg.stateDir}/.hermes/.container-mode
# Remove symlink bridge for hostUsers
${lib.concatStringsSep "\n" (map (user:
let
userHome = config.users.users.${user}.home;
symlinkPath = "${userHome}/.hermes";
in ''
if [ -L "${symlinkPath}" ] && [ "$(readlink "${symlinkPath}")" = "${cfg.stateDir}/.hermes" ]; then
rm -f "${symlinkPath}"
echo "hermes-agent: removed symlink ${symlinkPath}"
fi
'') cfg.container.hostUsers)}
''}
# ── Symlink bridge for interactive users ───────────────────────
# Create ~/.hermes -> stateDir/.hermes for each hostUser so the
# host CLI shares state with the container service.
# Only runs when container mode is enabled.
${lib.optionalString cfg.container.enable
(lib.concatStringsSep "\n" (map (user:
let
userHome = config.users.users.${user}.home;
symlinkPath = "${userHome}/.hermes";
target = "${cfg.stateDir}/.hermes";
in ''
if [ -d "${symlinkPath}" ] && [ ! -L "${symlinkPath}" ]; then
# Real directory — back it up, then create symlink.
# (ln -sfn cannot atomically replace a directory.)
_backup="${symlinkPath}.bak.$(date +%s)"
echo "hermes-agent: backing up existing ${symlinkPath} to $_backup"
mv "${symlinkPath}" "$_backup"
fi
# For everything else (existing symlink, doesn't exist, etc.)
# ln -sfn handles it: replaces symlinks, creates new ones.
ln -sfn "${target}" "${symlinkPath}"
chown -h ${user}:${cfg.group} "${symlinkPath}"
'') cfg.container.hostUsers))}
# Seed auth file if provided
${lib.optionalString (cfg.authFile != null) ''
${if cfg.authFileForceOverwrite then ''

View file

@ -376,6 +376,24 @@ def backup_existing(path: Path, backup_root: Path) -> Optional[Path]:
return dest
# ── Brand rewriting ─────────────────────────────────────────
# Rewrite legacy OpenClaw brand names to "Hermes" in migrated text so
# memory entries, user profiles, SOUL.md, and workspace instructions read
# as self-referential to the new agent identity.
_REBRAND_PATTERNS: List[Tuple[re.Pattern, str]] = [
    (re.compile(r'\bOpen[\s-]?Claw\b', re.IGNORECASE), 'Hermes'),
    (re.compile(r'\bClawdBot\b', re.IGNORECASE), 'Hermes'),
    (re.compile(r'\bMoltBot\b', re.IGNORECASE), 'Hermes'),
]


def rebrand_text(text: str) -> str:
    """Replace OpenClaw / ClawdBot / MoltBot brand names with Hermes."""
    rebranded = text
    for pattern, replacement in _REBRAND_PATTERNS:
        rebranded = pattern.sub(replacement, rebranded)
    return rebranded
def parse_existing_memory_entries(path: Path) -> List[str]:
if not path.exists():
return []
@ -782,12 +800,13 @@ class Migrator:
path.write_text("\n".join(entries) + "\n", encoding="utf-8")
return path
def copy_file(self, source: Path, destination: Path, kind: str) -> None:
def copy_file(self, source: Path, destination: Path, kind: str,
transform: Optional[Any] = None) -> None:
if not source or not source.exists():
return
if destination.exists():
if sha256_file(source) == sha256_file(destination):
if not transform and sha256_file(source) == sha256_file(destination):
self.record(kind, source, destination, "skipped", "Target already matches source")
return
if not self.overwrite:
@ -797,7 +816,13 @@ class Migrator:
if self.execute:
backup_path = self.maybe_backup(destination)
ensure_parent(destination)
shutil.copy2(source, destination)
if transform:
content = read_text(source)
content = transform(content)
destination.write_text(content, encoding="utf-8")
shutil.copystat(source, destination)
else:
shutil.copy2(source, destination)
self.record(kind, source, destination, "migrated", backup=str(backup_path) if backup_path else None)
else:
self.record(kind, source, destination, "migrated", "Would copy")
@ -807,7 +832,7 @@ class Migrator:
if not source:
self.record("soul", None, self.target_root / "SOUL.md", "skipped", "No OpenClaw SOUL.md found")
return
self.copy_file(source, self.target_root / "SOUL.md", kind="soul")
self.copy_file(source, self.target_root / "SOUL.md", kind="soul", transform=rebrand_text)
def migrate_workspace_agents(self) -> None:
source = self.source_candidate(
@ -821,7 +846,7 @@ class Migrator:
self.record("workspace-agents", source, None, "skipped", "No workspace target was provided")
return
destination = self.workspace_target / WORKSPACE_INSTRUCTIONS_FILENAME
self.copy_file(source, destination, kind="workspace-agents")
self.copy_file(source, destination, kind="workspace-agents", transform=rebrand_text)
def migrate_memory(self, source: Optional[Path], destination: Path, limit: int, kind: str) -> None:
if not source or not source.exists():
@ -832,6 +857,7 @@ class Migrator:
if not incoming:
self.record(kind, source, destination, "skipped", "No importable entries found")
return
incoming = [rebrand_text(entry) for entry in incoming]
existing = parse_existing_memory_entries(destination)
merged, stats, overflowed = merge_entries(existing, incoming, limit)
@ -927,7 +953,7 @@ class Migrator:
def load_openclaw_config(self) -> Dict[str, Any]:
# Check current name and legacy config filenames
for name in ("openclaw.json", "clawdbot.json", "moldbot.json"):
for name in ("openclaw.json", "clawdbot.json", "moltbot.json"):
config_path = self.source_root / name
if config_path.exists():
try:
@ -997,7 +1023,17 @@ class Migrator:
.get("workspace")
)
if isinstance(workspace, str) and workspace.strip():
additions["MESSAGING_CWD"] = workspace.strip()
ws_path = workspace.strip()
# Skip if the workspace points inside the OpenClaw source directory —
# that path will be stale after migration and would cause the Hermes
# gateway to use the old OpenClaw workspace as its cwd, picking up
# OpenClaw's AGENTS.md, MEMORY.md, etc.
try:
inside_source = Path(ws_path).resolve().is_relative_to(self.source_root.resolve())
except (ValueError, OSError):
inside_source = False
if not inside_source:
additions["MESSAGING_CWD"] = ws_path
allowlist_path = self.source_root / "credentials" / "telegram-default-allowFrom.json"
if allowlist_path.exists():
@ -1543,6 +1579,7 @@ class Migrator:
if not all_incoming:
self.record("daily-memory", source_dir, destination, "skipped", "No importable entries found in daily memory files")
return
all_incoming = [rebrand_text(entry) for entry in all_incoming]
existing = parse_existing_memory_entries(destination)
merged, stats, overflowed = merge_entries(existing, all_incoming, self.memory_limit)

View file

@ -43,7 +43,7 @@ dev = ["debugpy>=1.8.0,<2", "pytest>=9.0.2,<10", "pytest-asyncio>=1.3.0,<2", "py
messaging = ["python-telegram-bot[webhooks]>=22.6,<23", "discord.py[voice]>=2.7.1,<3", "aiohttp>=3.13.3,<4", "slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"]
cron = ["croniter>=6.0.0,<7"]
slack = ["slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"]
matrix = ["mautrix[encryption]>=0.20,<1", "Markdown>=3.6,<4"]
matrix = ["mautrix[encryption]>=0.20,<1", "Markdown>=3.6,<4", "aiosqlite>=0.20", "asyncpg>=0.29"]
cli = ["simple-term-menu>=1.0,<2"]
tts-premium = ["elevenlabs>=1.0,<2"]
voice = [

View file

@ -94,7 +94,7 @@ from agent.model_metadata import (
from agent.context_compressor import ContextCompressor
from agent.subdirectory_hints import SubdirectoryHintTracker
from agent.prompt_caching import apply_anthropic_cache_control
from agent.prompt_builder import build_skills_system_prompt, build_context_files_prompt, load_soul_md, TOOL_USE_ENFORCEMENT_GUIDANCE, TOOL_USE_ENFORCEMENT_MODELS, DEVELOPER_ROLE_MODELS, GOOGLE_MODEL_OPERATIONAL_GUIDANCE, OPENAI_MODEL_EXECUTION_GUIDANCE
from agent.prompt_builder import build_skills_system_prompt, build_context_files_prompt, build_environment_hints, load_soul_md, TOOL_USE_ENFORCEMENT_GUIDANCE, TOOL_USE_ENFORCEMENT_MODELS, DEVELOPER_ROLE_MODELS, GOOGLE_MODEL_OPERATIONAL_GUIDANCE, OPENAI_MODEL_EXECUTION_GUIDANCE
from agent.usage_pricing import estimate_usage_cost, normalize_usage
from agent.display import (
KawaiiSpinner, build_tool_preview as _build_tool_preview,
@ -339,10 +339,7 @@ def _paths_overlap(left: Path, right: Path) -> bool:
_SURROGATE_RE = re.compile(r'[\ud800-\udfff]')
_BUDGET_WARNING_RE = re.compile(
r"\[BUDGET(?:\s+WARNING)?:\s+Iteration\s+\d+/\d+\..*?\]",
re.DOTALL,
)
def _sanitize_surrogates(text: str) -> str:
@ -463,34 +460,7 @@ def _sanitize_messages_non_ascii(messages: list) -> bool:
return found
def _strip_budget_warnings_from_history(messages: list) -> None:
"""Remove budget pressure warnings from tool-result messages in-place.
Budget warnings are turn-scoped signals that must not leak into replayed
history. They live in tool-result ``content`` either as a JSON key
(``_budget_warning``) or appended plain text.
"""
for msg in messages:
if not isinstance(msg, dict) or msg.get("role") != "tool":
continue
content = msg.get("content")
if not isinstance(content, str) or "_budget_warning" not in content and "[BUDGET" not in content:
continue
# Try JSON first (the common case: _budget_warning key in a dict)
try:
parsed = json.loads(content)
if isinstance(parsed, dict) and "_budget_warning" in parsed:
del parsed["_budget_warning"]
msg["content"] = json.dumps(parsed, ensure_ascii=False)
continue
except (json.JSONDecodeError, TypeError):
pass
# Fallback: strip the text pattern from plain-text tool results
cleaned = _BUDGET_WARNING_RE.sub("", content).strip()
if cleaned != content:
msg["content"] = cleaned
# =========================================================================
@ -579,6 +549,7 @@ class AIAgent:
clarify_callback: callable = None,
step_callback: callable = None,
stream_delta_callback: callable = None,
interim_assistant_callback: callable = None,
tool_gen_callback: callable = None,
status_callback: callable = None,
max_tokens: int = None,
@ -728,6 +699,7 @@ class AIAgent:
self.clarify_callback = clarify_callback
self.step_callback = step_callback
self.stream_delta_callback = stream_delta_callback
self.interim_assistant_callback = interim_assistant_callback
self.status_callback = status_callback
self.tool_gen_callback = tool_gen_callback
@ -775,12 +747,14 @@ class AIAgent:
self._use_prompt_caching = (is_openrouter and is_claude) or is_native_anthropic
self._cache_ttl = "5m" # Default 5-minute TTL (1.25x write cost)
# Iteration budget pressure: warn the LLM as it approaches max_iterations.
# Warnings are injected into the last tool result JSON (not as separate
# messages) so they don't break message structure or invalidate caching.
self._budget_caution_threshold = 0.7 # 70% — nudge to start wrapping up
self._budget_warning_threshold = 0.9 # 90% — urgent, respond now
self._budget_pressure_enabled = True
# Iteration budget: the LLM is only notified when it actually exhausts
# the iteration budget (api_call_count >= max_iterations). At that
# point we inject ONE message, allow one final API call, and if the
# model doesn't produce a text response, force a user-message asking
# it to summarise. No intermediate pressure warnings — they caused
# models to "give up" prematurely on complex tasks (#7915).
self._budget_exhausted_injected = False
self._budget_grace_call = False
# Context pressure warnings: notify the USER (not the LLM) as context
# fills up. Purely informational — displayed in CLI output and sent via
@ -831,6 +805,11 @@ class AIAgent:
# Deferred paragraph break flag — set after tool iterations so a
# single "\n\n" is prepended to the next real text delta.
self._stream_needs_break = False
# Visible assistant text already delivered through live token callbacks
# during the current model response. Used to avoid re-sending the same
# commentary when the provider later returns it as a completed interim
# assistant message.
self._current_streamed_assistant_text = ""
# Optional current-turn user-message override used when the API-facing
# user message intentionally differs from the persisted transcript
@ -1328,9 +1307,23 @@ class AIAgent:
api_key=getattr(self, "api_key", ""),
config_context_length=_config_context_length,
provider=self.provider,
api_mode=self.api_mode,
)
self.compression_enabled = compression_enabled
# Reject models whose context window is below the minimum required
# for reliable tool-calling workflows (64K tokens).
from agent.model_metadata import MINIMUM_CONTEXT_LENGTH
_ctx = getattr(self.context_compressor, "context_length", 0)
if _ctx and _ctx < MINIMUM_CONTEXT_LENGTH:
raise ValueError(
f"Model {self.model} has a context window of {_ctx:,} tokens, "
f"which is below the minimum {MINIMUM_CONTEXT_LENGTH:,} required "
f"by Hermes Agent. Choose a model with at least "
f"{MINIMUM_CONTEXT_LENGTH // 1000}K context, or set "
f"model.context_length in config.yaml to override."
)
# Inject context engine tool schemas (e.g. lcm_grep, lcm_describe, lcm_expand)
self._context_engine_tool_names: set = set()
if hasattr(self, "context_compressor") and self.context_compressor and self.tools is not None:
@ -1571,6 +1564,7 @@ class AIAgent:
base_url=self.base_url,
api_key=getattr(self, "api_key", ""),
provider=self.provider,
api_mode=self.api_mode,
)
# ── Invalidate cached system prompt so it rebuilds next turn ──
@ -1704,6 +1698,16 @@ class AIAgent:
except Exception:
logger.debug("status_callback error in _emit_status", exc_info=True)
def _current_main_runtime(self) -> Dict[str, str]:
"""Return the live main runtime for session-scoped auxiliary routing."""
return {
"model": getattr(self, "model", "") or "",
"provider": getattr(self, "provider", "") or "",
"base_url": getattr(self, "base_url", "") or "",
"api_key": getattr(self, "api_key", "") or "",
"api_mode": getattr(self, "api_mode", "") or "",
}
def _check_compression_model_feasibility(self) -> None:
"""Warn at session start if the auxiliary compression model's context
window is smaller than the main model's compression threshold.
@ -1724,7 +1728,10 @@ class AIAgent:
from agent.auxiliary_client import get_text_auxiliary_client
from agent.model_metadata import get_model_context_length
client, aux_model = get_text_auxiliary_client("compression")
client, aux_model = get_text_auxiliary_client(
"compression",
main_runtime=self._current_main_runtime(),
)
if client is None or not aux_model:
msg = (
"⚠ No auxiliary LLM provider configured — context "
@ -3186,11 +3193,17 @@ class AIAgent:
f"not on any model name returned by the API."
)
# Environment hints (WSL, Termux, etc.) — tell the agent about the
# execution environment so it can translate paths and adapt behavior.
_env_hints = build_environment_hints()
if _env_hints:
prompt_parts.append(_env_hints)
platform_key = (self.platform or "").lower().strip()
if platform_key in PLATFORM_HINTS:
prompt_parts.append(PLATFORM_HINTS[platform_key])
return "\n\n".join(prompt_parts)
return "\n\n".join(p.strip() for p in prompt_parts if p.strip())
# =========================================================================
# Pre/post-call guardrails (inspired by PR #1321 — @alireza78a)
@ -4730,6 +4743,49 @@ class AIAgent:
# ── Unified streaming API call ─────────────────────────────────────────
def _reset_stream_delivery_tracking(self) -> None:
    """Reset tracking for text delivered during the current model response.

    Clears the accumulator fed by _record_streamed_assistant_text() so
    interim-assistant-message dedup starts fresh for the next response.
    """
    self._current_streamed_assistant_text = ""
def _record_streamed_assistant_text(self, text: str) -> None:
"""Accumulate visible assistant text emitted through stream callbacks."""
if isinstance(text, str) and text:
self._current_streamed_assistant_text = (
getattr(self, "_current_streamed_assistant_text", "") + text
)
@staticmethod
def _normalize_interim_visible_text(text: str) -> str:
if not isinstance(text, str):
return ""
return re.sub(r"\s+", " ", text).strip()
def _interim_content_was_streamed(self, content: str) -> bool:
    """Return True when *content* was already delivered via live streaming.

    Compares whitespace-normalised visible text (think-blocks stripped)
    against the accumulator fed by _record_streamed_assistant_text().
    Empty visible content is never considered streamed.
    """
    visible = self._normalize_interim_visible_text(
        self._strip_think_blocks(content or "")
    )
    if not visible:
        return False
    streamed_raw = getattr(self, "_current_streamed_assistant_text", "") or ""
    streamed = self._normalize_interim_visible_text(
        self._strip_think_blocks(streamed_raw)
    )
    return bool(streamed) and streamed == visible
def _emit_interim_assistant_message(self, assistant_msg: Dict[str, Any]) -> None:
    """Surface a real mid-turn assistant commentary message to the UI layer.

    No-op when no ``interim_assistant_callback`` is registered, when the
    message is not a dict, or when the visible text (think-blocks
    stripped) is empty or the "(empty)" placeholder. Callback failures
    are logged at DEBUG and never propagate.
    """
    callback = getattr(self, "interim_assistant_callback", None)
    if callback is None or not isinstance(assistant_msg, dict):
        return
    visible = self._strip_think_blocks(assistant_msg.get("content") or "").strip()
    if not visible or visible == "(empty)":
        return
    # Tell the UI whether this exact text already went out via live
    # token streaming, so it can avoid sending a duplicate.
    already_streamed = self._interim_content_was_streamed(visible)
    try:
        callback(visible, already_streamed=already_streamed)
    except Exception:
        logger.debug("interim_assistant_callback error", exc_info=True)
def _fire_stream_delta(self, text: str) -> None:
"""Fire all registered stream delta callbacks (display + TTS)."""
# If a tool iteration set the break flag, prepend a single paragraph
@ -4739,12 +4795,16 @@ class AIAgent:
if getattr(self, "_stream_needs_break", False) and text and text.strip():
self._stream_needs_break = False
text = "\n\n" + text
for cb in (self.stream_delta_callback, self._stream_callback):
if cb is not None:
try:
cb(text)
except Exception:
pass
callbacks = [cb for cb in (self.stream_delta_callback, self._stream_callback) if cb is not None]
delivered = False
for cb in callbacks:
try:
cb(text)
delivered = True
except Exception:
pass
if delivered:
self._record_streamed_assistant_text(text)
def _fire_reasoning_delta(self, text: str) -> None:
"""Fire reasoning callback if registered."""
@ -4928,6 +4988,7 @@ class AIAgent:
if self.stream_delta_callback:
try:
self.stream_delta_callback(delta.content)
self._record_streamed_assistant_text(delta.content)
except Exception:
pass
@ -6509,17 +6570,23 @@ class AIAgent:
if messages and messages[-1].get("_flush_sentinel") == _sentinel:
messages.pop()
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default") -> tuple:
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default", focus_topic: str = None) -> tuple:
"""Compress conversation context and split the session in SQLite.
Args:
focus_topic: Optional focus string for guided compression the
summariser will prioritise preserving information related to
this topic. Inspired by Claude Code's ``/compact <focus>``.
Returns:
(compressed_messages, new_system_prompt) tuple
"""
_pre_msg_count = len(messages)
logger.info(
"context compression started: session=%s messages=%d tokens=~%s model=%s",
"context compression started: session=%s messages=%d tokens=~%s model=%s focus=%r",
self.session_id or "none", _pre_msg_count,
f"{approx_tokens:,}" if approx_tokens else "unknown", self.model,
focus_topic,
)
# Pre-compression memory flush: let the model save memories before they're lost
self.flush_memories(messages, min_turns=0)
@ -6531,7 +6598,7 @@ class AIAgent:
except Exception:
pass
compressed = self.context_compressor.compress(messages, current_tokens=approx_tokens)
compressed = self.context_compressor.compress(messages, current_tokens=approx_tokens, focus_topic=focus_topic)
todo_snapshot = self._todo_store.format_for_injection()
if todo_snapshot:
@ -6920,24 +6987,6 @@ class AIAgent:
turn_tool_msgs = messages[-num_tools:]
enforce_turn_budget(turn_tool_msgs, env=get_active_env(effective_task_id))
# ── Budget pressure injection ────────────────────────────────────
budget_warning = self._get_budget_warning(api_call_count)
if budget_warning and messages and messages[-1].get("role") == "tool":
last_content = messages[-1]["content"]
try:
parsed = json.loads(last_content)
if isinstance(parsed, dict):
parsed["_budget_warning"] = budget_warning
messages[-1]["content"] = json.dumps(parsed, ensure_ascii=False)
else:
messages[-1]["content"] = last_content + f"\n\n{budget_warning}"
except (json.JSONDecodeError, TypeError):
messages[-1]["content"] = last_content + f"\n\n{budget_warning}"
if not self.quiet_mode:
remaining = self.max_iterations - api_call_count
tier = "⚠️ WARNING" if remaining <= self.max_iterations * 0.1 else "💡 CAUTION"
print(f"{self.log_prefix}{tier}: {remaining} iterations remaining")
def _execute_tool_calls_sequential(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None:
"""Execute tool calls sequentially (original behavior). Used for single calls or interactive tools."""
for i, tool_call in enumerate(assistant_message.tool_calls, 1):
@ -6986,6 +7035,15 @@ class AIAgent:
self._current_tool = function_name
self._touch_activity(f"executing tool: {function_name}")
# Set activity callback for long-running tool execution (terminal
# commands, etc.) so the gateway's inactivity monitor doesn't kill
# the agent while a command is running.
try:
from tools.environments.base import set_activity_callback
set_activity_callback(self._touch_activity)
except Exception:
pass
if self.tool_progress_callback:
try:
preview = _build_tool_preview(function_name, function_args)
@ -7275,50 +7333,7 @@ class AIAgent:
if num_tools_seq > 0:
enforce_turn_budget(messages[-num_tools_seq:], env=get_active_env(effective_task_id))
# ── Budget pressure injection ─────────────────────────────────
# After all tool calls in this turn are processed, check if we're
# approaching max_iterations. If so, inject a warning into the LAST
# tool result's JSON so the LLM sees it naturally when reading results.
budget_warning = self._get_budget_warning(api_call_count)
if budget_warning and messages and messages[-1].get("role") == "tool":
last_content = messages[-1]["content"]
try:
parsed = json.loads(last_content)
if isinstance(parsed, dict):
parsed["_budget_warning"] = budget_warning
messages[-1]["content"] = json.dumps(parsed, ensure_ascii=False)
else:
messages[-1]["content"] = last_content + f"\n\n{budget_warning}"
except (json.JSONDecodeError, TypeError):
messages[-1]["content"] = last_content + f"\n\n{budget_warning}"
if not self.quiet_mode:
remaining = self.max_iterations - api_call_count
tier = "⚠️ WARNING" if remaining <= self.max_iterations * 0.1 else "💡 CAUTION"
print(f"{self.log_prefix}{tier}: {remaining} iterations remaining")
def _get_budget_warning(self, api_call_count: int) -> Optional[str]:
"""Return a budget pressure string, or None if not yet needed.
Two-tier system:
- Caution (70%): nudge to consolidate work
- Warning (90%): urgent, must respond now
"""
if not self._budget_pressure_enabled or self.max_iterations <= 0:
return None
progress = api_call_count / self.max_iterations
remaining = self.max_iterations - api_call_count
if progress >= self._budget_warning_threshold:
return (
f"[BUDGET WARNING: Iteration {api_call_count}/{self.max_iterations}. "
f"Only {remaining} iteration(s) left. "
"Provide your final response NOW. No more tool calls unless absolutely critical.]"
)
if progress >= self._budget_caution_threshold:
return (
f"[BUDGET: Iteration {api_call_count}/{self.max_iterations}. "
f"{remaining} iterations left. Start consolidating your work.]"
)
return None
def _emit_context_pressure(self, compaction_progress: float, compressor) -> None:
"""Notify the user that context is approaching the compaction threshold.
@ -7542,6 +7557,11 @@ class AIAgent:
# Installed once, transparent when streams are healthy, prevents crash on write.
_install_safe_stdio()
# Tag all log records on this thread with the session ID so
# ``hermes logs --session <id>`` can filter a single conversation.
from hermes_logging import set_session_context
set_session_context(self.session_id)
# If the previous turn activated fallback, restore the primary
# runtime so this turn gets a fresh attempt with the preferred model.
# No-op when _fallback_activated is False (gateway, first turn, etc.).
@ -7611,14 +7631,6 @@ class AIAgent:
# Initialize conversation (copy to avoid mutating the caller's list)
messages = list(conversation_history) if conversation_history else []
# Strip budget pressure warnings from previous turns. These are
# turn-scoped signals injected by _get_budget_warning() into tool
# result content. If left in the replayed history, models (especially
# GPT-family) interpret them as still-active instructions and avoid
# making tool calls in ALL subsequent turns.
if messages:
_strip_budget_warnings_from_history(messages)
# Hydrate todo store from conversation history (gateway creates a fresh
# AIAgent per message, so the in-memory store is empty -- we need to
# recover the todo state from the most recent todo tool response in history)
@ -7835,7 +7847,7 @@ class AIAgent:
except Exception:
pass
while api_call_count < self.max_iterations and self.iteration_budget.remaining > 0:
while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call:
# Reset per-turn checkpoint dedup so each iteration can take one snapshot
self._checkpoint_mgr.new_turn()
@ -7850,7 +7862,13 @@ class AIAgent:
api_call_count += 1
self._api_call_count = api_call_count
self._touch_activity(f"starting API call #{api_call_count}")
if not self.iteration_budget.consume():
# Grace call: the budget is exhausted but we gave the model one
# more chance. Consume the grace flag so the loop exits after
# this iteration regardless of outcome.
if self._budget_grace_call:
self._budget_grace_call = False
elif not self.iteration_budget.consume():
_turn_exit_reason = "budget_exhausted"
if not self.quiet_mode:
self._safe_print(f"\n⚠️ Iteration budget exhausted ({self.iteration_budget.used}/{self.iteration_budget.max_total} iterations used)")
@ -7977,9 +7995,39 @@ class AIAgent:
# manual message manipulation are always caught.
api_messages = self._sanitize_api_messages(api_messages)
# Normalize message whitespace and tool-call JSON for consistent
# prefix matching. Ensures bit-perfect prefixes across turns,
# which enables KV cache reuse on local inference servers
# (llama.cpp, vLLM, Ollama) and improves cache hit rates for
# cloud providers. Operates on api_messages (the API copy) so
# the original conversation history in `messages` is untouched.
for am in api_messages:
if isinstance(am.get("content"), str):
am["content"] = am["content"].strip()
for am in api_messages:
tcs = am.get("tool_calls")
if not tcs:
continue
new_tcs = []
for tc in tcs:
if isinstance(tc, dict) and "function" in tc:
try:
args_obj = json.loads(tc["function"]["arguments"])
tc = {**tc, "function": {
**tc["function"],
"arguments": json.dumps(
args_obj, separators=(",", ":"),
sort_keys=True,
),
}}
except Exception:
pass
new_tcs.append(tc)
am["tool_calls"] = new_tcs
# Calculate approximate request size for logging
total_chars = sum(len(str(msg)) for msg in api_messages)
approx_tokens = total_chars // 4 # Rough estimate: 4 chars per token
approx_tokens = estimate_messages_tokens_rough(api_messages)
# Thinking spinner for quiet mode (animated during API call)
thinking_spinner = None
@ -8028,6 +8076,7 @@ class AIAgent:
while retry_count < max_retries:
try:
self._reset_stream_delivery_tracking()
api_kwargs = self._build_api_kwargs(api_messages)
if self.api_mode == "codex_responses":
api_kwargs = self._preflight_codex_api_kwargs(api_kwargs, allow_stream=False)
@ -8186,6 +8235,8 @@ class AIAgent:
self._emit_status("⚠️ Empty/malformed response — switching to fallback...")
if self._try_activate_fallback():
retry_count = 0
compression_attempts = 0
primary_recovery_attempted = False
continue
# Check for error field in response (some providers include this)
@ -8221,6 +8272,8 @@ class AIAgent:
self._emit_status(f"⚠️ Max retries ({max_retries}) for invalid responses — trying fallback...")
if self._try_activate_fallback():
retry_count = 0
compression_attempts = 0
primary_recovery_attempted = False
continue
self._emit_status(f"❌ Max retries ({max_retries}) exceeded for invalid responses. Giving up.")
logging.error(f"{self.log_prefix}Invalid API response after {max_retries} retries.")
@ -8875,6 +8928,8 @@ class AIAgent:
self._emit_status("⚠️ Rate limited — switching to fallback provider...")
if self._try_activate_fallback():
retry_count = 0
compression_attempts = 0
primary_recovery_attempted = False
continue
is_payload_too_large = (
@ -9088,6 +9143,8 @@ class AIAgent:
self._emit_status(f"⚠️ Non-retryable error (HTTP {status_code}) — trying fallback...")
if self._try_activate_fallback():
retry_count = 0
compression_attempts = 0
primary_recovery_attempted = False
continue
if api_kwargs is not None:
self._dump_api_request_debug(
@ -9153,6 +9210,8 @@ class AIAgent:
self._emit_status(f"⚠️ Max retries ({max_retries}) exhausted — trying fallback...")
if self._try_activate_fallback():
retry_count = 0
compression_attempts = 0
primary_recovery_attempted = False
continue
_final_summary = self._summarize_api_error(api_error)
if is_rate_limited:
@ -9376,8 +9435,6 @@ class AIAgent:
# Check for incomplete <REASONING_SCRATCHPAD> (opened but never closed)
# This means the model ran out of output tokens mid-reasoning — retry up to 2 times
if has_incomplete_scratchpad(assistant_message.content or ""):
if not hasattr(self, '_incomplete_scratchpad_retries'):
self._incomplete_scratchpad_retries = 0
self._incomplete_scratchpad_retries += 1
self._vprint(f"{self.log_prefix}⚠️ Incomplete <REASONING_SCRATCHPAD> detected (opened but never closed)")
@ -9405,12 +9462,9 @@ class AIAgent:
}
# Reset incomplete scratchpad counter on clean response
if hasattr(self, '_incomplete_scratchpad_retries'):
self._incomplete_scratchpad_retries = 0
self._incomplete_scratchpad_retries = 0
if self.api_mode == "codex_responses" and finish_reason == "incomplete":
if not hasattr(self, "_codex_incomplete_retries"):
self._codex_incomplete_retries = 0
self._codex_incomplete_retries += 1
interim_msg = self._build_assistant_message(assistant_message, finish_reason)
@ -9437,6 +9491,7 @@ class AIAgent:
)
if not duplicate_interim:
messages.append(interim_msg)
self._emit_interim_assistant_message(interim_msg)
if self._codex_incomplete_retries < 3:
if not self.quiet_mode:
@ -9481,8 +9536,6 @@ class AIAgent:
]
if invalid_tool_calls:
# Track retries for invalid tool calls
if not hasattr(self, '_invalid_tool_retries'):
self._invalid_tool_retries = 0
self._invalid_tool_retries += 1
# Return helpful error to model — model can self-correct next turn
@ -9518,8 +9571,7 @@ class AIAgent:
})
continue
# Reset retry counter on successful tool call validation
if hasattr(self, '_invalid_tool_retries'):
self._invalid_tool_retries = 0
self._invalid_tool_retries = 0
# Validate tool call arguments are valid JSON
# Handle empty strings as empty objects (common model quirk)
@ -9659,6 +9711,7 @@ class AIAgent:
messages.pop()
messages.append(assistant_msg)
self._emit_interim_assistant_message(assistant_msg)
# Close any open streaming display (response box, reasoning
# box) before tool execution begins. Intermediate turns may
@ -9921,8 +9974,7 @@ class AIAgent:
break
# Reset retry counter/signature on successful content
if hasattr(self, '_empty_content_retries'):
self._empty_content_retries = 0
self._empty_content_retries = 0
self._thinking_prefill_retries = 0
if (
@ -9938,6 +9990,7 @@ class AIAgent:
codex_ack_continuations += 1
interim_msg = self._build_assistant_message(assistant_message, "incomplete")
messages.append(interim_msg)
self._emit_interim_assistant_message(interim_msg)
continue_msg = {
"role": "user",
@ -9988,8 +10041,7 @@ class AIAgent:
except (OSError, ValueError):
logger.error(error_msg)
if self.verbose_logging:
logging.exception("Detailed error information:")
logger.debug("Outer loop error in API call #%d", api_call_count, exc_info=True)
# If an assistant message with tool_calls was already appended,
# the API expects a role="tool" result for every tool_call_id.
@ -10035,7 +10087,31 @@ class AIAgent:
if final_response is None and (
api_call_count >= self.max_iterations
or self.iteration_budget.remaining <= 0
):
) and not self._budget_exhausted_injected:
# Budget exhausted but we haven't tried asking the model to
# summarise yet. Inject a user message and give it one grace
# API call to produce a text response.
self._budget_exhausted_injected = True
self._budget_grace_call = True
_grace_msg = (
"Your tool budget ran out. Please give me the information "
"or actions you've completed so far."
)
messages.append({"role": "user", "content": _grace_msg})
self._emit_status(
f"⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
"— asking model to summarise"
)
if not self.quiet_mode:
self._safe_print(
f"\n⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
"— requesting summary..."
)
if final_response is None and (
api_call_count >= self.max_iterations
or self.iteration_budget.remaining <= 0
) and not self._budget_grace_call:
_turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})"
if self.iteration_budget.remaining <= 0 and not self.quiet_mode:
print(f"\n⚠️ Iteration budget exhausted ({self.iteration_budget.used}/{self.iteration_budget.max_total} iterations used)")

View file

@ -971,6 +971,74 @@ class TestTaskSpecificOverrides:
client, model = get_text_auxiliary_client("compression")
assert model == "google/gemini-3-flash-preview" # auto → OpenRouter
def test_resolve_auto_prefers_live_main_runtime_over_persisted_config(self, monkeypatch, tmp_path):
"""Session-only live model switches should override persisted config for auto routing."""
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "config.yaml").write_text(
"""model:
default: glm-5.1
provider: opencode-go
compression:
summary_provider: auto
"""
)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
calls = []
def _fake_resolve(provider, model=None, *args, **kwargs):
calls.append((provider, model, kwargs))
return MagicMock(), model or "resolved-model"
with patch("agent.auxiliary_client.resolve_provider_client", side_effect=_fake_resolve):
client, model = _resolve_auto(
main_runtime={
"provider": "openai-codex",
"model": "gpt-5.4",
"api_mode": "codex_responses",
}
)
assert client is not None
assert model == "gpt-5.4"
assert calls[0][0] == "openai-codex"
assert calls[0][1] == "gpt-5.4"
assert calls[0][2]["api_mode"] == "codex_responses"
def test_explicit_compression_pin_still_wins_over_live_main_runtime(self, monkeypatch, tmp_path):
"""Task-level compression config should beat a live session override."""
hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "config.yaml").write_text(
"""auxiliary:
compression:
provider: openrouter
model: google/gemini-3-flash-preview
model:
default: glm-5.1
provider: opencode-go
"""
)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
with patch("agent.auxiliary_client.resolve_provider_client", return_value=(MagicMock(), "google/gemini-3-flash-preview")) as mock_resolve:
client, model = get_text_auxiliary_client(
"compression",
main_runtime={
"provider": "openai-codex",
"model": "gpt-5.4",
},
)
assert client is not None
assert model == "google/gemini-3-flash-preview"
assert mock_resolve.call_args.args[0] == "openrouter"
assert mock_resolve.call_args.kwargs["main_runtime"] == {
"provider": "openai-codex",
"model": "gpt-5.4",
}
def test_compression_summary_base_url_from_config(self, monkeypatch, tmp_path):
"""compression.summary_base_url should produce a custom-endpoint client."""
hermes_home = tmp_path / "hermes"
@ -1560,3 +1628,74 @@ class TestStaleBaseUrlWarning:
assert not any("OPENAI_BASE_URL is set" in rec.message for rec in caplog.records), \
"Warning should not fire a second time"
# ---------------------------------------------------------------------------
# Anthropic-compatible image block conversion
# ---------------------------------------------------------------------------
class TestAnthropicCompatImageConversion:
    """Tests for _is_anthropic_compat_endpoint and _convert_openai_images_to_anthropic."""

    def test_known_providers_detected(self):
        # Providers known to speak the Anthropic-compatible messages API are
        # recognised by provider name alone (base URL left empty).
        from agent.auxiliary_client import _is_anthropic_compat_endpoint
        assert _is_anthropic_compat_endpoint("minimax", "")
        assert _is_anthropic_compat_endpoint("minimax-cn", "")

    def test_openrouter_not_detected(self):
        # OpenRouter and the native "anthropic" provider must NOT be classified
        # as compat endpoints — they take OpenAI-style image blocks as-is.
        from agent.auxiliary_client import _is_anthropic_compat_endpoint
        assert not _is_anthropic_compat_endpoint("openrouter", "")
        assert not _is_anthropic_compat_endpoint("anthropic", "")

    def test_url_based_detection(self):
        # Unknown providers fall back to URL inspection: a path containing
        # "/anthropic" marks the endpoint as Anthropic-compatible.
        from agent.auxiliary_client import _is_anthropic_compat_endpoint
        assert _is_anthropic_compat_endpoint("custom", "https://api.minimax.io/anthropic")
        assert _is_anthropic_compat_endpoint("custom", "https://example.com/anthropic/v1")
        assert not _is_anthropic_compat_endpoint("custom", "https://api.openai.com/v1")

    def test_base64_image_converted(self):
        # A data: URL is split into an Anthropic base64 source block, with the
        # media type and payload parsed out of the URL itself.
        from agent.auxiliary_client import _convert_openai_images_to_anthropic
        messages = [{
            "role": "user",
            "content": [
                {"type": "text", "text": "describe"},
                {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR="}}
            ]
        }]
        result = _convert_openai_images_to_anthropic(messages)
        img_block = result[0]["content"][1]
        assert img_block["type"] == "image"
        assert img_block["source"]["type"] == "base64"
        assert img_block["source"]["media_type"] == "image/png"
        assert img_block["source"]["data"] == "iVBOR="

    def test_url_image_converted(self):
        # Plain https image URLs map to Anthropic's "url" source type.
        from agent.auxiliary_client import _convert_openai_images_to_anthropic
        messages = [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/img.jpg"}}
            ]
        }]
        result = _convert_openai_images_to_anthropic(messages)
        img_block = result[0]["content"][0]
        assert img_block["type"] == "image"
        assert img_block["source"]["type"] == "url"
        assert img_block["source"]["url"] == "https://example.com/img.jpg"

    def test_text_only_messages_unchanged(self):
        # Messages without image parts must pass through untouched — the
        # identity assertion guards against needless copying.
        from agent.auxiliary_client import _convert_openai_images_to_anthropic
        messages = [{"role": "user", "content": "Hello"}]
        result = _convert_openai_images_to_anthropic(messages)
        assert result[0] is messages[0]  # same object, not copied

    def test_jpeg_media_type_parsed(self):
        # The media type is parsed from the data: URL prefix, not hardcoded.
        from agent.auxiliary_client import _convert_openai_images_to_anthropic
        messages = [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,/9j/="}}
            ]
        }]
        result = _convert_openai_images_to_anthropic(messages)
        assert result[0]["content"][0]["source"]["media_type"] == "image/jpeg"

View file

@ -0,0 +1,139 @@
"""Tests for focus_topic flowing through the compressor.
Verifies that _generate_summary and compress accept and use the focus_topic
parameter correctly. Inspired by Claude Code's /compact <focus>.
"""
from unittest.mock import MagicMock, patch
from agent.context_compressor import ContextCompressor
def _make_compressor():
    """Build a bare ContextCompressor (bypassing __init__) seeded with test defaults."""
    defaults = {
        "protect_first_n": 2,
        "protect_last_n": 5,
        "tail_token_budget": 20000,
        "context_length": 200000,
        "threshold_percent": 0.80,
        "threshold_tokens": 160000,
        "max_summary_tokens": 10000,
        "quiet_mode": True,
        "compression_count": 0,
        "last_prompt_tokens": 0,
        "_previous_summary": None,
        "_summary_failure_cooldown_until": 0.0,
        "summary_model": None,
    }
    instance = ContextCompressor.__new__(ContextCompressor)
    for attr, value in defaults.items():
        setattr(instance, attr, value)
    return instance
def test_focus_topic_injected_into_summary_prompt():
    """When focus_topic is provided, the LLM prompt includes focus guidance."""
    compressor = _make_compressor()
    turns = [
        {"role": "user", "content": "Tell me about the database schema"},
        {"role": "assistant", "content": "The schema has tables: users, orders, products."},
    ]
    captured_prompt = {}

    def mock_call_llm(**kwargs):
        # Record the prompt the compressor builds so it can be inspected,
        # and return a minimal successful LLM response object.
        captured_prompt["messages"] = kwargs["messages"]
        resp = MagicMock()
        resp.choices = [MagicMock()]
        resp.choices[0].message.content = "## Goal\nUnderstand DB schema."
        return resp

    with patch("agent.context_compressor.call_llm", mock_call_llm):
        result = compressor._generate_summary(turns, focus_topic="database schema")
    assert result is not None
    # The first (system) message must carry the quoted focus topic plus the
    # prioritisation guidance markers (PRIORITISE / 60-70% weighting).
    prompt_text = captured_prompt["messages"][0]["content"]
    assert 'FOCUS TOPIC: "database schema"' in prompt_text
    assert "PRIORITISE" in prompt_text
    assert "60-70%" in prompt_text
def test_no_focus_topic_no_injection():
    """Without focus_topic, the prompt doesn't contain focus guidance."""
    compressor = _make_compressor()
    turns = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi"},
    ]
    captured_prompt = {}

    def mock_call_llm(**kwargs):
        # Capture the built prompt and return a minimal successful response.
        captured_prompt["messages"] = kwargs["messages"]
        resp = MagicMock()
        resp.choices = [MagicMock()]
        resp.choices[0].message.content = "## Goal\nGreeting."
        return resp

    with patch("agent.context_compressor.call_llm", mock_call_llm):
        # Return value is irrelevant here (was bound to an unused local);
        # only the captured prompt text matters for this assertion.
        compressor._generate_summary(turns)
    prompt_text = captured_prompt["messages"][0]["content"]
    assert "FOCUS TOPIC" not in prompt_text
def test_compress_passes_focus_to_generate_summary():
    """compress() passes focus_topic through to _generate_summary."""
    compressor = _make_compressor()
    # Track what _generate_summary receives.
    # (Removed dead local `original_generate`: the original reference was
    # saved but never restored or used.)
    received_kwargs = {}

    def tracking_generate(turns, **kwargs):
        received_kwargs.update(kwargs)
        return "## Goal\nTest."

    compressor._generate_summary = tracking_generate
    messages = [
        {"role": "system", "content": "System prompt"},
        {"role": "user", "content": "first"},
        {"role": "assistant", "content": "reply1"},
        {"role": "user", "content": "second"},
        {"role": "assistant", "content": "reply2"},
        {"role": "user", "content": "third"},
        {"role": "assistant", "content": "reply3"},
        {"role": "user", "content": "fourth"},
        {"role": "assistant", "content": "reply4"},
    ]
    compressor.compress(messages, current_tokens=100000, focus_topic="authentication flow")
    assert received_kwargs.get("focus_topic") == "authentication flow"
def test_compress_none_focus_by_default():
    """compress() passes None focus_topic by default."""
    compressor = _make_compressor()
    received_kwargs = {}

    def tracking_generate(turns, **kwargs):
        received_kwargs.update(kwargs)
        return "## Goal\nTest."

    compressor._generate_summary = tracking_generate
    # Build the same fixture conversation: one system message followed by
    # four user/assistant exchange pairs.
    messages = [{"role": "system", "content": "System prompt"}]
    for user_text, assistant_text in [
        ("first", "reply1"),
        ("second", "reply2"),
        ("third", "reply3"),
        ("fourth", "reply4"),
    ]:
        messages.append({"role": "user", "content": user_text})
        messages.append({"role": "assistant", "content": assistant_text})
    compressor.compress(messages, current_tokens=100000)
    assert received_kwargs.get("focus_topic") is None

View file

@ -191,6 +191,37 @@ class TestNonStringContent:
kwargs = mock_call.call_args.kwargs
assert "temperature" not in kwargs
def test_summary_call_passes_live_main_runtime(self):
mock_response = MagicMock()
mock_response.choices = [MagicMock()]
mock_response.choices[0].message.content = "ok"
with patch("agent.context_compressor.get_model_context_length", return_value=100000):
c = ContextCompressor(
model="gpt-5.4",
provider="openai-codex",
base_url="https://chatgpt.com/backend-api/codex",
api_key="codex-token",
api_mode="codex_responses",
quiet_mode=True,
)
messages = [
{"role": "user", "content": "do something"},
{"role": "assistant", "content": "ok"},
]
with patch("agent.context_compressor.call_llm", return_value=mock_response) as mock_call:
c._generate_summary(messages)
assert mock_call.call_args.kwargs["main_runtime"] == {
"model": "gpt-5.4",
"provider": "openai-codex",
"base_url": "https://chatgpt.com/backend-api/codex",
"api_key": "codex-token",
"api_mode": "codex_responses",
}
class TestSummaryFailureCooldown:
def test_summary_failure_enters_cooldown_and_skips_retry(self):
@ -576,11 +607,19 @@ class TestSummaryTargetRatio:
assert c.summary_target_ratio == 0.80
def test_default_threshold_is_50_percent(self):
"""Default compression threshold should be 50%."""
"""Default compression threshold should be 50%, with a 64K floor."""
with patch("agent.context_compressor.get_model_context_length", return_value=100_000):
c = ContextCompressor(model="test", quiet_mode=True)
assert c.threshold_percent == 0.50
assert c.threshold_tokens == 50_000
# 50% of 100K = 50K, but the floor is 64K
assert c.threshold_tokens == 64_000
def test_threshold_floor_does_not_apply_above_128k(self):
"""On large-context models the 50% percentage is used directly."""
with patch("agent.context_compressor.get_model_context_length", return_value=200_000):
c = ContextCompressor(model="test", quiet_mode=True)
# 50% of 200K = 100K, which is above the 64K floor
assert c.threshold_tokens == 100_000
def test_default_protect_last_n_is_20(self):
"""Default protect_last_n should be 20."""

View file

@ -50,7 +50,8 @@ class TestEstimateTokensRough:
assert estimate_tokens_rough("a" * 400) == 100
def test_short_text(self):
assert estimate_tokens_rough("hello") == 1
# "hello" = 5 chars → ceil(5/4) = 2
assert estimate_tokens_rough("hello") == 2
def test_proportional(self):
short = estimate_tokens_rough("hello world")
@ -68,10 +69,11 @@ class TestEstimateMessagesTokensRough:
assert estimate_messages_tokens_rough([]) == 0
def test_single_message_concrete_value(self):
"""Verify against known str(msg) length."""
"""Verify against known str(msg) length (ceiling division)."""
msg = {"role": "user", "content": "a" * 400}
result = estimate_messages_tokens_rough([msg])
expected = len(str(msg)) // 4
n = len(str(msg))
expected = (n + 3) // 4
assert result == expected
def test_multiple_messages_additive(self):
@ -80,7 +82,8 @@ class TestEstimateMessagesTokensRough:
{"role": "assistant", "content": "Hi there, how can I help?"},
]
result = estimate_messages_tokens_rough(msgs)
expected = sum(len(str(m)) for m in msgs) // 4
n = sum(len(str(m)) for m in msgs)
expected = (n + 3) // 4
assert result == expected
def test_tool_call_message(self):
@ -89,7 +92,7 @@ class TestEstimateMessagesTokensRough:
"tool_calls": [{"id": "1", "function": {"name": "terminal", "arguments": "{}"}}]}
result = estimate_messages_tokens_rough([msg])
assert result > 0
assert result == len(str(msg)) // 4
assert result == (len(str(msg)) + 3) // 4
def test_message_with_list_content(self):
"""Vision messages with multimodal content arrays."""
@ -98,7 +101,7 @@ class TestEstimateMessagesTokensRough:
{"type": "image_url", "image_url": {"url": "data:image/png;base64,AAAA"}}
]}
result = estimate_messages_tokens_rough([msg])
assert result == len(str(msg)) // 4
assert result == (len(str(msg)) + 3) // 4
# =========================================================================

View file

@ -87,7 +87,10 @@ class TestProviderMapping:
def test_unmapped_provider_not_in_dict(self):
assert "nous" not in PROVIDER_TO_MODELS_DEV
assert "openai-codex" not in PROVIDER_TO_MODELS_DEV
def test_openai_codex_mapped_to_openai(self):
assert PROVIDER_TO_MODELS_DEV["openai"] == "openai"
assert PROVIDER_TO_MODELS_DEV["openai-codex"] == "openai"
class TestExtractContext:

View file

@ -18,6 +18,7 @@ from agent.prompt_builder import (
build_skills_system_prompt,
build_nous_subscription_prompt,
build_context_files_prompt,
build_environment_hints,
CONTEXT_FILE_MAX_CHARS,
DEFAULT_AGENT_IDENTITY,
TOOL_USE_ENFORCEMENT_GUIDANCE,
@ -26,6 +27,7 @@ from agent.prompt_builder import (
MEMORY_GUIDANCE,
SESSION_SEARCH_GUIDANCE,
PLATFORM_HINTS,
WSL_ENVIRONMENT_HINT,
)
from hermes_cli.nous_subscription import NousFeatureState, NousSubscriptionFeatures
@ -770,6 +772,29 @@ class TestPromptBuilderConstants:
assert "cli" in PLATFORM_HINTS
# =========================================================================
# Environment hints
# =========================================================================
class TestEnvironmentHints:
def test_wsl_hint_constant_mentions_mnt(self):
assert "/mnt/c/" in WSL_ENVIRONMENT_HINT
assert "WSL" in WSL_ENVIRONMENT_HINT
def test_build_environment_hints_on_wsl(self, monkeypatch):
import agent.prompt_builder as _pb
monkeypatch.setattr(_pb, "is_wsl", lambda: True)
result = _pb.build_environment_hints()
assert "/mnt/" in result
assert "WSL" in result
def test_build_environment_hints_not_wsl(self, monkeypatch):
import agent.prompt_builder as _pb
monkeypatch.setattr(_pb, "is_wsl", lambda: False)
result = _pb.build_environment_hints()
assert result == ""
# =========================================================================
# Conditional skill activation
# =========================================================================
@ -1009,65 +1034,4 @@ class TestOpenAIModelExecutionGuidance:
# =========================================================================
class TestStripBudgetWarningsFromHistory:
def test_strips_json_budget_warning_key(self):
import json
from run_agent import _strip_budget_warnings_from_history
messages = [
{"role": "tool", "tool_call_id": "c1", "content": json.dumps({
"output": "hello",
"exit_code": 0,
"_budget_warning": "[BUDGET: Iteration 55/60. 5 iterations left. Start consolidating your work.]",
})},
]
_strip_budget_warnings_from_history(messages)
parsed = json.loads(messages[0]["content"])
assert "_budget_warning" not in parsed
assert parsed["output"] == "hello"
assert parsed["exit_code"] == 0
def test_strips_text_budget_warning(self):
from run_agent import _strip_budget_warnings_from_history
messages = [
{"role": "tool", "tool_call_id": "c1",
"content": "some result\n\n[BUDGET WARNING: Iteration 58/60. Only 2 iteration(s) left. Provide your final response NOW. No more tool calls unless absolutely critical.]"},
]
_strip_budget_warnings_from_history(messages)
assert messages[0]["content"] == "some result"
def test_leaves_non_tool_messages_unchanged(self):
from run_agent import _strip_budget_warnings_from_history
messages = [
{"role": "assistant", "content": "[BUDGET WARNING: Iteration 58/60. Only 2 iteration(s) left. Provide your final response NOW. No more tool calls unless absolutely critical.]"},
{"role": "user", "content": "hello"},
]
original_contents = [m["content"] for m in messages]
_strip_budget_warnings_from_history(messages)
assert [m["content"] for m in messages] == original_contents
def test_handles_empty_and_missing_content(self):
from run_agent import _strip_budget_warnings_from_history
messages = [
{"role": "tool", "tool_call_id": "c1", "content": ""},
{"role": "tool", "tool_call_id": "c2"},
]
_strip_budget_warnings_from_history(messages)
assert messages[0]["content"] == ""
def test_strips_caution_variant(self):
import json
from run_agent import _strip_budget_warnings_from_history
messages = [
{"role": "tool", "tool_call_id": "c1", "content": json.dumps({
"output": "ok",
"_budget_warning": "[BUDGET: Iteration 42/60. 18 iterations left. Start consolidating your work.]",
})},
]
_strip_budget_warnings_from_history(messages)
parsed = json.loads(messages[0]["content"])
assert "_budget_warning" not in parsed

View file

@ -0,0 +1,118 @@
"""Tests for /compress <focus> — guided compression with focus topic.
Inspired by Claude Code's /compact <focus> feature.
"""
from unittest.mock import MagicMock, patch
from tests.cli.test_cli_init import _make_cli
def _make_history() -> list[dict[str, str]]:
return [
{"role": "user", "content": "one"},
{"role": "assistant", "content": "two"},
{"role": "user", "content": "three"},
{"role": "assistant", "content": "four"},
]
def test_focus_topic_extracted_and_passed(capsys):
"""Focus topic is extracted from the command and passed to _compress_context."""
shell = _make_cli()
history = _make_history()
compressed = [history[0], history[-1]]
shell.conversation_history = history
shell.agent = MagicMock()
shell.agent.compression_enabled = True
shell.agent._cached_system_prompt = ""
shell.agent._compress_context.return_value = (compressed, "")
def _estimate(messages):
if messages is history:
return 100
return 50
with patch("agent.model_metadata.estimate_messages_tokens_rough", side_effect=_estimate):
shell._manual_compress("/compress database schema")
output = capsys.readouterr().out
assert 'focus: "database schema"' in output
# Verify focus_topic was passed through
shell.agent._compress_context.assert_called_once()
call_kwargs = shell.agent._compress_context.call_args
assert call_kwargs.kwargs.get("focus_topic") == "database schema"
def test_no_focus_topic_when_bare_command(capsys):
"""When no focus topic is provided, None is passed."""
shell = _make_cli()
history = _make_history()
shell.conversation_history = history
shell.agent = MagicMock()
shell.agent.compression_enabled = True
shell.agent._cached_system_prompt = ""
shell.agent._compress_context.return_value = (list(history), "")
with patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100):
shell._manual_compress("/compress")
shell.agent._compress_context.assert_called_once()
call_kwargs = shell.agent._compress_context.call_args
assert call_kwargs.kwargs.get("focus_topic") is None
def test_empty_focus_after_command_treated_as_none(capsys):
"""Trailing whitespace after /compress does not produce a focus topic."""
shell = _make_cli()
history = _make_history()
shell.conversation_history = history
shell.agent = MagicMock()
shell.agent.compression_enabled = True
shell.agent._cached_system_prompt = ""
shell.agent._compress_context.return_value = (list(history), "")
with patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100):
shell._manual_compress("/compress ")
shell.agent._compress_context.assert_called_once()
call_kwargs = shell.agent._compress_context.call_args
assert call_kwargs.kwargs.get("focus_topic") is None
def test_focus_topic_printed_in_compression_banner(capsys):
"""The focus topic shows in the compression progress banner."""
shell = _make_cli()
history = _make_history()
compressed = [history[0], history[-1]]
shell.conversation_history = history
shell.agent = MagicMock()
shell.agent.compression_enabled = True
shell.agent._cached_system_prompt = ""
shell.agent._compress_context.return_value = (compressed, "")
with patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100):
shell._manual_compress("/compress API endpoints")
output = capsys.readouterr().out
assert 'focus: "API endpoints"' in output
def test_no_focus_prints_standard_banner(capsys):
"""Without focus, the standard banner (no focus: line) is printed."""
shell = _make_cli()
history = _make_history()
compressed = [history[0], history[-1]]
shell.conversation_history = history
shell.agent = MagicMock()
shell.agent.compression_enabled = True
shell.agent._cached_system_prompt = ""
shell.agent._compress_context.return_value = (compressed, "")
with patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100):
shell._manual_compress("/compress")
output = capsys.readouterr().out
assert "focus:" not in output
assert "Compressing" in output

View file

@ -0,0 +1,189 @@
"""Tests for stacked tool progress scrollback lines in the CLI TUI.
When tool_progress_mode is "all" or "new", _on_tool_progress should print
persistent lines to scrollback on tool.completed, restoring the stacked
tool history that was lost when the TUI switched to a single-line spinner.
"""
import os
import sys
import importlib
from unittest.mock import MagicMock, patch
# Make the project root importable so `import cli` resolves when the suite
# is run from the tests/ directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

# Module-level reference to the reloaded cli module (set by _make_cli on
# first call) so tests can patch attributes on the exact module instance.
_cli_mod = None
def _make_cli(tool_progress="all"):
    """Create a HermesCLI instance with minimal mocking.

    Stubs out every prompt_toolkit submodule (so no TTY is required),
    reloads the cli module under a neutral environment, and constructs
    HermesCLI against a minimal config using the requested
    display.tool_progress mode. Stores the reloaded module in the
    module-global ``_cli_mod`` so tests can patch its attributes.
    """
    global _cli_mod
    _clean_config = {
        "model": {
            "default": "anthropic/claude-opus-4.6",
            "base_url": "https://openrouter.ai/api/v1",
            "provider": "auto",
        },
        "display": {"compact": False, "tool_progress": tool_progress},
        "agent": {},
        "terminal": {"env_type": "local"},
    }
    # Blank out env vars that would otherwise override the test config.
    clean_env = {"LLM_MODEL": "", "HERMES_MAX_ITERATIONS": ""}
    # Every prompt_toolkit submodule the cli module imports must be stubbed.
    prompt_toolkit_stubs = {
        "prompt_toolkit": MagicMock(),
        "prompt_toolkit.history": MagicMock(),
        "prompt_toolkit.styles": MagicMock(),
        "prompt_toolkit.patch_stdout": MagicMock(),
        "prompt_toolkit.application": MagicMock(),
        "prompt_toolkit.layout": MagicMock(),
        "prompt_toolkit.layout.processors": MagicMock(),
        "prompt_toolkit.filters": MagicMock(),
        "prompt_toolkit.layout.dimension": MagicMock(),
        "prompt_toolkit.layout.menus": MagicMock(),
        "prompt_toolkit.widgets": MagicMock(),
        "prompt_toolkit.key_binding": MagicMock(),
        "prompt_toolkit.completion": MagicMock(),
        "prompt_toolkit.formatted_text": MagicMock(),
        "prompt_toolkit.auto_suggest": MagicMock(),
    }
    with patch.dict(sys.modules, prompt_toolkit_stubs), \
            patch.dict("os.environ", clean_env, clear=False):
        import cli as mod
        # Reload so module-level initialization re-runs under the stubs.
        mod = importlib.reload(mod)
        _cli_mod = mod
        with patch.object(mod, "get_tool_definitions", return_value=[]), \
                patch.dict(mod.__dict__, {"CLI_CONFIG": _clean_config}):
            return mod.HermesCLI()
class TestToolProgressScrollback:
    """Stacked scrollback lines for 'all' and 'new' modes.

    Each test drives HermesCLI._on_tool_progress directly with
    tool.started / tool.completed events and observes how many persistent
    lines get printed via the module-level _cprint helper.
    """

    def test_all_mode_prints_scrollback_on_completed(self):
        """In 'all' mode, tool.completed prints a stacked line."""
        cli = _make_cli(tool_progress="all")
        # Simulate tool.started
        cli._on_tool_progress("tool.started", "terminal", "git log", {"command": "git log"})
        # Simulate tool.completed
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=1.5, is_error=False)
        mock_print.assert_called_once()
        line = mock_print.call_args[0][0]
        # Should contain tool info (the cute message format has "git log" for terminal)
        assert "git log" in line or "$" in line

    def test_all_mode_prints_every_call(self):
        """In 'all' mode, consecutive calls to the same tool each get a line."""
        cli = _make_cli(tool_progress="all")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            # First call
            cli._on_tool_progress("tool.started", "read_file", "cli.py", {"path": "cli.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.1, is_error=False)
            # Second call (same tool)
            cli._on_tool_progress("tool.started", "read_file", "run_agent.py", {"path": "run_agent.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.2, is_error=False)
        assert mock_print.call_count == 2

    def test_new_mode_skips_consecutive_repeats(self):
        """In 'new' mode, consecutive calls to the same tool only print once."""
        cli = _make_cli(tool_progress="new")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.started", "read_file", "cli.py", {"path": "cli.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.1, is_error=False)
            cli._on_tool_progress("tool.started", "read_file", "run_agent.py", {"path": "run_agent.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.2, is_error=False)
        assert mock_print.call_count == 1  # Only the first read_file

    def test_new_mode_prints_when_tool_changes(self):
        """In 'new' mode, a different tool name triggers a new line."""
        cli = _make_cli(tool_progress="new")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.started", "read_file", "cli.py", {"path": "cli.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.1, is_error=False)
            cli._on_tool_progress("tool.started", "search_files", "pattern", {"pattern": "test"})
            cli._on_tool_progress("tool.completed", "search_files", None, None, duration=0.3, is_error=False)
            cli._on_tool_progress("tool.started", "read_file", "run_agent.py", {"path": "run_agent.py"})
            cli._on_tool_progress("tool.completed", "read_file", None, None, duration=0.2, is_error=False)
        # read_file, search_files, read_file (3rd prints because search_files broke the streak)
        assert mock_print.call_count == 3

    def test_off_mode_no_scrollback(self):
        """In 'off' mode, no stacked lines are printed."""
        cli = _make_cli(tool_progress="off")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.started", "terminal", "ls", {"command": "ls"})
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=0.5, is_error=False)
        mock_print.assert_not_called()

    def test_error_suffix_on_failed_tool(self):
        """When is_error=True, the stacked line includes [error]."""
        cli = _make_cli(tool_progress="all")
        cli._on_tool_progress("tool.started", "terminal", "bad cmd", {"command": "bad cmd"})
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=0.5, is_error=True)
        line = mock_print.call_args[0][0]
        assert "[error]" in line

    def test_spinner_still_updates_on_started(self):
        """tool.started still updates the spinner text for live display."""
        cli = _make_cli(tool_progress="all")
        cli._on_tool_progress("tool.started", "terminal", "git status", {"command": "git status"})
        assert "git status" in cli._spinner_text

    def test_spinner_timer_clears_on_completed(self):
        """tool.completed still clears the tool timer."""
        cli = _make_cli(tool_progress="all")
        cli._on_tool_progress("tool.started", "terminal", "git status", {"command": "git status"})
        assert cli._tool_start_time > 0
        with patch.object(_cli_mod, "_cprint"):
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=0.5, is_error=False)
        assert cli._tool_start_time == 0.0

    def test_concurrent_tools_produce_stacked_lines(self):
        """Multiple tool.started followed by multiple tool.completed all produce lines."""
        cli = _make_cli(tool_progress="all")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            # All start first (concurrent pattern)
            cli._on_tool_progress("tool.started", "web_search", "query 1", {"query": "test 1"})
            cli._on_tool_progress("tool.started", "web_search", "query 2", {"query": "test 2"})
            # All complete
            cli._on_tool_progress("tool.completed", "web_search", None, None, duration=1.0, is_error=False)
            cli._on_tool_progress("tool.completed", "web_search", None, None, duration=1.5, is_error=False)
        assert mock_print.call_count == 2

    def test_verbose_mode_no_duplicate_scrollback(self):
        """In 'verbose' mode, scrollback lines are NOT printed (run_agent handles verbose output)."""
        cli = _make_cli(tool_progress="verbose")
        with patch.object(_cli_mod, "_cprint") as mock_print:
            cli._on_tool_progress("tool.started", "terminal", "ls", {"command": "ls"})
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=0.5, is_error=False)
        mock_print.assert_not_called()

    def test_pending_info_stores_on_started(self):
        """tool.started stores args for later use by tool.completed."""
        cli = _make_cli(tool_progress="all")
        cli._on_tool_progress("tool.started", "terminal", "ls", {"command": "ls"})
        assert "terminal" in cli._pending_tool_info
        assert len(cli._pending_tool_info["terminal"]) == 1
        assert cli._pending_tool_info["terminal"][0] == {"command": "ls"}

    def test_pending_info_consumed_on_completed(self):
        """tool.completed consumes stored args (FIFO for concurrent)."""
        cli = _make_cli(tool_progress="all")
        cli._on_tool_progress("tool.started", "terminal", "ls", {"command": "ls"})
        cli._on_tool_progress("tool.started", "terminal", "pwd", {"command": "pwd"})
        assert len(cli._pending_tool_info["terminal"]) == 2
        with patch.object(_cli_mod, "_cprint"):
            cli._on_tool_progress("tool.completed", "terminal", None, None, duration=0.1, is_error=False)
        # First entry consumed, second remains
        assert len(cli._pending_tool_info.get("terminal", [])) == 1
        assert cli._pending_tool_info["terminal"][0] == {"command": "pwd"}

View file

@ -0,0 +1,226 @@
"""Tests for the clean shutdown marker that prevents unwanted session auto-resets.
When the gateway shuts down gracefully (hermes update, gateway restart, /restart),
it writes a .clean_shutdown marker. On the next startup, if the marker exists,
suspend_recently_active() is skipped so users don't lose their sessions.
After a crash (no marker), suspension still fires as a safety net for stuck sessions.
"""
import os
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from gateway.config import GatewayConfig, Platform, PlatformConfig, SessionResetPolicy
from gateway.session import SessionEntry, SessionSource, SessionStore
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_source(platform=Platform.TELEGRAM, chat_id="123", user_id="u1"):
    """Build a SessionSource for tests; defaults describe a Telegram chat."""
    return SessionSource(
        platform=platform,
        chat_id=chat_id,
        user_id=user_id,
    )
def _make_store(tmp_path, policy=None):
    """Build a SessionStore rooted at tmp_path, optionally with a reset policy."""
    config = GatewayConfig()
    if policy is not None:
        config.default_reset_policy = policy
    store = SessionStore(sessions_dir=tmp_path, config=config)
    return store
# ---------------------------------------------------------------------------
# SessionStore.suspend_recently_active
# ---------------------------------------------------------------------------
class TestSuspendRecentlyActive:
    """Verify suspend_recently_active only marks recent sessions."""

    def test_suspends_recently_active_sessions(self, tmp_path):
        """A freshly created (recently active) session gets suspended."""
        store = _make_store(tmp_path)
        source = _make_source()
        entry = store.get_or_create_session(source)
        assert not entry.suspended
        count = store.suspend_recently_active()
        assert count == 1
        # Re-fetch — the suspended session is auto-reset on next access.
        refreshed = store.get_or_create_session(source)
        assert refreshed.was_auto_reset

    def test_does_not_suspend_old_sessions(self, tmp_path):
        """Sessions idle longer than max_age_seconds are left untouched."""
        store = _make_store(tmp_path)
        source = _make_source()
        entry = store.get_or_create_session(source)
        # Backdate the session's updated_at beyond the cutoff
        with store._lock:
            entry.updated_at = datetime.now() - timedelta(seconds=300)
            store._save()
        count = store.suspend_recently_active(max_age_seconds=120)
        assert count == 0

    def test_already_suspended_not_double_counted(self, tmp_path):
        """Each suspension pass only counts sessions it newly suspended."""
        store = _make_store(tmp_path)
        source = _make_source()
        # Create the session; only the side effect matters, so no binding.
        store.get_or_create_session(source)
        # Suspend once
        count1 = store.suspend_recently_active()
        assert count1 == 1
        # Accessing again creates a new session (the old one got reset).
        store.get_or_create_session(source)
        # Suspend again — the new session is recent but not yet suspended
        count2 = store.suspend_recently_active()
        assert count2 == 1
# ---------------------------------------------------------------------------
# Clean shutdown marker integration
# ---------------------------------------------------------------------------
class TestCleanShutdownMarker:
    """Test that the marker file controls session suspension on startup."""

    @staticmethod
    def _build_runner():
        """Construct a bare GatewayRunner carrying only the attributes stop() touches.

        object.__new__ bypasses __init__ so no adapters, tasks, or I/O are
        started. The attribute list mirrors what GatewayRunner.stop() reads.
        """
        from gateway.run import GatewayRunner

        runner = object.__new__(GatewayRunner)
        runner._restart_requested = False
        runner._restart_detached = False
        runner._restart_via_service = False
        runner._restart_task_started = False
        runner._running = True
        runner._draining = False
        runner._stop_task = None
        runner._running_agents = {}
        runner._pending_messages = {}
        runner._pending_approvals = {}
        runner._background_tasks = set()
        runner._shutdown_event = MagicMock()
        runner._restart_drain_timeout = 5
        runner._exit_code = None
        runner._exit_reason = None
        runner.adapters = {}
        runner.config = GatewayConfig()
        return runner

    @staticmethod
    def _run_stop(runner, **stop_kwargs):
        """Run runner.stop(**stop_kwargs) with heavy dependencies mocked out."""
        import asyncio

        with patch("gateway.run.GatewayRunner._drain_active_agents", new_callable=AsyncMock, return_value=([], False)), \
                patch("gateway.run.GatewayRunner._finalize_shutdown_agents"), \
                patch("gateway.run.GatewayRunner._update_runtime_status"), \
                patch("gateway.status.remove_pid_file"), \
                patch("tools.process_registry.process_registry") as mock_proc_reg, \
                patch("tools.terminal_tool.cleanup_all_environments"), \
                patch("tools.browser_tool.cleanup_all_browsers"):
            mock_proc_reg.kill_all = MagicMock()
            # asyncio.run() creates and closes a fresh event loop. The old
            # asyncio.get_event_loop().run_until_complete() pattern is
            # deprecated since Python 3.10 and fails on 3.12+ when no loop
            # is set on the current thread.
            asyncio.run(runner.stop(**stop_kwargs))

    def test_marker_written_on_graceful_stop(self, tmp_path, monkeypatch):
        """stop() should write .clean_shutdown marker."""
        monkeypatch.setattr("gateway.run._hermes_home", tmp_path)
        marker = tmp_path / ".clean_shutdown"
        assert not marker.exists()
        runner = self._build_runner()
        self._run_stop(runner)
        assert marker.exists(), ".clean_shutdown marker should exist after graceful stop"

    def test_marker_skips_suspension_on_startup(self, tmp_path, monkeypatch):
        """If .clean_shutdown exists, suspend_recently_active should NOT be called."""
        monkeypatch.setattr("gateway.run._hermes_home", tmp_path)
        # Create the marker
        marker = tmp_path / ".clean_shutdown"
        marker.touch()
        # Create a store with a recently active session
        store = _make_store(tmp_path)
        source = _make_source()
        entry = store.get_or_create_session(source)
        assert not entry.suspended
        # Simulate what start() does:
        if marker.exists():
            marker.unlink()
            # Should NOT call suspend_recently_active
        else:
            store.suspend_recently_active()
        # Session should NOT be suspended
        with store._lock:
            store._ensure_loaded_locked()
            for e in store._entries.values():
                assert not e.suspended, "Session should NOT be suspended after clean shutdown"
        assert not marker.exists(), "Marker should be cleaned up"

    def test_no_marker_triggers_suspension(self, tmp_path, monkeypatch):
        """Without .clean_shutdown marker (crash), suspension should fire."""
        monkeypatch.setattr("gateway.run._hermes_home", tmp_path)
        marker = tmp_path / ".clean_shutdown"
        assert not marker.exists()
        # Create a store with a recently active session
        store = _make_store(tmp_path)
        source = _make_source()
        entry = store.get_or_create_session(source)
        assert not entry.suspended
        # Simulate what start() does:
        if marker.exists():
            marker.unlink()
        else:
            store.suspend_recently_active()
        # Session SHOULD be suspended (crash recovery)
        with store._lock:
            store._ensure_loaded_locked()
            suspended_count = sum(1 for e in store._entries.values() if e.suspended)
        assert suspended_count == 1, "Session should be suspended after crash (no marker)"

    def test_marker_written_on_restart_stop(self, tmp_path, monkeypatch):
        """stop(restart=True) should also write the marker."""
        monkeypatch.setattr("gateway.run._hermes_home", tmp_path)
        marker = tmp_path / ".clean_shutdown"
        runner = self._build_runner()
        self._run_stop(runner, restart=True)
        assert marker.exists(), ".clean_shutdown marker should exist after restart-stop too"

View file

@ -0,0 +1,118 @@
"""Tests for gateway /compress <focus> — focus topic on the gateway side."""
from datetime import datetime
from unittest.mock import MagicMock, patch
import pytest
from gateway.config import GatewayConfig, Platform, PlatformConfig
from gateway.platforms.base import MessageEvent
from gateway.session import SessionEntry, SessionSource, build_session_key
def _make_source() -> SessionSource:
    """Canonical Telegram DM source used by every test in this module."""
    source = SessionSource(
        platform=Platform.TELEGRAM,
        user_id="u1",
        chat_id="c1",
        user_name="tester",
        chat_type="dm",
    )
    return source
def _make_event(text: str = "/compress") -> MessageEvent:
    """Wrap `text` in a MessageEvent from the canonical test source."""
    return MessageEvent(
        text=text,
        source=_make_source(),
        message_id="m1",
    )
def _make_history() -> list[dict[str, str]]:
return [
{"role": "user", "content": "one"},
{"role": "assistant", "content": "two"},
{"role": "user", "content": "three"},
{"role": "assistant", "content": "four"},
]
def _make_runner(history: list[dict[str, str]]):
    """Build a GatewayRunner wired to a mocked session store holding `history`.

    object.__new__ bypasses __init__ so no adapters or background tasks start.
    """
    from gateway.run import GatewayRunner

    runner = object.__new__(GatewayRunner)
    runner.config = GatewayConfig(
        platforms={Platform.TELEGRAM: PlatformConfig(enabled=True, token="***")}
    )
    now = datetime.now()
    entry = SessionEntry(
        session_key=build_session_key(_make_source()),
        session_id="sess-1",
        created_at=now,
        updated_at=now,
        platform=Platform.TELEGRAM,
        chat_type="dm",
    )
    store = MagicMock()
    store.get_or_create_session.return_value = entry
    store.load_transcript.return_value = history
    store.rewrite_transcript = MagicMock()
    store.update_session = MagicMock()
    store._save = MagicMock()
    runner.session_store = store
    return runner
@pytest.mark.asyncio
async def test_compress_focus_topic_passed_to_agent():
    """Focus topic from /compress <focus> is passed through to _compress_context."""
    history = _make_history()
    compressed = [history[0], history[-1]]
    runner = _make_runner(history)
    agent_instance = MagicMock()
    agent_instance.context_compressor.protect_first_n = 0
    agent_instance.context_compressor._align_boundary_forward.return_value = 0
    agent_instance.context_compressor._find_tail_cut_by_tokens.return_value = 2
    agent_instance.session_id = "sess-1"
    agent_instance._compress_context.return_value = (compressed, "")
    with (
        patch("gateway.run._resolve_runtime_agent_kwargs", return_value={"api_key": "***"}),
        patch("gateway.run._resolve_gateway_model", return_value="test-model"),
        patch("run_agent.AIAgent", return_value=agent_instance),
        # return_value=100 matches the sibling test; a local side_effect
        # function returning a constant was needless indirection.
        patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100),
    ):
        result = await runner._handle_compress_command(_make_event("/compress database schema"))
    # Verify focus_topic was passed
    agent_instance._compress_context.assert_called_once()
    call_kwargs = agent_instance._compress_context.call_args
    assert call_kwargs.kwargs.get("focus_topic") == "database schema"
    # Verify focus is mentioned in response
    assert 'Focus: "database schema"' in result
@pytest.mark.asyncio
async def test_compress_no_focus_passes_none():
    """Bare /compress passes focus_topic=None."""
    history = _make_history()
    runner = _make_runner(history)
    # Agent mock: compression is a no-op (returns the history unchanged).
    agent = MagicMock()
    agent.session_id = "sess-1"
    agent.context_compressor.protect_first_n = 0
    agent.context_compressor._align_boundary_forward.return_value = 0
    agent.context_compressor._find_tail_cut_by_tokens.return_value = 2
    agent._compress_context.return_value = (list(history), "")
    with (
        patch("gateway.run._resolve_runtime_agent_kwargs", return_value={"api_key": "***"}),
        patch("gateway.run._resolve_gateway_model", return_value="test-model"),
        patch("run_agent.AIAgent", return_value=agent),
        patch("agent.model_metadata.estimate_messages_tokens_rough", return_value=100),
    ):
        result = await runner._handle_compress_command(_make_event("/compress"))
    agent._compress_context.assert_called_once()
    kwargs = agent._compress_context.call_args.kwargs
    assert kwargs.get("focus_topic") is None
    # The user-facing response carries no focus line.
    assert "Focus:" not in result

View file

@ -74,6 +74,26 @@ class FakeBot:
return None
class SlowSyncTree(FakeTree):
    """Command tree whose sync() blocks until the test releases it.

    `started` fires when sync() begins; sync() then waits on `allow_finish`.
    """

    def __init__(self):
        super().__init__()
        self.started = asyncio.Event()
        self.allow_finish = asyncio.Event()

        async def _blocked_sync():
            # Signal that the sync coroutine is running, then park until
            # the test explicitly allows completion.
            self.started.set()
            await self.allow_finish.wait()
            return []

        self.sync = AsyncMock(side_effect=_blocked_sync)
class SlowSyncBot(FakeBot):
    """Fake bot whose slash-command tree sync blocks until explicitly released."""

    def __init__(self, *, intents, proxy=None):
        super().__init__(intents=intents, proxy=proxy)
        # Replace the default tree with the blocking variant.
        self.tree = SlowSyncTree()
@pytest.mark.asyncio
@pytest.mark.parametrize(
("allowed_users", "expected_members_intent"),
@ -138,3 +158,36 @@ async def test_connect_releases_token_lock_on_timeout(monkeypatch):
assert ok is False
assert released == [("discord-bot-token", "test-token")]
assert adapter._platform_lock_identity is None
@pytest.mark.asyncio
async def test_connect_does_not_wait_for_slash_sync(monkeypatch):
    """connect() must return before tree.sync() completes (sync runs in background)."""
    adapter = DiscordAdapter(PlatformConfig(enabled=True, token="test-token"))
    # Stub out the cross-process platform lock helpers.
    monkeypatch.setattr("gateway.status.acquire_scoped_lock", lambda scope, identity, metadata=None: (True, None))
    monkeypatch.setattr("gateway.status.release_scoped_lock", lambda scope, identity: None)
    intents = SimpleNamespace(message_content=False, dm_messages=False, guild_messages=False, members=False, voice_states=False)
    monkeypatch.setattr(discord_platform.Intents, "default", lambda: intents)
    # Capture the bot instance the adapter constructs so we can drive its tree.
    created = {}

    def fake_bot_factory(*, command_prefix, intents, proxy=None):
        bot = SlowSyncBot(intents=intents, proxy=proxy)
        created["bot"] = bot
        return bot

    monkeypatch.setattr(discord_platform.commands, "Bot", fake_bot_factory)
    monkeypatch.setattr(adapter, "_resolve_allowed_usernames", AsyncMock())
    # connect() must finish well before the (still blocked) slash sync does.
    ok = await asyncio.wait_for(adapter.connect(), timeout=1.0)
    assert ok is True
    assert adapter._ready_event.is_set()
    # The background sync did start, exactly once.
    await asyncio.wait_for(created["bot"].tree.started.wait(), timeout=1.0)
    assert created["bot"].tree.sync.await_count == 1
    # Only now do we let the background sync run to completion.
    created["bot"].tree.allow_finish.set()
    await asyncio.sleep(0)
    await adapter.disconnect()

View file

@ -0,0 +1,355 @@
"""Tests for gateway.display_config — per-platform display/verbosity resolver."""
import pytest
# ---------------------------------------------------------------------------
# Resolver: resolution order
# ---------------------------------------------------------------------------
class TestResolveDisplaySetting:
    """resolve_display_setting() resolves with correct priority.

    Expected order: display.platforms.<plat>.<key> > display.<key> >
    built-in platform default > global default > explicit fallback.
    """

    def test_explicit_platform_override_wins(self):
        """display.platforms.<plat>.<key> takes top priority."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress": "all",
                "platforms": {
                    "telegram": {"tool_progress": "verbose"},
                },
            }
        }
        assert resolve_display_setting(config, "telegram", "tool_progress") == "verbose"

    def test_global_setting_when_no_platform_override(self):
        """Falls back to display.<key> when no platform override exists."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress": "new",
                "platforms": {},
            }
        }
        assert resolve_display_setting(config, "telegram", "tool_progress") == "new"

    def test_platform_default_when_no_user_config(self):
        """Falls back to built-in platform default."""
        from gateway.display_config import resolve_display_setting
        # Empty config — should get built-in defaults
        config = {}
        # Telegram defaults to tier_high → "all"
        assert resolve_display_setting(config, "telegram", "tool_progress") == "all"
        # Email defaults to tier_minimal → "off"
        assert resolve_display_setting(config, "email", "tool_progress") == "off"

    def test_global_default_for_unknown_platform(self):
        """Unknown platforms get the global defaults."""
        from gateway.display_config import resolve_display_setting
        config = {}
        # Unknown platform, no config → global default "all"
        assert resolve_display_setting(config, "unknown_platform", "tool_progress") == "all"

    def test_fallback_parameter_used_last(self):
        """Explicit fallback is used when nothing else matches."""
        from gateway.display_config import resolve_display_setting
        config = {}
        # "nonexistent_key" isn't in any defaults
        result = resolve_display_setting(config, "telegram", "nonexistent_key", "my_fallback")
        assert result == "my_fallback"

    def test_platform_override_only_affects_that_platform(self):
        """Other platforms are unaffected by a specific platform override."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress": "all",
                "platforms": {
                    "slack": {"tool_progress": "off"},
                },
            }
        }
        assert resolve_display_setting(config, "slack", "tool_progress") == "off"
        assert resolve_display_setting(config, "telegram", "tool_progress") == "all"
# ---------------------------------------------------------------------------
# Backward compatibility: tool_progress_overrides
# ---------------------------------------------------------------------------
class TestBackwardCompat:
    """Legacy tool_progress_overrides is still respected as a fallback."""

    def test_legacy_overrides_read(self):
        """tool_progress_overrides is read when no platforms entry exists."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress": "all",
                "tool_progress_overrides": {
                    "signal": "off",
                    "telegram": "verbose",
                },
            }
        }
        assert resolve_display_setting(config, "signal", "tool_progress") == "off"
        assert resolve_display_setting(config, "telegram", "tool_progress") == "verbose"

    def test_new_platforms_takes_precedence_over_legacy(self):
        """display.platforms beats tool_progress_overrides."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress": "all",
                "tool_progress_overrides": {"telegram": "verbose"},
                "platforms": {"telegram": {"tool_progress": "new"}},
            }
        }
        assert resolve_display_setting(config, "telegram", "tool_progress") == "new"

    def test_legacy_overrides_only_for_tool_progress(self):
        """Legacy overrides don't affect other settings."""
        from gateway.display_config import resolve_display_setting
        config = {
            "display": {
                "tool_progress_overrides": {"telegram": "verbose"},
            }
        }
        # show_reasoning should NOT read from tool_progress_overrides
        assert resolve_display_setting(config, "telegram", "show_reasoning") is False
# ---------------------------------------------------------------------------
# YAML normalisation
# ---------------------------------------------------------------------------
class TestYAMLNormalisation:
    """YAML 1.1 quirks (bare off → False, on → True) are handled.

    Users writing `tool_progress: off` in YAML get a Python False, which the
    resolver must normalise back to the intended string/typed value.
    """

    def test_tool_progress_false_normalised_to_off(self):
        """YAML's bare `off` parses as False — normalised to 'off' string."""
        from gateway.display_config import resolve_display_setting
        config = {"display": {"tool_progress": False}}
        assert resolve_display_setting(config, "telegram", "tool_progress") == "off"

    def test_tool_progress_true_normalised_to_all(self):
        """YAML's bare `on` parses as True — normalised to 'all'."""
        from gateway.display_config import resolve_display_setting
        config = {"display": {"tool_progress": True}}
        assert resolve_display_setting(config, "telegram", "tool_progress") == "all"

    def test_show_reasoning_string_true(self):
        """String 'true' is normalised to bool True."""
        from gateway.display_config import resolve_display_setting
        config = {"display": {"platforms": {"telegram": {"show_reasoning": "true"}}}}
        assert resolve_display_setting(config, "telegram", "show_reasoning") is True

    def test_tool_preview_length_string(self):
        """String numbers are normalised to int."""
        from gateway.display_config import resolve_display_setting
        config = {"display": {"platforms": {"slack": {"tool_preview_length": "80"}}}}
        assert resolve_display_setting(config, "slack", "tool_preview_length") == 80

    def test_platform_override_false_tool_progress(self):
        """Per-platform bare off → normalised."""
        from gateway.display_config import resolve_display_setting
        config = {"display": {"platforms": {"slack": {"tool_progress": False}}}}
        assert resolve_display_setting(config, "slack", "tool_progress") == "off"
# ---------------------------------------------------------------------------
# Built-in platform defaults (tier system)
# ---------------------------------------------------------------------------
class TestPlatformDefaults:
    """Built-in defaults reflect platform capability tiers."""

    def test_high_tier_platforms(self):
        """Telegram and Discord default to 'all' tool progress."""
        from gateway.display_config import resolve_display_setting
        for platform in ["telegram", "discord"]:
            assert resolve_display_setting({}, platform, "tool_progress") == "all", platform

    def test_medium_tier_platforms(self):
        """Slack, Mattermost, Matrix default to 'new' tool progress."""
        from gateway.display_config import resolve_display_setting
        for platform in ["slack", "mattermost", "matrix", "feishu"]:
            assert resolve_display_setting({}, platform, "tool_progress") == "new", platform

    def test_low_tier_platforms(self):
        """Signal, WhatsApp, etc. default to 'off' tool progress."""
        from gateway.display_config import resolve_display_setting
        low_tier = ["signal", "whatsapp", "bluebubbles", "weixin", "wecom", "dingtalk"]
        for platform in low_tier:
            assert resolve_display_setting({}, platform, "tool_progress") == "off", platform

    def test_minimal_tier_platforms(self):
        """Email, SMS, webhook default to 'off' tool progress."""
        from gateway.display_config import resolve_display_setting
        minimal_tier = ["email", "sms", "webhook", "homeassistant"]
        for platform in minimal_tier:
            assert resolve_display_setting({}, platform, "tool_progress") == "off", platform

    def test_low_tier_streaming_defaults_to_false(self):
        """Low-tier platforms default streaming to False."""
        from gateway.display_config import resolve_display_setting
        for platform in ["signal", "email"]:
            assert resolve_display_setting({}, platform, "streaming") is False, platform

    def test_high_tier_streaming_defaults_to_none(self):
        """High-tier platforms default streaming to None (follow global)."""
        from gateway.display_config import resolve_display_setting
        assert resolve_display_setting({}, "telegram", "streaming") is None
# ---------------------------------------------------------------------------
# get_effective_display / get_platform_defaults
# ---------------------------------------------------------------------------
class TestHelpers:
    """Helper functions return correct composite results."""

    def test_get_effective_display_merges_correctly(self):
        """get_effective_display merges platform override > global > default."""
        from gateway.display_config import get_effective_display
        config = {
            "display": {
                "tool_progress": "new",
                "show_reasoning": True,
                "platforms": {
                    "telegram": {"tool_progress": "verbose"},
                },
            }
        }
        eff = get_effective_display(config, "telegram")
        assert eff["tool_progress"] == "verbose"  # platform override
        assert eff["show_reasoning"] is True  # global
        assert "tool_preview_length" in eff  # default filled in

    def test_get_platform_defaults_returns_dict(self):
        """get_platform_defaults returns a mutable copy, not the shared tier dict."""
        from gateway.display_config import get_platform_defaults
        defaults = get_platform_defaults("telegram")
        assert "tool_progress" in defaults
        assert "show_reasoning" in defaults
        # Returns a new dict (not the shared tier dict)
        defaults["tool_progress"] = "changed"
        assert get_platform_defaults("telegram")["tool_progress"] != "changed"
# ---------------------------------------------------------------------------
# Config migration: tool_progress_overrides → display.platforms
# ---------------------------------------------------------------------------
class TestConfigMigration:
    """Version 16 migration moves tool_progress_overrides into display.platforms."""

    def test_migration_creates_platforms_entries(self, tmp_path, monkeypatch):
        """Old overrides are migrated into display.platforms.<plat>.tool_progress."""
        import yaml
        config_path = tmp_path / "config.yaml"
        config = {
            "_config_version": 15,
            "display": {
                "tool_progress_overrides": {
                    "signal": "off",
                    "telegram": "all",
                },
            },
        }
        config_path.write_text(yaml.dump(config))
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        # Re-import to pick up the new HERMES_HOME
        import importlib
        import hermes_cli.config as cfg_mod
        importlib.reload(cfg_mod)
        # Return value unused — only the on-disk result matters (consistent
        # with test_migration_preserves_existing_platforms_entries below).
        cfg_mod.migrate_config(interactive=False, quiet=True)
        # Re-read config
        updated = yaml.safe_load(config_path.read_text())
        platforms = updated.get("display", {}).get("platforms", {})
        assert platforms.get("signal", {}).get("tool_progress") == "off"
        assert platforms.get("telegram", {}).get("tool_progress") == "all"

    def test_migration_preserves_existing_platforms_entries(self, tmp_path, monkeypatch):
        """Existing display.platforms entries are NOT overwritten by migration."""
        import yaml
        config_path = tmp_path / "config.yaml"
        config = {
            "_config_version": 15,
            "display": {
                "tool_progress_overrides": {"telegram": "off"},
                "platforms": {"telegram": {"tool_progress": "verbose"}},
            },
        }
        config_path.write_text(yaml.dump(config))
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        import importlib
        import hermes_cli.config as cfg_mod
        importlib.reload(cfg_mod)
        cfg_mod.migrate_config(interactive=False, quiet=True)
        updated = yaml.safe_load(config_path.read_text())
        # Existing "verbose" should NOT be overwritten by legacy "off"
        assert updated["display"]["platforms"]["telegram"]["tool_progress"] == "verbose"
# ---------------------------------------------------------------------------
# Streaming per-platform (None = follow global)
# ---------------------------------------------------------------------------
class TestStreamingPerPlatform:
    """Streaming per-platform override semantics (None = follow global)."""

    def test_none_means_follow_global(self):
        """With no override configured, resolution yields None."""
        from gateway.display_config import resolve_display_setting

        # Telegram defaults carry no streaming entry, so the caller is
        # expected to fall back to the global StreamingConfig.
        assert resolve_display_setting({}, "telegram", "streaming") is None

    def test_explicit_false_disables(self):
        """An explicit False turns streaming off for that platform."""
        from gateway.display_config import resolve_display_setting

        cfg = {"display": {"platforms": {"telegram": {"streaming": False}}}}
        assert resolve_display_setting(cfg, "telegram", "streaming") is False

    def test_explicit_true_enables(self):
        """An explicit True turns streaming on for that platform."""
        from gateway.display_config import resolve_display_setting

        cfg = {"display": {"platforms": {"email": {"streaming": True}}}}
        assert resolve_display_setting(cfg, "email", "streaming") is True

View file

@ -28,12 +28,16 @@ class _FakeRegistry:
def __init__(self, sessions):
    """Seed the fake registry with an ordered list of sessions to hand out."""
    # Copy so later pops in get() don't mutate the caller's list.
    self._sessions = list(sessions)
    # Session ids already marked as completion-consumed.
    self._completion_consumed: set = set()
def get(self, session_id):
    """Pop and return the next queued session (FIFO), or None when exhausted."""
    return self._sessions.pop(0) if self._sessions else None
def is_completion_consumed(self, session_id):
    """Return True once session_id has been recorded as consumed."""
    return session_id in self._completion_consumed
def _build_runner(monkeypatch, tmp_path) -> GatewayRunner:
"""Create a GatewayRunner with notifications set to 'all'."""

View file

@ -157,12 +157,44 @@ def _make_fake_mautrix():
mautrix_crypto_store = types.ModuleType("mautrix.crypto.store")
class MemoryCryptoStore:
def __init__(self, account_id="", pickle_key=""):
def __init__(self, account_id="", pickle_key=""): # noqa: S301
self.account_id = account_id
self.pickle_key = pickle_key
mautrix_crypto_store.MemoryCryptoStore = MemoryCryptoStore
# --- mautrix.crypto.store.asyncpg ---
mautrix_crypto_store_asyncpg = types.ModuleType("mautrix.crypto.store.asyncpg")
class PgCryptoStore:
upgrade_table = MagicMock()
def __init__(self, account_id="", pickle_key="", db=None): # noqa: S301
self.account_id = account_id
self.pickle_key = pickle_key
self.db = db
async def open(self):
pass
mautrix_crypto_store_asyncpg.PgCryptoStore = PgCryptoStore
# --- mautrix.util ---
mautrix_util = types.ModuleType("mautrix.util")
# --- mautrix.util.async_db ---
mautrix_util_async_db = types.ModuleType("mautrix.util.async_db")
class Database:
@classmethod
def create(cls, url, upgrade_table=None):
db = MagicMock()
db.start = AsyncMock()
db.stop = AsyncMock()
return db
mautrix_util_async_db.Database = Database
return {
"mautrix": mautrix,
"mautrix.api": mautrix_api,
@ -171,6 +203,9 @@ def _make_fake_mautrix():
"mautrix.client.state_store": mautrix_client_state_store,
"mautrix.crypto": mautrix_crypto,
"mautrix.crypto.store": mautrix_crypto_store,
"mautrix.crypto.store.asyncpg": mautrix_crypto_store_asyncpg,
"mautrix.util": mautrix_util,
"mautrix.util.async_db": mautrix_util_async_db,
}
@ -740,6 +775,12 @@ class TestMatrixAccessTokenAuth:
mock_client.whoami = AsyncMock(return_value=FakeWhoamiResponse("@bot:example.org", "DEV123"))
mock_client.sync = AsyncMock(return_value={"rooms": {"join": {"!room:server": {}}}})
mock_client.add_event_handler = MagicMock()
mock_client.handle_sync = MagicMock(return_value=[])
mock_client.query_keys = AsyncMock(return_value={
"device_keys": {"@bot:example.org": {"DEV123": {
"keys": {"ed25519:DEV123": "fake_ed25519_key"},
}}},
})
mock_client.api = MagicMock()
mock_client.api.token = "syt_test_access_token"
mock_client.api.session = MagicMock()
@ -751,6 +792,8 @@ class TestMatrixAccessTokenAuth:
mock_olm.share_keys = AsyncMock()
mock_olm.share_keys_min_trust = None
mock_olm.send_keys_min_trust = None
mock_olm.account = MagicMock()
mock_olm.account.identity_keys = {"ed25519": "fake_ed25519_key"}
# Patch Client constructor to return our mock
fake_mautrix_mods["mautrix.client"].Client = MagicMock(return_value=mock_client)
@ -924,6 +967,12 @@ class TestMatrixDeviceId:
mock_client.whoami = AsyncMock(return_value=MagicMock(user_id="@bot:example.org", device_id="WHOAMI_DEV"))
mock_client.sync = AsyncMock(return_value={"rooms": {"join": {"!room:server": {}}}})
mock_client.add_event_handler = MagicMock()
mock_client.handle_sync = MagicMock(return_value=[])
mock_client.query_keys = AsyncMock(return_value={
"device_keys": {"@bot:example.org": {"MY_STABLE_DEVICE": {
"keys": {"ed25519:MY_STABLE_DEVICE": "fake_ed25519_key"},
}}},
})
mock_client.api = MagicMock()
mock_client.api.token = "syt_test_access_token"
mock_client.api.session = MagicMock()
@ -934,6 +983,8 @@ class TestMatrixDeviceId:
mock_olm.share_keys = AsyncMock()
mock_olm.share_keys_min_trust = None
mock_olm.send_keys_min_trust = None
mock_olm.account = MagicMock()
mock_olm.account.identity_keys = {"ed25519": "fake_ed25519_key"}
fake_mautrix_mods["mautrix.client"].Client = MagicMock(return_value=mock_client)
fake_mautrix_mods["mautrix.crypto"].OlmMachine = MagicMock(return_value=mock_olm)
@ -1030,8 +1081,8 @@ class TestMatrixDeviceIdConfig:
class TestMatrixSyncLoop:
@pytest.mark.asyncio
async def test_sync_loop_shares_keys_when_encryption_enabled(self):
"""_sync_loop should call crypto.share_keys() after each sync."""
async def test_sync_loop_dispatches_events_and_stores_token(self):
"""_sync_loop should call handle_sync() and persist next_batch."""
adapter = _make_adapter()
adapter._encryption = True
adapter._closing = False
@ -1046,7 +1097,6 @@ class TestMatrixSyncLoop:
return {"rooms": {"join": {"!room:example.org": {}}}, "next_batch": "s1234"}
mock_crypto = MagicMock()
mock_crypto.share_keys = AsyncMock()
mock_sync_store = MagicMock()
mock_sync_store.get_next_batch = AsyncMock(return_value=None)
@ -1062,7 +1112,6 @@ class TestMatrixSyncLoop:
await adapter._sync_loop()
fake_client.sync.assert_awaited_once()
mock_crypto.share_keys.assert_awaited_once()
fake_client.handle_sync.assert_called_once()
mock_sync_store.put_next_batch.assert_awaited_once_with("s1234")
@ -1248,6 +1297,12 @@ class TestMatrixEncryptedEventHandler:
mock_client.whoami = AsyncMock(return_value=MagicMock(user_id="@bot:example.org", device_id="DEV123"))
mock_client.sync = AsyncMock(return_value={"rooms": {"join": {"!room:server": {}}}})
mock_client.add_event_handler = MagicMock()
mock_client.handle_sync = MagicMock(return_value=[])
mock_client.query_keys = AsyncMock(return_value={
"device_keys": {"@bot:example.org": {"DEV123": {
"keys": {"ed25519:DEV123": "fake_ed25519_key"},
}}},
})
mock_client.api = MagicMock()
mock_client.api.token = "syt_test_token"
mock_client.api.session = MagicMock()
@ -1258,6 +1313,8 @@ class TestMatrixEncryptedEventHandler:
mock_olm.share_keys = AsyncMock()
mock_olm.share_keys_min_trust = None
mock_olm.send_keys_min_trust = None
mock_olm.account = MagicMock()
mock_olm.account.identity_keys = {"ed25519": "fake_ed25519_key"}
fake_mautrix_mods["mautrix.client"].Client = MagicMock(return_value=mock_client)
fake_mautrix_mods["mautrix.crypto"].OlmMachine = MagicMock(return_value=mock_olm)

View file

@ -8,8 +8,8 @@ from types import SimpleNamespace
import pytest
from gateway.config import Platform, PlatformConfig
from gateway.platforms.base import BasePlatformAdapter, SendResult
from gateway.config import Platform, PlatformConfig, StreamingConfig
from gateway.platforms.base import BasePlatformAdapter, MessageEvent, MessageType, SendResult
from gateway.session import SessionSource
@ -104,6 +104,11 @@ def _make_runner(adapter):
runner._session_db = None
runner._running_agents = {}
runner.hooks = SimpleNamespace(loaded_hooks=False)
runner.config = SimpleNamespace(
thread_sessions_per_user=False,
group_sessions_per_user=False,
stt_enabled=False,
)
return runner
@ -118,6 +123,7 @@ async def test_run_agent_progress_stays_in_originating_topic(monkeypatch, tmp_pa
fake_run_agent = types.ModuleType("run_agent")
fake_run_agent.AIAgent = FakeAgent
monkeypatch.setitem(sys.modules, "run_agent", fake_run_agent)
import tools.terminal_tool # noqa: F401 - register terminal emoji for this fake-agent test
adapter = ProgressCaptureAdapter()
runner = _make_runner(adapter)
@ -144,7 +150,7 @@ async def test_run_agent_progress_stays_in_originating_topic(monkeypatch, tmp_pa
assert adapter.sent == [
{
"chat_id": "-1001",
"content": '⚙️ terminal: "pwd"',
"content": '💻 terminal: "pwd"',
"reply_to": None,
"metadata": {"thread_id": "17585"},
}
@ -334,3 +340,238 @@ def test_all_mode_no_truncation_when_preview_fits(monkeypatch, tmp_path):
content = adapter.sent[0]["content"]
# With a 200-char cap, the 165-char command should NOT be truncated
assert "..." not in content, f"Preview was truncated when it shouldn't be: {content}"
class CommentaryAgent:
    """Fake agent that emits interim commentary, then streams a final reply."""

    def __init__(self, **kwargs):
        self.tool_progress_callback = kwargs.get("tool_progress_callback")
        self.interim_assistant_callback = kwargs.get("interim_assistant_callback")
        self.stream_delta_callback = kwargs.get("stream_delta_callback")
        self.tools = []

    def run_conversation(self, message, conversation_history=None, task_id=None):
        """Emit one commentary line, pause briefly, stream 'done', return it."""
        commentary_cb = self.interim_assistant_callback
        if commentary_cb:
            commentary_cb("I'll inspect the repo first.", already_streamed=False)
        # Small delay so the gateway's interim handling has time to run.
        time.sleep(0.1)
        if self.stream_delta_callback:
            self.stream_delta_callback("done")
        return {"final_response": "done", "messages": [], "api_calls": 1}
class PreviewedResponseAgent:
    """Fake agent whose final text was already previewed as interim commentary."""

    def __init__(self, **kwargs):
        self.interim_assistant_callback = kwargs.get("interim_assistant_callback")
        self.tools = []

    def run_conversation(self, message, conversation_history=None, task_id=None):
        """Preview the final text via the interim callback, then return it."""
        callback = self.interim_assistant_callback
        if callback:
            callback("You're welcome.", already_streamed=False)
        result = {
            "final_response": "You're welcome.",
            # Signals the gateway that the final text has already been shown.
            "response_previewed": True,
            "messages": [],
            "api_calls": 1,
        }
        return result
class QueuedCommentaryAgent:
    """Fake agent that emits commentary only on its first invocation.

    ``calls`` is a class-level counter so tests can observe how many turns
    ran and reset it between runs.
    """

    calls = 0

    def __init__(self, **kwargs):
        self.interim_assistant_callback = kwargs.get("interim_assistant_callback")
        self.tools = []

    def run_conversation(self, message, conversation_history=None, task_id=None):
        """Count the turn; emit commentary on turn 1 only; return a numbered reply."""
        cls = type(self)
        cls.calls += 1
        if cls.calls == 1 and self.interim_assistant_callback:
            self.interim_assistant_callback("I'll inspect the repo first.", already_streamed=False)
        return {
            "final_response": f"final response {cls.calls}",
            "messages": [],
            "api_calls": 1,
        }
async def _run_with_agent(
    monkeypatch,
    tmp_path,
    agent_cls,
    *,
    session_id,
    pending_text=None,
    config_data=None,
):
    """Drive GatewayRunner._run_agent with a fake agent class.

    Installs ``agent_cls`` as ``run_agent.AIAgent``, optionally writes a
    config.yaml into ``tmp_path``, optionally queues a pending message for
    the session, then runs one agent turn and returns ``(adapter, result)``.
    """
    if config_data:
        import yaml
        (tmp_path / "config.yaml").write_text(yaml.dump(config_data), encoding="utf-8")
    # Stub out dotenv so importing gateway code does not read a real .env.
    fake_dotenv = types.ModuleType("dotenv")
    fake_dotenv.load_dotenv = lambda *args, **kwargs: None
    monkeypatch.setitem(sys.modules, "dotenv", fake_dotenv)
    # Replace the real agent module with the supplied fake class BEFORE
    # gateway.run is imported, so the runner picks it up.
    fake_run_agent = types.ModuleType("run_agent")
    fake_run_agent.AIAgent = agent_cls
    monkeypatch.setitem(sys.modules, "run_agent", fake_run_agent)
    adapter = ProgressCaptureAdapter()
    runner = _make_runner(adapter)
    gateway_run = importlib.import_module("gateway.run")
    if config_data and "streaming" in config_data:
        # Mirror the streaming section onto the runner's live config object.
        runner.config.streaming = StreamingConfig.from_dict(config_data["streaming"])
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    monkeypatch.setattr(gateway_run, "_resolve_runtime_agent_kwargs", lambda: {"api_key": "***"})
    source = SessionSource(
        platform=Platform.TELEGRAM,
        chat_id="-1001",
        chat_type="group",
        thread_id="17585",
    )
    session_key = "agent:main:telegram:group:-1001:17585"
    if pending_text is not None:
        # Pre-load a queued message so the runner processes a second turn.
        adapter._pending_messages[session_key] = MessageEvent(
            text=pending_text,
            message_type=MessageType.TEXT,
            source=source,
            message_id="queued-1",
        )
    result = await runner._run_agent(
        message="hello",
        context_prompt="",
        history=[],
        source=source,
        session_id=session_id,
        session_key=session_key,
    )
    return adapter, result
@pytest.mark.asyncio
async def test_run_agent_surfaces_real_interim_commentary(monkeypatch, tmp_path):
    """Interim commentary is delivered when explicitly enabled."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary",
        config_data={"display": {"interim_assistant_messages": True}},
    )
    sent = [call["content"] for call in adapter.sent]
    assert result.get("already_sent") is not True
    assert "I'll inspect the repo first." in sent
@pytest.mark.asyncio
async def test_run_agent_surfaces_interim_commentary_by_default(monkeypatch, tmp_path):
    """With no display config at all, commentary is on by default."""
    adapter, _result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary-default-on",
    )
    assert "I'll inspect the repo first." in [call["content"] for call in adapter.sent]
@pytest.mark.asyncio
async def test_run_agent_suppresses_interim_commentary_when_disabled(monkeypatch, tmp_path):
    """Commentary is dropped when interim_assistant_messages is false."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary-disabled",
        config_data={"display": {"interim_assistant_messages": False}},
    )
    sent = [call["content"] for call in adapter.sent]
    assert result.get("already_sent") is not True
    assert "I'll inspect the repo first." not in sent
@pytest.mark.asyncio
async def test_run_agent_tool_progress_does_not_control_interim_commentary(monkeypatch, tmp_path):
    """tool_progress=all with interim_assistant_messages=false should not surface commentary."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary-tool-progress",
        config_data={"display": {"tool_progress": "all", "interim_assistant_messages": False}},
    )
    sent = [call["content"] for call in adapter.sent]
    assert result.get("already_sent") is not True
    assert "I'll inspect the repo first." not in sent
@pytest.mark.asyncio
async def test_run_agent_streaming_does_not_enable_completed_interim_commentary(
    monkeypatch, tmp_path
):
    """Streaming alone with interim_assistant_messages=false should not surface commentary."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary-streaming",
        config_data={
            "display": {"tool_progress": "off", "interim_assistant_messages": False},
            "streaming": {"enabled": True},
        },
    )
    sent = [call["content"] for call in adapter.sent]
    # Streaming delivered the final response itself…
    assert result.get("already_sent") is True
    # …but the commentary stayed suppressed.
    assert "I'll inspect the repo first." not in sent
@pytest.mark.asyncio
async def test_run_agent_interim_commentary_works_with_tool_progress_off(monkeypatch, tmp_path):
    """Commentary is independent of tool_progress: explicit True wins even with 'off'."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        CommentaryAgent,
        session_id="sess-commentary-explicit-on",
        config_data={
            "display": {
                "tool_progress": "off",
                "interim_assistant_messages": True,
            },
        },
    )
    sent = [call["content"] for call in adapter.sent]
    assert result.get("already_sent") is not True
    assert "I'll inspect the repo first." in sent
@pytest.mark.asyncio
async def test_run_agent_previewed_final_marks_already_sent(monkeypatch, tmp_path):
    """When the agent previewed the final text, the runner must not resend it."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        PreviewedResponseAgent,
        session_id="sess-previewed",
        config_data={"display": {"interim_assistant_messages": True}},
    )
    assert result.get("already_sent") is True
    # The previewed text went out exactly once, as the only send.
    assert [call["content"] for call in adapter.sent] == ["You're welcome."]
@pytest.mark.asyncio
async def test_run_agent_queued_message_does_not_treat_commentary_as_final(monkeypatch, tmp_path):
    """A queued follow-up triggers a second turn; commentary isn't mistaken for the final."""
    QueuedCommentaryAgent.calls = 0
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        QueuedCommentaryAgent,
        session_id="sess-queued-commentary",
        pending_text="queued follow-up",
        config_data={"display": {"interim_assistant_messages": True}},
    )
    sent = [call["content"] for call in adapter.sent]
    # The second turn's reply is the real final response.
    assert result["final_response"] == "final response 2"
    assert "I'll inspect the repo first." in sent
    assert "final response 1" in sent

View file

@ -1,4 +1,5 @@
import pytest
from unittest.mock import AsyncMock
from gateway.config import GatewayConfig, Platform, PlatformConfig
from gateway.platforms.base import BasePlatformAdapter
@ -45,6 +46,23 @@ class _DisabledAdapter(BasePlatformAdapter):
return {"id": chat_id}
class _SuccessfulAdapter(BasePlatformAdapter):
    """Minimal adapter double whose connection lifecycle always succeeds."""

    def __init__(self):
        # Enabled Discord config with a dummy token; no real network setup.
        super().__init__(PlatformConfig(enabled=True, token="***"), Platform.DISCORD)

    async def connect(self) -> bool:
        # Pretend the platform connected cleanly.
        return True

    async def disconnect(self) -> None:
        self._mark_disconnected()

    async def send(self, chat_id, content, reply_to=None, metadata=None):
        # Sending is out of scope for these startup tests.
        raise NotImplementedError

    async def get_chat_info(self, chat_id):
        return {"id": chat_id}
@pytest.mark.asyncio
async def test_runner_returns_failure_for_retryable_startup_errors(monkeypatch, tmp_path):
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
@ -65,7 +83,7 @@ async def test_runner_returns_failure_for_retryable_startup_errors(monkeypatch,
state = read_runtime_status()
assert state["gateway_state"] == "startup_failed"
assert "temporary DNS resolution failure" in state["exit_reason"]
assert state["platforms"]["telegram"]["state"] == "fatal"
assert state["platforms"]["telegram"]["state"] == "retrying"
assert state["platforms"]["telegram"]["error_code"] == "telegram_connect_error"
@ -89,6 +107,64 @@ async def test_runner_allows_cron_only_mode_when_no_platforms_are_enabled(monkey
assert state["gateway_state"] == "running"
@pytest.mark.asyncio
async def test_runner_records_connected_platform_state_on_success(monkeypatch, tmp_path):
    """A successful start records state=connected with no error fields."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    cfg = GatewayConfig(
        platforms={Platform.DISCORD: PlatformConfig(enabled=True, token="***")},
        sessions_dir=tmp_path / "sessions",
    )
    runner = GatewayRunner(cfg)
    # Swap in the always-successful adapter and neutralize hook loading.
    monkeypatch.setattr(runner, "_create_adapter", lambda platform, platform_config: _SuccessfulAdapter())
    monkeypatch.setattr(runner.hooks, "discover_and_load", lambda: None)
    monkeypatch.setattr(runner.hooks, "emit", AsyncMock())

    assert await runner.start() is True

    state = read_runtime_status()
    assert state["gateway_state"] == "running"
    discord = state["platforms"]["discord"]
    assert discord["state"] == "connected"
    assert discord["error_code"] is None
    assert discord["error_message"] is None
@pytest.mark.asyncio
async def test_start_gateway_verbosity_imports_redacting_formatter(monkeypatch, tmp_path):
    """Verbosity != None must not crash with NameError on RedactingFormatter (#8044)."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))

    class _CleanExitRunner:
        """Runner double that starts successfully and exits cleanly."""

        def __init__(self, config):
            self.config = config
            self.should_exit_cleanly = True
            self.exit_reason = None
            self.adapters = {}

        async def start(self):
            return True

        async def stop(self):
            return None

    # Neutralize everything around the verbosity code path under test.
    monkeypatch.setattr("gateway.status.get_running_pid", lambda: None)
    monkeypatch.setattr("tools.skills_sync.sync_skills", lambda quiet=True: None)
    monkeypatch.setattr("hermes_logging.setup_logging", lambda hermes_home, mode: tmp_path)
    monkeypatch.setattr("hermes_logging._add_rotating_handler", lambda *args, **kwargs: None)
    monkeypatch.setattr("gateway.run.GatewayRunner", _CleanExitRunner)

    from gateway.run import start_gateway

    # verbosity=1 triggers the code path that uses RedactingFormatter.
    # Before the fix this raised NameError.
    assert await start_gateway(config=GatewayConfig(), replace=False, verbosity=1) is True
@pytest.mark.asyncio
async def test_start_gateway_replace_force_uses_terminate_pid(monkeypatch, tmp_path):
monkeypatch.setenv("HERMES_HOME", str(tmp_path))

View file

@ -1,3 +1,4 @@
import asyncio
import os
from gateway.config import Platform
@ -130,3 +131,99 @@ def test_set_session_env_handles_missing_optional_fields():
assert get_session_env("HERMES_SESSION_THREAD_ID") == ""
runner._clear_session_env(tokens)
# ---------------------------------------------------------------------------
# SESSION_KEY contextvars tests
# ---------------------------------------------------------------------------
def test_session_key_set_via_contextvars(monkeypatch):
    """set_session_vars should expose HERMES_SESSION_KEY via contextvars."""
    monkeypatch.delenv("HERMES_SESSION_KEY", raising=False)
    tokens = set_session_vars(
        platform="telegram",
        chat_id="-1001",
        session_key="tg:-1001:17585",
    )
    assert get_session_env("HERMES_SESSION_KEY") == "tg:-1001:17585"
    # Clearing restores the empty default.
    clear_session_vars(tokens)
    assert get_session_env("HERMES_SESSION_KEY") == ""
def test_session_key_falls_back_to_os_environ(monkeypatch):
    """get_session_env for SESSION_KEY should fall back to os.environ."""
    monkeypatch.setenv("HERMES_SESSION_KEY", "env-session-123")
    # With no contextvar set, the process environment wins.
    assert get_session_env("HERMES_SESSION_KEY") == "env-session-123"
    # A contextvar, once set, takes precedence over os.environ.
    tokens = set_session_vars(session_key="ctx-session-456")
    assert get_session_env("HERMES_SESSION_KEY") == "ctx-session-456"
    # Clearing the contextvar restores the os.environ fallback.
    clear_session_vars(tokens)
    assert get_session_env("HERMES_SESSION_KEY") == "env-session-123"
def test_set_session_env_includes_session_key():
    """_set_session_env should propagate session_key from SessionContext."""
    # Bare instance: __init__ is irrelevant to _set_session_env.
    runner = object.__new__(GatewayRunner)
    context = SessionContext(
        source=SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="-1001",
            chat_name="Group",
            chat_type="group",
            thread_id="17585",
        ),
        connected_platforms=[],
        home_channels={},
        session_key="tg:-1001:17585",
    )
    tokens = runner._set_session_env(context)
    assert get_session_env("HERMES_SESSION_KEY") == "tg:-1001:17585"
    runner._clear_session_env(tokens)
    assert get_session_env("HERMES_SESSION_KEY") == ""
def test_session_key_no_race_condition_with_contextvars(monkeypatch):
    """Prove contextvars isolates SESSION_KEY across concurrent async tasks.

    Two overlapping tasks set different session keys. With contextvars each
    task reads back its own value; with os.environ the second task would
    overwrite the first (the old bug).
    """
    monkeypatch.delenv("HERMES_SESSION_KEY", raising=False)
    observed = {}

    async def worker(key: str, delay: float):
        tokens = set_session_vars(session_key=key)
        try:
            await asyncio.sleep(delay)
            observed[key] = get_session_env("HERMES_SESSION_KEY")
        finally:
            clear_session_vars(tokens)

    async def drive():
        # Start A, let it begin sleeping, then start B so they overlap.
        task_a = asyncio.create_task(worker("session-A", 0.15))
        await asyncio.sleep(0.05)
        task_b = asyncio.create_task(worker("session-B", 0.05))
        await asyncio.gather(task_a, task_b)

    asyncio.run(drive())
    # Both tasks must read back their own session key.
    assert observed["session-A"] == "session-A", (
        f"Session A got '{observed['session-A']}' instead of 'session-A' — race condition!"
    )
    assert observed["session-B"] == "session-B", (
        f"Session B got '{observed['session-B']}' instead of 'session-B' — race condition!"
    )

View file

@ -104,6 +104,34 @@ class TestGatewayRuntimeStatus:
assert payload["platforms"]["telegram"]["error_code"] == "telegram_polling_conflict"
assert payload["platforms"]["telegram"]["error_message"] == "another poller is active"
def test_write_runtime_status_explicit_none_clears_stale_fields(self, tmp_path, monkeypatch):
    """Explicit None values must clear previously recorded error fields."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # First write records a failed state with error details.
    status.write_runtime_status(
        gateway_state="startup_failed",
        exit_reason="stale error",
        platform="discord",
        platform_state="fatal",
        error_code="discord_timeout",
        error_message="stale platform error",
    )
    # Second write passes explicit Nones, which must wipe the stale fields.
    status.write_runtime_status(
        gateway_state="running",
        exit_reason=None,
        platform="discord",
        platform_state="connected",
        error_code=None,
        error_message=None,
    )
    payload = status.read_runtime_status()
    assert payload["gateway_state"] == "running"
    assert payload["exit_reason"] is None
    entry = payload["platforms"]["discord"]
    assert entry["state"] == "connected"
    assert entry["error_code"] is None
    assert entry["error_message"] is None
class TestTerminatePid:
def test_force_uses_taskkill_on_windows(self, monkeypatch):

View file

@ -505,3 +505,81 @@ class TestSegmentBreakOnToolBoundary:
assert len(sent_texts) == 3
assert sent_texts[0].startswith(prefix)
assert sum(len(t) for t in sent_texts[1:]) == len(tail)
class TestInterimCommentaryMessages:
    """Interim commentary vs. final-response handling in the stream consumer."""

    @pytest.mark.asyncio
    async def test_commentary_message_stays_separate_from_final_stream(self):
        """Commentary goes out as its own message; the final stream follows."""
        adapter = MagicMock()
        # Two distinct sends expected: commentary first, then the final text.
        adapter.send = AsyncMock(side_effect=[
            SimpleNamespace(success=True, message_id="msg_1"),
            SimpleNamespace(success=True, message_id="msg_2"),
        ])
        adapter.edit_message = AsyncMock(return_value=SimpleNamespace(success=True))
        adapter.MAX_MESSAGE_LENGTH = 4096
        consumer = GatewayStreamConsumer(
            adapter,
            "chat_123",
            StreamConsumerConfig(edit_interval=0.01, buffer_threshold=5),
        )
        consumer.on_commentary("I'll inspect the repository first.")
        consumer.on_delta("Done.")
        consumer.finish()
        await consumer.run()
        sent_texts = [call[1]["content"] for call in adapter.send.call_args_list]
        assert sent_texts == ["I'll inspect the repository first.", "Done."]
        assert consumer.final_response_sent is True

    @pytest.mark.asyncio
    async def test_failed_final_send_does_not_mark_final_response_sent(self):
        """A failed send must leave both 'sent' flags False so the caller can retry."""
        adapter = MagicMock()
        # Every send fails.
        adapter.send = AsyncMock(return_value=SimpleNamespace(success=False, message_id=None))
        adapter.edit_message = AsyncMock(return_value=SimpleNamespace(success=True))
        adapter.MAX_MESSAGE_LENGTH = 4096
        consumer = GatewayStreamConsumer(
            adapter,
            "chat_123",
            StreamConsumerConfig(edit_interval=0.01, buffer_threshold=5),
        )
        consumer.on_delta("Done.")
        consumer.finish()
        await consumer.run()
        assert consumer.final_response_sent is False
        assert consumer.already_sent is False

    @pytest.mark.asyncio
    async def test_success_without_message_id_marks_visible_and_sends_only_tail(self):
        """Successful sends lacking a message_id cannot be edited, so only the
        remaining tail is sent in follow-up flushes."""
        adapter = MagicMock()
        adapter.send = AsyncMock(side_effect=[
            SimpleNamespace(success=True, message_id=None),
            SimpleNamespace(success=True, message_id=None),
        ])
        adapter.edit_message = AsyncMock(return_value=SimpleNamespace(success=True))
        adapter.MAX_MESSAGE_LENGTH = 4096
        consumer = GatewayStreamConsumer(
            adapter,
            "chat_123",
            # NOTE(review): cursor="" is passed yet the first flushed text ends
            # with "▉" — confirm cursor semantics in StreamConsumerConfig.
            StreamConsumerConfig(edit_interval=0.01, buffer_threshold=5, cursor=""),
        )
        consumer.on_delta("Hello")
        task = asyncio.create_task(consumer.run())
        # Sleep past edit_interval so the first flush happens mid-stream.
        await asyncio.sleep(0.08)
        consumer.on_delta(" world")
        await asyncio.sleep(0.08)
        consumer.finish()
        await task
        sent_texts = [call[1]["content"] for call in adapter.send.call_args_list]
        assert sent_texts == ["Hello ▉", "world"]
        assert consumer.already_sent is True
        assert consumer.final_response_sent is True

View file

@ -403,6 +403,56 @@ class TestWatchUpdateProgress:
# Should not crash; legacy notification handles this case
@pytest.mark.asyncio
async def test_prompt_forwarded_only_once(self, tmp_path):
    """Regression: prompt must not be re-sent on every poll cycle.

    Before the fix, the watcher never deleted .update_prompt.json after
    forwarding, causing the same prompt to be sent every poll_interval.
    """
    runner = _make_runner()
    hermes_home = tmp_path / "hermes"
    hermes_home.mkdir()
    # Pending-update marker that points the watcher at a telegram session.
    pending = {"platform": "telegram", "chat_id": "111", "user_id": "222",
               "session_key": "agent:main:telegram:dm:111"}
    (hermes_home / ".update_pending.json").write_text(json.dumps(pending))
    (hermes_home / ".update_output.txt").write_text("")
    mock_adapter = AsyncMock()
    runner.adapters = {Platform.TELEGRAM: mock_adapter}
    # Write the prompt file up front (before the watcher starts).
    # The watcher should forward it exactly once, then delete it.
    prompt = {"prompt": "Would you like to configure new options now? Y/n",
              "default": "n", "id": "dup-test"}
    (hermes_home / ".update_prompt.json").write_text(json.dumps(prompt))

    async def finish_after_polls():
        # Wait long enough for multiple poll cycles to occur, then
        # simulate a response + completion.
        await asyncio.sleep(1.0)
        (hermes_home / ".update_response").write_text("n")
        await asyncio.sleep(0.3)
        (hermes_home / ".update_exit_code").write_text("0")

    with patch("gateway.run._hermes_home", hermes_home):
        task = asyncio.create_task(finish_after_polls())
        # poll_interval=0.1 guarantees ~10 cycles before the 1.0s response.
        await runner._watch_update_progress(
            poll_interval=0.1,
            stream_interval=0.2,
            timeout=10.0,
        )
        await task
    # Count how many times the prompt text was sent.
    all_sent = [str(c) for c in mock_adapter.send.call_args_list]
    prompt_sends = [s for s in all_sent if "configure new options" in s]
    assert len(prompt_sends) == 1, (
        f"Prompt was sent {len(prompt_sends)} times (expected 1). "
        f"All sends: {all_sent}"
    )
# ---------------------------------------------------------------------------
# Message interception for update prompts

View file

@ -63,7 +63,7 @@ class TestVerboseCommand:
@pytest.mark.asyncio
async def test_enabled_cycles_mode(self, tmp_path, monkeypatch):
"""When enabled, /verbose cycles tool_progress mode."""
"""When enabled, /verbose cycles tool_progress mode per-platform."""
hermes_home = tmp_path / "hermes"
hermes_home.mkdir()
config_path = hermes_home / "config.yaml"
@ -79,10 +79,11 @@ class TestVerboseCommand:
# all -> verbose
assert "VERBOSE" in result
assert "telegram" in result.lower() # per-platform feedback
# Verify config was saved
# Verify config was saved to display.platforms.telegram
saved = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert saved["display"]["tool_progress"] == "verbose"
assert saved["display"]["platforms"]["telegram"]["tool_progress"] == "verbose"
@pytest.mark.asyncio
async def test_cycles_through_all_modes(self, tmp_path, monkeypatch):
@ -103,8 +104,9 @@ class TestVerboseCommand:
for mode in expected:
result = await runner._handle_verbose_command(_make_event())
saved = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert saved["display"]["tool_progress"] == mode, \
f"Expected {mode}, got {saved['display']['tool_progress']}"
actual = saved["display"]["platforms"]["telegram"]["tool_progress"]
assert actual == mode, \
f"Expected {mode}, got {actual}"
@pytest.mark.asyncio
async def test_defaults_to_all_when_no_tool_progress_set(self, tmp_path, monkeypatch):
@ -122,10 +124,45 @@ class TestVerboseCommand:
runner = _make_runner()
result = await runner._handle_verbose_command(_make_event())
# default "all" -> verbose
# Telegram default is "all" (high tier) → cycles to verbose
assert "VERBOSE" in result
saved = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert saved["display"]["tool_progress"] == "verbose"
assert saved["display"]["platforms"]["telegram"]["tool_progress"] == "verbose"
@pytest.mark.asyncio
async def test_per_platform_isolation(self, tmp_path, monkeypatch):
    """Cycling /verbose on Telegram doesn't change Slack's setting.

    Without a global tool_progress, each platform uses its built-in
    default: Telegram = 'all' (high tier), Slack = 'new' (medium tier).
    """
    hermes_home = tmp_path / "hermes"
    hermes_home.mkdir()
    config_path = hermes_home / "config.yaml"
    # No global tool_progress → built-in platform defaults apply.
    config_path.write_text(
        "display:\n tool_progress_command: true\n",
        encoding="utf-8",
    )
    monkeypatch.setattr(gateway_run, "_hermes_home", hermes_home)
    runner = _make_runner()

    # Cycle once on each platform.
    await runner._handle_verbose_command(_make_event(platform=Platform.TELEGRAM))
    await runner._handle_verbose_command(_make_event(platform=Platform.SLACK))

    saved = yaml.safe_load(config_path.read_text(encoding="utf-8"))
    per_platform = saved["display"]["platforms"]
    # Telegram: default 'all' cycles to 'verbose'.
    assert per_platform["telegram"]["tool_progress"] == "verbose"
    # Slack: default 'new' cycles to 'all'.
    assert per_platform["slack"]["tool_progress"] == "all"
@pytest.mark.asyncio
async def test_no_config_file_returns_disabled(self, tmp_path, monkeypatch):

View file

@ -0,0 +1,185 @@
"""Tests for the WeCom callback-mode adapter."""
import asyncio
from xml.etree import ElementTree as ET
import pytest
from gateway.config import PlatformConfig
from gateway.platforms.wecom_callback import WecomCallbackAdapter
from gateway.platforms.wecom_crypto import WXBizMsgCrypt
def _app(name="test-app", corp_id="ww1234567890", agent_id="1000002"):
return {
"name": name,
"corp_id": corp_id,
"corp_secret": "test-secret",
"agent_id": agent_id,
"token": "test-callback-token",
"encoding_aes_key": "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFG",
}
def _config(apps=None):
    """Build a callback-mode PlatformConfig bound to an ephemeral local port.

    A falsy *apps* (None or empty list) falls back to a single default app.
    """
    extra = {
        "mode": "callback",
        "host": "127.0.0.1",
        "port": 0,
        "apps": apps or [_app()],
    }
    return PlatformConfig(enabled=True, extra=extra)
class TestWecomCrypto:
    """Round-trip and tamper-detection tests for the WeCom message cipher."""

    def test_roundtrip_encrypt_decrypt(self):
        # Encrypt a payload, then decrypt it back from the envelope fields.
        cfg = _app()
        cipher = WXBizMsgCrypt(cfg["token"], cfg["encoding_aes_key"], cfg["corp_id"])
        envelope = ET.fromstring(
            cipher.encrypt(
                "<xml><Content>hello</Content></xml>", nonce="nonce123", timestamp="123456",
            )
        )

        def field(tag):
            # Read one child element's text from the encrypted envelope.
            return envelope.findtext(tag, default="")

        plaintext = cipher.decrypt(
            field("MsgSignature"),
            field("TimeStamp"),
            field("Nonce"),
            field("Encrypt"),
        )
        assert b"<Content>hello</Content>" in plaintext

    def test_signature_mismatch_raises(self):
        # A wrong signature must be rejected before any decryption happens.
        from gateway.platforms.wecom_crypto import SignatureError

        cfg = _app()
        cipher = WXBizMsgCrypt(cfg["token"], cfg["encoding_aes_key"], cfg["corp_id"])
        envelope = ET.fromstring(cipher.encrypt("<xml/>", nonce="n", timestamp="1"))
        with pytest.raises(SignatureError):
            cipher.decrypt("bad-sig", "1", "n", envelope.findtext("Encrypt", default=""))
class TestWecomCallbackEventConstruction:
    """_build_event: raw callback XML -> gateway event (or None for non-messages)."""

    def test_build_event_extracts_text_message(self):
        # A text message yields an event whose chat_id is "corp_id:user_id",
        # scoping the conversation per corp.
        adapter = WecomCallbackAdapter(_config())
        xml_text = """
        <xml>
        <ToUserName>ww1234567890</ToUserName>
        <FromUserName>zhangsan</FromUserName>
        <CreateTime>1710000000</CreateTime>
        <MsgType>text</MsgType>
        <Content>\u4f60\u597d</Content>
        <MsgId>123456789</MsgId>
        </xml>
        """
        event = adapter._build_event(_app(), xml_text)
        assert event is not None
        assert event.source is not None
        assert event.source.user_id == "zhangsan"
        assert event.source.chat_id == "ww1234567890:zhangsan"
        assert event.message_id == "123456789"
        assert event.text == "\u4f60\u597d"

    def test_build_event_returns_none_for_subscribe(self):
        # Non-message callbacks (e.g. a subscribe event) produce no event.
        adapter = WecomCallbackAdapter(_config())
        xml_text = """
        <xml>
        <ToUserName>ww1234567890</ToUserName>
        <FromUserName>zhangsan</FromUserName>
        <CreateTime>1710000000</CreateTime>
        <MsgType>event</MsgType>
        <Event>subscribe</Event>
        </xml>
        """
        event = adapter._build_event(_app(), xml_text)
        assert event is None
class TestWecomCallbackRouting:
    """Outbound sends route to the correct corp app for scoped chat ids."""

    def test_user_app_key_scopes_across_corps(self):
        # The user/app key is "corp_id:user_id", so the same user name in two
        # different corps maps to two distinct keys.
        adapter = WecomCallbackAdapter(_config())
        assert adapter._user_app_key("corpA", "alice") == "corpA:alice"
        assert adapter._user_app_key("corpB", "alice") == "corpB:alice"
        assert adapter._user_app_key("corpA", "alice") != adapter._user_app_key("corpB", "alice")

    @pytest.mark.asyncio
    async def test_send_selects_correct_app_for_scoped_chat_id(self):
        # With two apps configured, sending to "corpB:alice" must use the
        # corp-b app: its agent id in the payload and its token in the URL.
        apps = [
            _app(name="corp-a", corp_id="corpA", agent_id="1001"),
            _app(name="corp-b", corp_id="corpB", agent_id="2002"),
        ]
        adapter = WecomCallbackAdapter(_config(apps=apps))
        adapter._user_app_map["corpB:alice"] = "corp-b"
        adapter._access_tokens["corp-b"] = {"token": "tok-b", "expires_at": 9999999999}
        calls = {}

        class FakeResponse:
            def json(self):
                return {"errcode": 0, "msgid": "ok1"}

        class FakeClient:
            # Records the outgoing request instead of hitting the WeCom API.
            async def post(self, url, json):
                calls["url"] = url
                calls["json"] = json
                return FakeResponse()

        adapter._http_client = FakeClient()
        result = await adapter.send("corpB:alice", "hello")
        assert result.success is True
        # The scoped chat id is unwrapped to the bare user id for the API.
        assert calls["json"]["touser"] == "alice"
        # agent_id is sent as an integer in the API payload.
        assert calls["json"]["agentid"] == 2002
        assert "tok-b" in calls["url"]

    @pytest.mark.asyncio
    async def test_send_falls_back_from_bare_user_id_when_unique(self):
        # A bare user id (no corp prefix) still resolves when exactly one
        # mapping exists for that user.
        apps = [_app(name="corp-a", corp_id="corpA", agent_id="1001")]
        adapter = WecomCallbackAdapter(_config(apps=apps))
        adapter._user_app_map["corpA:alice"] = "corp-a"
        adapter._access_tokens["corp-a"] = {"token": "tok-a", "expires_at": 9999999999}
        calls = {}

        class FakeResponse:
            def json(self):
                return {"errcode": 0, "msgid": "ok2"}

        class FakeClient:
            async def post(self, url, json):
                calls["url"] = url
                calls["json"] = json
                return FakeResponse()

        adapter._http_client = FakeClient()
        result = await adapter.send("alice", "hello")
        assert result.success is True
        assert calls["json"]["agentid"] == 1001
class TestWecomCallbackPollLoop:
    """The queue poll loop dispatches queued events to handle_message."""

    @pytest.mark.asyncio
    async def test_poll_loop_dispatches_handle_message(self, monkeypatch):
        adapter = WecomCallbackAdapter(_config())
        calls = []

        async def fake_handle_message(event):
            # Capture dispatched event text instead of running the agent.
            calls.append(event.text)

        monkeypatch.setattr(adapter, "handle_message", fake_handle_message)
        event = adapter._build_event(
            _app(),
            """
        <xml>
        <ToUserName>ww1234567890</ToUserName>
        <FromUserName>lisi</FromUserName>
        <CreateTime>1710000000</CreateTime>
        <MsgType>text</MsgType>
        <Content>test</Content>
        <MsgId>m2</MsgId>
        </xml>
        """,
        )
        task = asyncio.create_task(adapter._poll_loop())
        await adapter._message_queue.put(event)
        # Give the loop one scheduling slice to consume the queued event.
        await asyncio.sleep(0.05)
        task.cancel()
        with pytest.raises(asyncio.CancelledError):
            await task
        assert calls == ["test"]

View file

@ -64,13 +64,44 @@ class TestWeixinFormatting:
class TestWeixinChunking:
def test_split_text_keeps_short_multiline_message_in_single_chunk(self):
def test_split_text_splits_short_chatty_replies_into_separate_bubbles(self):
adapter = _make_adapter()
content = adapter.format_message("第一行\n第二行\n第三行")
chunks = adapter._split_text(content)
assert chunks == ["第一行\n第二行\n第三行"]
assert chunks == ["第一行", "第二行", "第三行"]
    def test_split_text_keeps_structured_table_block_together(self):
        # Indented "key: value" continuation lines form one structured block
        # and must not be split into separate chat bubbles.
        adapter = _make_adapter()
        content = adapter.format_message(
            "- Setting: Timeout\n Value: 30s\n- Setting: Retries\n Value: 3"
        )
        chunks = adapter._split_text(content)
        assert chunks == ["- Setting: Timeout\n Value: 30s\n- Setting: Retries\n Value: 3"]
    def test_split_text_keeps_four_line_structured_blocks_together(self):
        # A heading line followed by bullet items is a single structured block:
        # all four lines stay in one chunk.
        adapter = _make_adapter()
        content = adapter.format_message(
            "今天结论:\n"
            "- 留存下降 3%\n"
            "- 转化上涨 8%\n"
            "- 主要问题在首日激活"
        )
        chunks = adapter._split_text(content)
        assert chunks == ["今天结论:\n- 留存下降 3%\n- 转化上涨 8%\n- 主要问题在首日激活"]
    def test_split_text_keeps_heading_with_body_together(self):
        # A markdown heading is reformatted to bold and kept in the same
        # chunk as the body text that follows it.
        adapter = _make_adapter()
        content = adapter.format_message("## 结论\n这是正文")
        chunks = adapter._split_text(content)
        assert chunks == ["**结论**\n这是正文"]
def test_split_text_keeps_short_reformatted_table_in_single_chunk(self):
adapter = _make_adapter()

View file

@ -14,6 +14,7 @@ from hermes_cli.auth import (
PROVIDER_REGISTRY,
_read_codex_tokens,
_save_codex_tokens,
_write_codex_cli_tokens,
_import_codex_cli_tokens,
get_codex_auth_status,
get_provider_auth_state,
@ -161,7 +162,7 @@ def test_import_codex_cli_tokens_missing(tmp_path, monkeypatch):
def test_codex_tokens_not_written_to_shared_file(tmp_path, monkeypatch):
"""Verify Hermes never writes to ~/.codex/auth.json."""
"""Verify _save_codex_tokens writes only to Hermes auth store, not ~/.codex/."""
hermes_home = tmp_path / "hermes"
codex_home = tmp_path / "codex-cli"
hermes_home.mkdir(parents=True, exist_ok=True)
@ -173,7 +174,7 @@ def test_codex_tokens_not_written_to_shared_file(tmp_path, monkeypatch):
_save_codex_tokens({"access_token": "hermes-at", "refresh_token": "hermes-rt"})
# ~/.codex/auth.json should NOT exist
# ~/.codex/auth.json should NOT exist — _save_codex_tokens only touches Hermes store
assert not (codex_home / "auth.json").exists()
# Hermes auth store should have the tokens
@ -181,6 +182,98 @@ def test_codex_tokens_not_written_to_shared_file(tmp_path, monkeypatch):
assert data["tokens"]["access_token"] == "hermes-at"
def test_write_codex_cli_tokens_creates_file(tmp_path, monkeypatch):
    """_write_codex_cli_tokens creates ~/.codex/auth.json with refreshed tokens."""
    codex_home = tmp_path / "codex-cli"
    # CODEX_HOME redirects the writer away from the real ~/.codex.
    monkeypatch.setenv("CODEX_HOME", str(codex_home))
    _write_codex_cli_tokens("new-access", "new-refresh", last_refresh="2026-04-12T00:00:00Z")
    auth_path = codex_home / "auth.json"
    assert auth_path.exists()
    data = json.loads(auth_path.read_text())
    assert data["tokens"]["access_token"] == "new-access"
    assert data["tokens"]["refresh_token"] == "new-refresh"
    assert data["last_refresh"] == "2026-04-12T00:00:00Z"
    # Verify file permissions are restricted (owner read/write only)
    assert (auth_path.stat().st_mode & 0o777) == 0o600
def test_write_codex_cli_tokens_preserves_existing(tmp_path, monkeypatch):
    """_write_codex_cli_tokens preserves extra fields in existing auth.json."""
    codex_home = tmp_path / "codex-cli"
    codex_home.mkdir(parents=True, exist_ok=True)
    monkeypatch.setenv("CODEX_HOME", str(codex_home))
    # Seed an auth.json with extra keys both inside and outside "tokens";
    # only the access/refresh tokens should be overwritten.
    existing = {
        "tokens": {
            "access_token": "old-access",
            "refresh_token": "old-refresh",
            "extra_field": "preserved",
        },
        "last_refresh": "2026-01-01T00:00:00Z",
        "custom_key": "keep_me",
    }
    (codex_home / "auth.json").write_text(json.dumps(existing))
    _write_codex_cli_tokens("updated-access", "updated-refresh")
    data = json.loads((codex_home / "auth.json").read_text())
    assert data["tokens"]["access_token"] == "updated-access"
    assert data["tokens"]["refresh_token"] == "updated-refresh"
    assert data["tokens"]["extra_field"] == "preserved"
    assert data["custom_key"] == "keep_me"
    # last_refresh not updated since we didn't pass it
    assert data["last_refresh"] == "2026-01-01T00:00:00Z"
def test_write_codex_cli_tokens_handles_missing_dir(tmp_path, monkeypatch):
    """_write_codex_cli_tokens creates parent directories if missing."""
    # Point CODEX_HOME at a path whose parents do not exist yet.
    missing_home = tmp_path / "does" / "not" / "exist"
    monkeypatch.setenv("CODEX_HOME", str(missing_home))

    _write_codex_cli_tokens("at", "rt")

    auth_file = missing_home / "auth.json"
    assert auth_file.exists()
    payload = json.loads(auth_file.read_text())
    assert payload["tokens"]["access_token"] == "at"
def test_refresh_codex_auth_tokens_writes_back_to_cli(tmp_path, monkeypatch):
    """After refreshing, _refresh_codex_auth_tokens writes back to ~/.codex/auth.json."""
    from hermes_cli.auth import _refresh_codex_auth_tokens
    hermes_home = tmp_path / "hermes"
    codex_home = tmp_path / "codex-cli"
    hermes_home.mkdir(parents=True, exist_ok=True)
    codex_home.mkdir(parents=True, exist_ok=True)
    # Empty Hermes auth store; both stores are redirected via env vars.
    (hermes_home / "auth.json").write_text(json.dumps({"version": 1, "providers": {}}))
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setenv("CODEX_HOME", str(codex_home))
    # Write initial CLI tokens
    (codex_home / "auth.json").write_text(json.dumps({
        "tokens": {"access_token": "old-at", "refresh_token": "old-rt"},
    }))
    # Mock the pure refresh to return new tokens
    monkeypatch.setattr("hermes_cli.auth.refresh_codex_oauth_pure", lambda *a, **kw: {
        "access_token": "refreshed-at",
        "refresh_token": "refreshed-rt",
        "last_refresh": "2026-04-12T01:00:00Z",
    })
    _refresh_codex_auth_tokens(
        {"access_token": "old-at", "refresh_token": "old-rt"},
        timeout_seconds=10,
    )
    # Verify CLI file was updated
    cli_data = json.loads((codex_home / "auth.json").read_text())
    assert cli_data["tokens"]["access_token"] == "refreshed-at"
    assert cli_data["tokens"]["refresh_token"] == "refreshed-rt"
def test_resolve_returns_hermes_auth_store_source(tmp_path, monkeypatch):
hermes_home = tmp_path / "hermes"
_setup_hermes_auth(hermes_home)

View file

@ -0,0 +1,897 @@
"""Tests for hermes backup and import commands."""
import os
import zipfile
from argparse import Namespace
from pathlib import Path
from unittest.mock import patch
import pytest
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_hermes_tree(root: Path) -> None:
"""Create a realistic ~/.hermes directory structure for testing."""
(root / "config.yaml").write_text("model:\n provider: openrouter\n")
(root / ".env").write_text("OPENROUTER_API_KEY=sk-test-123\n")
(root / "memory_store.db").write_bytes(b"fake-sqlite")
(root / "hermes_state.db").write_bytes(b"fake-state")
# Sessions
(root / "sessions").mkdir(exist_ok=True)
(root / "sessions" / "abc123.json").write_text("{}")
# Skills
(root / "skills").mkdir(exist_ok=True)
(root / "skills" / "my-skill").mkdir()
(root / "skills" / "my-skill" / "SKILL.md").write_text("# My Skill\n")
# Skins
(root / "skins").mkdir(exist_ok=True)
(root / "skins" / "cyber.yaml").write_text("name: cyber\n")
# Cron
(root / "cron").mkdir(exist_ok=True)
(root / "cron" / "jobs.json").write_text("[]")
# Memories
(root / "memories").mkdir(exist_ok=True)
(root / "memories" / "notes.json").write_text("{}")
# Profiles
(root / "profiles").mkdir(exist_ok=True)
(root / "profiles" / "coder").mkdir()
(root / "profiles" / "coder" / "config.yaml").write_text("model:\n provider: anthropic\n")
(root / "profiles" / "coder" / ".env").write_text("ANTHROPIC_API_KEY=sk-ant-123\n")
# hermes-agent repo (should be EXCLUDED)
(root / "hermes-agent").mkdir(exist_ok=True)
(root / "hermes-agent" / "run_agent.py").write_text("# big file\n")
(root / "hermes-agent" / ".git").mkdir()
(root / "hermes-agent" / ".git" / "HEAD").write_text("ref: refs/heads/main\n")
# __pycache__ (should be EXCLUDED)
(root / "plugins").mkdir(exist_ok=True)
(root / "plugins" / "__pycache__").mkdir()
(root / "plugins" / "__pycache__" / "mod.cpython-312.pyc").write_bytes(b"\x00")
# PID files (should be EXCLUDED)
(root / "gateway.pid").write_text("12345")
# Logs (should be included)
(root / "logs").mkdir(exist_ok=True)
(root / "logs" / "agent.log").write_text("log line\n")
# ---------------------------------------------------------------------------
# _should_exclude tests
# ---------------------------------------------------------------------------
class TestShouldExclude:
    """_should_exclude: relative paths that backups must skip vs. keep."""

    def test_excludes_hermes_agent(self):
        # The hermes-agent checkout is rebuildable from git; never back it up.
        from hermes_cli.backup import _should_exclude
        assert _should_exclude(Path("hermes-agent/run_agent.py"))
        assert _should_exclude(Path("hermes-agent/.git/HEAD"))

    def test_excludes_pycache(self):
        from hermes_cli.backup import _should_exclude
        assert _should_exclude(Path("plugins/__pycache__/mod.cpython-312.pyc"))

    def test_excludes_pyc_files(self):
        # Bare .pyc files outside __pycache__ are excluded too.
        from hermes_cli.backup import _should_exclude
        assert _should_exclude(Path("some/module.pyc"))

    def test_excludes_pid_files(self):
        # PID files are runtime state of the current host; meaningless in a backup.
        from hermes_cli.backup import _should_exclude
        assert _should_exclude(Path("gateway.pid"))
        assert _should_exclude(Path("cron.pid"))

    def test_includes_config(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path("config.yaml"))

    def test_includes_env(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path(".env"))

    def test_includes_skills(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path("skills/my-skill/SKILL.md"))

    def test_includes_profiles(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path("profiles/coder/config.yaml"))

    def test_includes_sessions(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path("sessions/abc.json"))

    def test_includes_logs(self):
        from hermes_cli.backup import _should_exclude
        assert not _should_exclude(Path("logs/agent.log"))
# ---------------------------------------------------------------------------
# Backup tests
# ---------------------------------------------------------------------------
class TestBackup:
    """`hermes backup` zips the hermes home with the documented exclusions."""

    def test_creates_zip(self, tmp_path, monkeypatch):
        """Backup creates a valid zip containing expected files."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        _make_hermes_tree(hermes_home)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        # get_default_hermes_root needs this
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out_zip = tmp_path / "backup.zip"
        args = Namespace(output=str(out_zip))
        from hermes_cli.backup import run_backup
        run_backup(args)
        assert out_zip.exists()
        with zipfile.ZipFile(out_zip, "r") as zf:
            names = zf.namelist()
            # Config should be present
            assert "config.yaml" in names
            assert ".env" in names
            # Skills
            assert "skills/my-skill/SKILL.md" in names
            # Profiles
            assert "profiles/coder/config.yaml" in names
            assert "profiles/coder/.env" in names
            # Sessions
            assert "sessions/abc123.json" in names
            # Logs
            assert "logs/agent.log" in names
            # Skins
            assert "skins/cyber.yaml" in names

    def test_excludes_hermes_agent(self, tmp_path, monkeypatch):
        """Backup does NOT include hermes-agent/ directory."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        _make_hermes_tree(hermes_home)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out_zip = tmp_path / "backup.zip"
        args = Namespace(output=str(out_zip))
        from hermes_cli.backup import run_backup
        run_backup(args)
        with zipfile.ZipFile(out_zip, "r") as zf:
            names = zf.namelist()
            agent_files = [n for n in names if "hermes-agent" in n]
            assert agent_files == [], f"hermes-agent files leaked into backup: {agent_files}"

    def test_excludes_pycache(self, tmp_path, monkeypatch):
        """Backup does NOT include __pycache__ dirs."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        _make_hermes_tree(hermes_home)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out_zip = tmp_path / "backup.zip"
        args = Namespace(output=str(out_zip))
        from hermes_cli.backup import run_backup
        run_backup(args)
        with zipfile.ZipFile(out_zip, "r") as zf:
            names = zf.namelist()
            pycache_files = [n for n in names if "__pycache__" in n]
            assert pycache_files == []

    def test_excludes_pid_files(self, tmp_path, monkeypatch):
        """Backup does NOT include PID files."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        _make_hermes_tree(hermes_home)
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        out_zip = tmp_path / "backup.zip"
        args = Namespace(output=str(out_zip))
        from hermes_cli.backup import run_backup
        run_backup(args)
        with zipfile.ZipFile(out_zip, "r") as zf:
            names = zf.namelist()
            pid_files = [n for n in names if n.endswith(".pid")]
            assert pid_files == []

    def test_default_output_path(self, tmp_path, monkeypatch):
        """When no output path given, zip goes to ~/hermes-backup-*.zip."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        (hermes_home / "config.yaml").write_text("model: test\n")
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        args = Namespace(output=None)
        from hermes_cli.backup import run_backup
        run_backup(args)
        # Should exist in home dir
        zips = list(tmp_path.glob("hermes-backup-*.zip"))
        assert len(zips) == 1
# ---------------------------------------------------------------------------
# Import tests
# ---------------------------------------------------------------------------
class TestImport:
def _make_backup_zip(self, zip_path: Path, files: dict[str, str | bytes]) -> None:
"""Create a test zip with given files."""
with zipfile.ZipFile(zip_path, "w") as zf:
for name, content in files.items():
if isinstance(content, bytes):
zf.writestr(name, content)
else:
zf.writestr(name, content)
def test_restores_files(self, tmp_path, monkeypatch):
"""Import extracts files into hermes home."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "backup.zip"
self._make_backup_zip(zip_path, {
"config.yaml": "model:\n provider: openrouter\n",
".env": "OPENROUTER_API_KEY=sk-test\n",
"skills/my-skill/SKILL.md": "# My Skill\n",
"profiles/coder/config.yaml": "model:\n provider: anthropic\n",
})
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
run_import(args)
assert (hermes_home / "config.yaml").read_text() == "model:\n provider: openrouter\n"
assert (hermes_home / ".env").read_text() == "OPENROUTER_API_KEY=sk-test\n"
assert (hermes_home / "skills" / "my-skill" / "SKILL.md").read_text() == "# My Skill\n"
assert (hermes_home / "profiles" / "coder" / "config.yaml").exists()
def test_strips_hermes_prefix(self, tmp_path, monkeypatch):
"""Import strips .hermes/ prefix if all entries share it."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "backup.zip"
self._make_backup_zip(zip_path, {
".hermes/config.yaml": "model: test\n",
".hermes/skills/a/SKILL.md": "# A\n",
})
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
run_import(args)
assert (hermes_home / "config.yaml").read_text() == "model: test\n"
assert (hermes_home / "skills" / "a" / "SKILL.md").read_text() == "# A\n"
def test_rejects_empty_zip(self, tmp_path, monkeypatch):
"""Import rejects an empty zip."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "empty.zip"
with zipfile.ZipFile(zip_path, "w"):
pass # empty
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
with pytest.raises(SystemExit):
run_import(args)
def test_rejects_non_hermes_zip(self, tmp_path, monkeypatch):
"""Import rejects a zip that doesn't look like a hermes backup."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "random.zip"
self._make_backup_zip(zip_path, {
"some/random/file.txt": "hello",
"another/thing.json": "{}",
})
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
with pytest.raises(SystemExit):
run_import(args)
def test_blocks_path_traversal(self, tmp_path, monkeypatch):
"""Import blocks zip entries with path traversal."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "evil.zip"
# Include a marker file so validation passes
self._make_backup_zip(zip_path, {
"config.yaml": "model: test\n",
"../../etc/passwd": "root:x:0:0\n",
})
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
run_import(args)
# config.yaml should be restored
assert (hermes_home / "config.yaml").exists()
# traversal file should NOT exist outside hermes home
assert not (tmp_path / "etc" / "passwd").exists()
def test_confirmation_prompt_abort(self, tmp_path, monkeypatch):
"""Import aborts when user says no to confirmation."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
# Pre-existing config triggers the confirmation
(hermes_home / "config.yaml").write_text("existing: true\n")
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "backup.zip"
self._make_backup_zip(zip_path, {
"config.yaml": "model: restored\n",
})
args = Namespace(zipfile=str(zip_path), force=False)
from hermes_cli.backup import run_import
with patch("builtins.input", return_value="n"):
run_import(args)
# Original config should be unchanged
assert (hermes_home / "config.yaml").read_text() == "existing: true\n"
def test_force_skips_confirmation(self, tmp_path, monkeypatch):
"""Import with --force skips confirmation and overwrites."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
(hermes_home / "config.yaml").write_text("existing: true\n")
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
zip_path = tmp_path / "backup.zip"
self._make_backup_zip(zip_path, {
"config.yaml": "model: restored\n",
})
args = Namespace(zipfile=str(zip_path), force=True)
from hermes_cli.backup import run_import
run_import(args)
assert (hermes_home / "config.yaml").read_text() == "model: restored\n"
def test_missing_file_exits(self, tmp_path, monkeypatch):
"""Import exits with error for nonexistent file."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
args = Namespace(zipfile=str(tmp_path / "nonexistent.zip"), force=True)
from hermes_cli.backup import run_import
with pytest.raises(SystemExit):
run_import(args)
# ---------------------------------------------------------------------------
# Round-trip test
# ---------------------------------------------------------------------------
class TestRoundTrip:
    """End-to-end: a backup of one home restores faithfully into another."""

    def test_backup_then_import(self, tmp_path, monkeypatch):
        """Full round-trip: backup -> import to a new location -> verify."""
        # Source
        src_home = tmp_path / "source" / ".hermes"
        src_home.mkdir(parents=True)
        _make_hermes_tree(src_home)
        monkeypatch.setenv("HERMES_HOME", str(src_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path / "source")
        # Backup
        out_zip = tmp_path / "roundtrip.zip"
        from hermes_cli.backup import run_backup, run_import
        run_backup(Namespace(output=str(out_zip)))
        assert out_zip.exists()
        # Import into a different location (re-point env + home first)
        dst_home = tmp_path / "dest" / ".hermes"
        dst_home.mkdir(parents=True)
        monkeypatch.setenv("HERMES_HOME", str(dst_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path / "dest")
        run_import(Namespace(zipfile=str(out_zip), force=True))
        # Verify key files
        assert (dst_home / "config.yaml").read_text() == "model:\n provider: openrouter\n"
        assert (dst_home / ".env").read_text() == "OPENROUTER_API_KEY=sk-test-123\n"
        assert (dst_home / "skills" / "my-skill" / "SKILL.md").exists()
        assert (dst_home / "profiles" / "coder" / "config.yaml").exists()
        assert (dst_home / "sessions" / "abc123.json").exists()
        assert (dst_home / "logs" / "agent.log").exists()
        # hermes-agent should NOT be present
        assert not (dst_home / "hermes-agent").exists()
        # __pycache__ should NOT be present
        assert not (dst_home / "plugins" / "__pycache__").exists()
        # PID files should NOT be present
        assert not (dst_home / "gateway.pid").exists()
# ---------------------------------------------------------------------------
# Validate / detect-prefix unit tests
# ---------------------------------------------------------------------------
class TestFormatSize:
    """_format_size renders byte counts with human-readable unit suffixes."""

    def test_bytes(self):
        # Below 1 KiB the exact "N B" form is asserted.
        from hermes_cli.backup import _format_size
        assert _format_size(512) == "512 B"

    def test_kilobytes(self):
        from hermes_cli.backup import _format_size
        assert "KB" in _format_size(2048)

    def test_megabytes(self):
        from hermes_cli.backup import _format_size
        assert "MB" in _format_size(5 * 1024 * 1024)

    def test_gigabytes(self):
        from hermes_cli.backup import _format_size
        assert "GB" in _format_size(3 * 1024 ** 3)

    def test_terabytes(self):
        from hermes_cli.backup import _format_size
        assert "TB" in _format_size(2 * 1024 ** 4)
class TestValidation:
    """Unit tests for _validate_backup_zip and _detect_prefix on in-memory zips."""

    def test_validate_with_config(self):
        """Zip with config.yaml passes validation."""
        import io
        from hermes_cli.backup import _validate_backup_zip
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr("config.yaml", "test")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            ok, reason = _validate_backup_zip(zf)
        assert ok

    def test_validate_with_env(self):
        """Zip with .env passes validation."""
        import io
        from hermes_cli.backup import _validate_backup_zip
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr(".env", "KEY=val")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            ok, reason = _validate_backup_zip(zf)
        assert ok

    def test_validate_rejects_random(self):
        """Zip without hermes markers fails validation."""
        import io
        from hermes_cli.backup import _validate_backup_zip
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr("random/file.txt", "hello")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            ok, reason = _validate_backup_zip(zf)
        assert not ok

    def test_detect_prefix_hermes(self):
        """Detects .hermes/ prefix wrapping all entries."""
        import io
        from hermes_cli.backup import _detect_prefix
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr(".hermes/config.yaml", "test")
            zf.writestr(".hermes/skills/a/SKILL.md", "skill")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            assert _detect_prefix(zf) == ".hermes/"

    def test_detect_prefix_none(self):
        """No prefix when entries are at root."""
        import io
        from hermes_cli.backup import _detect_prefix
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr("config.yaml", "test")
            zf.writestr("skills/a/SKILL.md", "skill")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            assert _detect_prefix(zf) == ""

    def test_detect_prefix_only_dirs(self):
        """Prefix detection returns empty for zip with only directory entries."""
        import io
        from hermes_cli.backup import _detect_prefix
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            # Only directory entries (trailing slash)
            zf.writestr(".hermes/", "")
            zf.writestr(".hermes/skills/", "")
        buf.seek(0)
        with zipfile.ZipFile(buf, "r") as zf:
            assert _detect_prefix(zf) == ""
# ---------------------------------------------------------------------------
# Edge case tests for uncovered paths
# ---------------------------------------------------------------------------
class TestBackupEdgeCases:
def test_nonexistent_hermes_home(self, tmp_path, monkeypatch):
"""Backup exits when hermes home doesn't exist."""
fake_home = tmp_path / "nonexistent" / ".hermes"
monkeypatch.setenv("HERMES_HOME", str(fake_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path / "nonexistent")
args = Namespace(output=str(tmp_path / "out.zip"))
from hermes_cli.backup import run_backup
with pytest.raises(SystemExit):
run_backup(args)
def test_output_is_directory(self, tmp_path, monkeypatch):
"""When output path is a directory, zip is created inside it."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
(hermes_home / "config.yaml").write_text("model: test\n")
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
out_dir = tmp_path / "backups"
out_dir.mkdir()
args = Namespace(output=str(out_dir))
from hermes_cli.backup import run_backup
run_backup(args)
zips = list(out_dir.glob("hermes-backup-*.zip"))
assert len(zips) == 1
def test_output_without_zip_suffix(self, tmp_path, monkeypatch):
"""Output path without .zip gets suffix appended."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
(hermes_home / "config.yaml").write_text("model: test\n")
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
out_path = tmp_path / "mybackup.tar"
args = Namespace(output=str(out_path))
from hermes_cli.backup import run_backup
run_backup(args)
# Should have .tar.zip suffix
assert (tmp_path / "mybackup.tar.zip").exists()
def test_empty_hermes_home(self, tmp_path, monkeypatch):
"""Backup handles empty hermes home (no files to back up)."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
# Only excluded dirs, no actual files
(hermes_home / "__pycache__").mkdir()
(hermes_home / "__pycache__" / "foo.pyc").write_bytes(b"\x00")
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
args = Namespace(output=str(tmp_path / "out.zip"))
from hermes_cli.backup import run_backup
run_backup(args)
# No zip should be created
assert not (tmp_path / "out.zip").exists()
def test_permission_error_during_backup(self, tmp_path, monkeypatch):
"""Backup handles permission errors gracefully."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
(hermes_home / "config.yaml").write_text("model: test\n")
# Create an unreadable file
bad_file = hermes_home / "secret.db"
bad_file.write_text("data")
bad_file.chmod(0o000)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setattr(Path, "home", lambda: tmp_path)
out_zip = tmp_path / "out.zip"
args = Namespace(output=str(out_zip))
from hermes_cli.backup import run_backup
try:
run_backup(args)
finally:
# Restore permissions for cleanup
bad_file.chmod(0o644)
# Zip should still be created with the readable files
assert out_zip.exists()
def test_skips_output_zip_inside_hermes(self, tmp_path, monkeypatch):
    """Backup skips its own output zip if it's inside hermes root."""
    home = tmp_path / ".hermes"
    home.mkdir()
    (home / "config.yaml").write_text("model: test\n")
    monkeypatch.setenv("HERMES_HOME", str(home))
    monkeypatch.setattr(Path, "home", lambda: tmp_path)

    # Write the archive into the very tree being backed up.
    out_zip = home / "backup.zip"
    from hermes_cli.backup import run_backup

    run_backup(Namespace(output=str(out_zip)))

    # The archive exists but must not have swallowed itself.
    assert out_zip.exists()
    with zipfile.ZipFile(out_zip, "r") as zf:
        assert "backup.zip" not in zf.namelist()
class TestImportEdgeCases:
    """Edge cases for restore: corrupt archives, aborted confirmation
    prompts, extraction failures, and very large archives."""

    def _make_backup_zip(self, zip_path: Path, files: dict[str, str | bytes]) -> None:
        # Helper: build an archive from a name -> content mapping.
        with zipfile.ZipFile(zip_path, "w") as zf:
            for member, payload in files.items():
                zf.writestr(member, payload)

    def test_not_a_zip(self, tmp_path, monkeypatch):
        """Import rejects a non-zip file."""
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        bogus = tmp_path / "fake.zip"
        bogus.write_text("this is not a zip")
        from hermes_cli.backup import run_import

        with pytest.raises(SystemExit):
            run_import(Namespace(zipfile=str(bogus), force=True))

    def test_eof_during_confirmation(self, tmp_path, monkeypatch):
        """Import handles EOFError during confirmation prompt."""
        home = tmp_path / ".hermes"
        home.mkdir()
        (home / "config.yaml").write_text("existing\n")
        monkeypatch.setenv("HERMES_HOME", str(home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        archive = tmp_path / "backup.zip"
        self._make_backup_zip(archive, {"config.yaml": "new\n"})
        from hermes_cli.backup import run_import

        # Closed stdin (EOF) should abort rather than crash.
        with patch("builtins.input", side_effect=EOFError):
            with pytest.raises(SystemExit):
                run_import(Namespace(zipfile=str(archive), force=False))

    def test_keyboard_interrupt_during_confirmation(self, tmp_path, monkeypatch):
        """Import handles KeyboardInterrupt during confirmation prompt."""
        home = tmp_path / ".hermes"
        home.mkdir()
        (home / ".env").write_text("KEY=val\n")
        monkeypatch.setenv("HERMES_HOME", str(home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        archive = tmp_path / "backup.zip"
        self._make_backup_zip(archive, {"config.yaml": "new\n"})
        from hermes_cli.backup import run_import

        # Ctrl-C at the prompt should abort rather than crash.
        with patch("builtins.input", side_effect=KeyboardInterrupt):
            with pytest.raises(SystemExit):
                run_import(Namespace(zipfile=str(archive), force=False))

    def test_permission_error_during_import(self, tmp_path, monkeypatch):
        """Import handles permission errors during extraction."""
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        # A read-only target directory makes extraction of one member fail.
        locked_dir = home / "locked"
        locked_dir.mkdir()
        locked_dir.chmod(0o555)
        archive = tmp_path / "backup.zip"
        self._make_backup_zip(archive, {
            "config.yaml": "model: test\n",
            "locked/secret.txt": "data",
        })
        from hermes_cli.backup import run_import

        try:
            run_import(Namespace(zipfile=str(archive), force=True))
        finally:
            # Re-open the directory so pytest can clean up tmp_path.
            locked_dir.chmod(0o755)
        # config.yaml should still be restored despite the error
        assert (home / "config.yaml").exists()

    def test_progress_with_many_files(self, tmp_path, monkeypatch):
        """Import shows progress with 500+ files."""
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        archive = tmp_path / "big.zip"
        entries = {"config.yaml": "model: test\n"}
        entries.update({f"sessions/s{i:04d}.json": "{}" for i in range(600)})
        self._make_backup_zip(archive, entries)
        from hermes_cli.backup import run_import

        run_import(Namespace(zipfile=str(archive), force=True))

        assert (home / "config.yaml").exists()
        assert (home / "sessions" / "s0599.json").exists()
# ---------------------------------------------------------------------------
# Profile restoration tests
# ---------------------------------------------------------------------------
class TestProfileRestoration:
    """Importing a backup restores profile directories and auto-creates the
    per-profile wrapper scripts under ~/.local/bin."""

    def _make_backup_zip(self, zip_path: Path, files: dict[str, str | bytes]) -> None:
        # Helper: build an archive from a name -> content mapping.
        with zipfile.ZipFile(zip_path, "w") as zf:
            for name, content in files.items():
                zf.writestr(name, content)

    def test_import_creates_profile_wrappers(self, tmp_path, monkeypatch):
        """Import auto-creates wrapper scripts for restored profiles."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        # Wrapper scripts land under ~/.local/bin, which the Path.home()
        # monkeypatch above redirects into tmp_path.
        wrapper_dir = tmp_path / ".local" / "bin"
        wrapper_dir.mkdir(parents=True)
        zip_path = tmp_path / "backup.zip"
        self._make_backup_zip(zip_path, {
            "config.yaml": "model:\n provider: openrouter\n",
            "profiles/coder/config.yaml": "model:\n provider: anthropic\n",
            "profiles/coder/.env": "ANTHROPIC_API_KEY=sk-test\n",
            "profiles/researcher/config.yaml": "model:\n provider: deepseek\n",
        })
        args = Namespace(zipfile=str(zip_path), force=True)
        from hermes_cli.backup import run_import
        run_import(args)
        # Profile directories should exist
        assert (hermes_home / "profiles" / "coder" / "config.yaml").exists()
        assert (hermes_home / "profiles" / "researcher" / "config.yaml").exists()
        # Wrapper scripts should be created
        assert (wrapper_dir / "coder").exists()
        assert (wrapper_dir / "researcher").exists()
        # Wrappers should contain the right content
        coder_wrapper = (wrapper_dir / "coder").read_text()
        assert "hermes -p coder" in coder_wrapper

    def test_import_skips_profile_dirs_without_config(self, tmp_path, monkeypatch):
        """Import doesn't create wrappers for profile dirs without config."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        wrapper_dir = tmp_path / ".local" / "bin"
        wrapper_dir.mkdir(parents=True)
        zip_path = tmp_path / "backup.zip"
        self._make_backup_zip(zip_path, {
            "config.yaml": "model: test\n",
            "profiles/valid/config.yaml": "model: test\n",
            "profiles/empty/readme.txt": "nothing here\n",
        })
        args = Namespace(zipfile=str(zip_path), force=True)
        from hermes_cli.backup import run_import
        run_import(args)
        # Only valid profile should get a wrapper
        assert (wrapper_dir / "valid").exists()
        assert not (wrapper_dir / "empty").exists()

    def test_import_without_profiles_module(self, tmp_path, monkeypatch):
        """Import gracefully handles missing profiles module (fresh install)."""
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        monkeypatch.setattr(Path, "home", lambda: tmp_path)
        zip_path = tmp_path / "backup.zip"
        self._make_backup_zip(zip_path, {
            "config.yaml": "model: test\n",
            "profiles/coder/config.yaml": "model: test\n",
        })
        args = Namespace(zipfile=str(zip_path), force=True)
        # Grab the real import hook via the documented `builtins` module.
        # The previous `__builtins__.__import__` access relied on a CPython
        # implementation detail: `__builtins__` may be a module or a plain
        # dict depending on how the enclosing module is executed.
        import builtins
        original_import = builtins.__import__

        def fake_import(name, *a, **kw):
            # Simulate the profiles module not being available.
            if name == "hermes_cli.profiles":
                raise ImportError("no profiles module")
            return original_import(name, *a, **kw)

        from hermes_cli.backup import run_import
        with patch("builtins.__import__", side_effect=fake_import):
            run_import(args)
        # Files should still be restored even if wrappers can't be created
        assert (hermes_home / "profiles" / "coder" / "config.yaml").exists()

View file

@ -58,13 +58,13 @@ class TestFindOpenclawDirs:
def test_finds_legacy_dirs(self, tmp_path):
clawdbot = tmp_path / ".clawdbot"
clawdbot.mkdir()
moldbot = tmp_path / ".moldbot"
moldbot.mkdir()
moltbot = tmp_path / ".moltbot"
moltbot.mkdir()
with patch("pathlib.Path.home", return_value=tmp_path):
found = claw_mod._find_openclaw_dirs()
assert len(found) == 2
assert clawdbot in found
assert moldbot in found
assert moltbot in found
def test_returns_empty_when_none_exist(self, tmp_path):
with patch("pathlib.Path.home", return_value=tmp_path):
@ -297,7 +297,6 @@ class TestCmdMigrate:
patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
patch.object(claw_mod, "get_config_path", return_value=config_path),
patch.object(claw_mod, "prompt_yes_no", return_value=True),
patch.object(claw_mod, "_offer_source_archival"),
patch("sys.stdin", mock_stdin),
):
claw_mod._cmd_migrate(args)
@ -306,43 +305,8 @@ class TestCmdMigrate:
assert "Migration Results" in captured.out
assert "Migration complete!" in captured.out
def test_execute_offers_archival_on_success(self, tmp_path, capsys):
"""After successful migration, _offer_source_archival should be called."""
openclaw_dir = tmp_path / ".openclaw"
openclaw_dir.mkdir()
fake_mod = ModuleType("openclaw_to_hermes")
fake_mod.resolve_selected_options = MagicMock(return_value={"soul"})
fake_migrator = MagicMock()
fake_migrator.migrate.return_value = {
"summary": {"migrated": 3, "skipped": 0, "conflict": 0, "error": 0},
"items": [
{"kind": "soul", "status": "migrated", "destination": str(tmp_path / "SOUL.md")},
],
}
fake_mod.Migrator = MagicMock(return_value=fake_migrator)
args = Namespace(
source=str(openclaw_dir),
dry_run=False, preset="full", overwrite=False,
migrate_secrets=False, workspace_target=None,
skill_conflict="skip", yes=True,
)
with (
patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"),
patch.object(claw_mod, "_load_migration_module", return_value=fake_mod),
patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"),
patch.object(claw_mod, "save_config"),
patch.object(claw_mod, "load_config", return_value={}),
patch.object(claw_mod, "_offer_source_archival") as mock_archival,
):
claw_mod._cmd_migrate(args)
mock_archival.assert_called_once_with(openclaw_dir, True)
def test_dry_run_skips_archival(self, tmp_path, capsys):
"""Dry run should not offer archival."""
def test_dry_run_does_not_touch_source(self, tmp_path, capsys):
"""Dry run should not modify the source directory."""
openclaw_dir = tmp_path / ".openclaw"
openclaw_dir.mkdir()
@ -369,11 +333,10 @@ class TestCmdMigrate:
patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"),
patch.object(claw_mod, "save_config"),
patch.object(claw_mod, "load_config", return_value={}),
patch.object(claw_mod, "_offer_source_archival") as mock_archival,
):
claw_mod._cmd_migrate(args)
mock_archival.assert_not_called()
assert openclaw_dir.is_dir() # Source untouched
def test_execute_cancelled_by_user(self, tmp_path, capsys):
openclaw_dir = tmp_path / ".openclaw"
@ -506,73 +469,6 @@ class TestCmdMigrate:
assert call_kwargs["migrate_secrets"] is True
# ---------------------------------------------------------------------------
# _offer_source_archival
# ---------------------------------------------------------------------------
class TestOfferSourceArchival:
"""Test the post-migration archival offer."""
def test_archives_with_auto_yes(self, tmp_path, capsys):
source = tmp_path / ".openclaw"
source.mkdir()
(source / "workspace").mkdir()
(source / "workspace" / "todo.json").write_text("{}")
claw_mod._offer_source_archival(source, auto_yes=True)
captured = capsys.readouterr()
assert "Archived" in captured.out
assert not source.exists()
assert (tmp_path / ".openclaw.pre-migration").is_dir()
def test_skips_when_user_declines(self, tmp_path, capsys):
source = tmp_path / ".openclaw"
source.mkdir()
mock_stdin = MagicMock()
mock_stdin.isatty.return_value = True
with (
patch.object(claw_mod, "prompt_yes_no", return_value=False),
patch("sys.stdin", mock_stdin),
):
claw_mod._offer_source_archival(source, auto_yes=False)
captured = capsys.readouterr()
assert "Skipped" in captured.out
assert source.is_dir() # Still exists
def test_noop_when_source_missing(self, tmp_path, capsys):
claw_mod._offer_source_archival(tmp_path / "nonexistent", auto_yes=True)
captured = capsys.readouterr()
assert captured.out == "" # No output
def test_shows_state_files(self, tmp_path, capsys):
source = tmp_path / ".openclaw"
source.mkdir()
ws = source / "workspace"
ws.mkdir()
(ws / "todo.json").write_text("{}")
with patch.object(claw_mod, "prompt_yes_no", return_value=False):
claw_mod._offer_source_archival(source, auto_yes=False)
captured = capsys.readouterr()
assert "todo.json" in captured.out
def test_handles_archive_error(self, tmp_path, capsys):
source = tmp_path / ".openclaw"
source.mkdir()
with patch.object(claw_mod, "_archive_directory", side_effect=OSError("permission denied")):
claw_mod._offer_source_archival(source, auto_yes=True)
captured = capsys.readouterr()
assert "Could not archive" in captured.out
# ---------------------------------------------------------------------------
# _cmd_cleanup
# ---------------------------------------------------------------------------

View file

@ -0,0 +1,254 @@
"""Tests for the interactive CLI /model picker (provider → model drill-down)."""
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
class _FakeBuffer:
def __init__(self, text="draft text"):
self.text = text
self.cursor_position = len(text)
self.reset_calls = []
def reset(self, append_to_history=False):
self.reset_calls.append(append_to_history)
self.text = ""
self.cursor_position = 0
def _make_providers():
return [
{
"slug": "openrouter",
"name": "OpenRouter",
"is_current": True,
"is_user_defined": False,
"models": ["anthropic/claude-opus-4.6", "openai/gpt-5.4"],
"total_models": 2,
"source": "built-in",
},
{
"slug": "anthropic",
"name": "Anthropic",
"is_current": False,
"is_user_defined": False,
"models": ["claude-opus-4.6", "claude-sonnet-4.6"],
"total_models": 2,
"source": "built-in",
},
{
"slug": "custom:my-ollama",
"name": "My Ollama",
"is_current": False,
"is_user_defined": True,
"models": ["llama3", "mistral"],
"total_models": 2,
"source": "user-config",
"api_url": "http://localhost:11434/v1",
},
]
def _make_picker_cli(picker_return_value):
cli = MagicMock()
cli._run_curses_picker = MagicMock(return_value=picker_return_value)
cli._app = MagicMock()
cli._status_bar_visible = True
return cli
def _make_modal_cli():
    """Bare HermesCLI instance (no __init__) with just enough state for the
    modal /model picker tests."""
    from cli import HermesCLI

    cli = HermesCLI.__new__(HermesCLI)
    cli.model = "gpt-5.4"
    cli.provider = "openrouter"
    cli.requested_provider = "openrouter"
    # Credential / endpoint overrides all start blank.
    for attr in ("base_url", "api_key", "api_mode",
                 "_explicit_api_key", "_explicit_base_url"):
        setattr(cli, attr, "")
    # No picker session or stashed input draft yet.
    cli._pending_model_switch_note = None
    cli._model_picker_state = None
    cli._modal_input_snapshot = None
    cli._status_bar_visible = True
    cli._invalidate = MagicMock()
    cli.agent = None
    cli.config = {}
    cli.console = MagicMock()
    cli._app = SimpleNamespace(
        current_buffer=_FakeBuffer(),
        invalidate=MagicMock(),
    )
    return cli
def test_provider_selection_returns_slug_on_choice():
    """Choosing row 1 in the picker maps to that provider's slug."""
    rows = _make_providers()
    fake_cli = _make_picker_cli(1)
    from cli import HermesCLI

    chosen = HermesCLI._interactive_provider_selection(fake_cli, rows, "gpt-5.4", "OpenRouter")

    assert chosen == "anthropic"
    fake_cli._run_curses_picker.assert_called_once()


def test_provider_selection_returns_none_on_cancel():
    """A cancelled picker (None) propagates as None."""
    rows = _make_providers()
    fake_cli = _make_picker_cli(None)
    from cli import HermesCLI

    assert HermesCLI._interactive_provider_selection(fake_cli, rows, "gpt-5.4", "OpenRouter") is None


def test_provider_selection_default_is_current():
    """The picker's default highlight is the currently-active provider."""
    rows = _make_providers()
    fake_cli = _make_picker_cli(0)
    from cli import HermesCLI

    HermesCLI._interactive_provider_selection(fake_cli, rows, "gpt-5.4", "OpenRouter")

    assert fake_cli._run_curses_picker.call_args.kwargs["default_index"] == 0
def test_model_selection_returns_model_on_choice():
    """Choosing row 0 returns the first model id."""
    data = _make_providers()[0]
    fake_cli = _make_picker_cli(0)
    from cli import HermesCLI

    assert HermesCLI._interactive_model_selection(fake_cli, data["models"], data) == "anthropic/claude-opus-4.6"


def test_model_selection_custom_entry_prompts_for_input():
    """Selecting the trailing 'custom' row falls back to free-text entry."""
    data = _make_providers()[0]
    fake_cli = _make_picker_cli(2)
    from cli import HermesCLI

    fake_cli._prompt_text_input = MagicMock(return_value="my-custom-model")

    assert HermesCLI._interactive_model_selection(fake_cli, data["models"], data) == "my-custom-model"
    fake_cli._prompt_text_input.assert_called_once_with(" Enter model name: ")


def test_model_selection_empty_prompts_for_manual_input():
    """With no models to list, the picker goes straight to manual entry."""
    data = {
        "slug": "custom:empty",
        "name": "Empty Provider",
        "models": [],
        "total_models": 0,
    }
    fake_cli = _make_picker_cli(None)
    from cli import HermesCLI

    fake_cli._prompt_text_input = MagicMock(return_value="my-model")

    assert HermesCLI._interactive_model_selection(fake_cli, [], data) == "my-model"
    fake_cli._prompt_text_input.assert_called_once_with(" Enter model name manually (or Enter to cancel): ")
def test_prompt_text_input_uses_run_in_terminal_when_app_active():
    """When the prompt_toolkit app is live, text input runs via run_in_terminal."""
    from cli import HermesCLI

    fake_cli = _make_modal_cli()
    with (
        patch("prompt_toolkit.application.run_in_terminal", side_effect=lambda fn: fn()) as run_mock,
        patch("builtins.input", return_value="manual-value"),
    ):
        value = HermesCLI._prompt_text_input(fake_cli, "Enter value: ")

    assert value == "manual-value"
    run_mock.assert_called_once()
    # The status bar must be left visible after the prompt.
    assert fake_cli._status_bar_visible is True


def test_should_handle_model_command_inline_uses_command_name_resolution():
    """Inline handling keys off the resolved command name, not the raw text."""
    from cli import HermesCLI

    fake_cli = _make_modal_cli()
    with patch("hermes_cli.commands.resolve_command", return_value=SimpleNamespace(name="model")):
        assert HermesCLI._should_handle_model_command_inline(fake_cli, "/model") is True
    with patch("hermes_cli.commands.resolve_command", return_value=SimpleNamespace(name="help")):
        assert HermesCLI._should_handle_model_command_inline(fake_cli, "/model") is False
        # Attached images force the normal (non-inline) path.
        assert HermesCLI._should_handle_model_command_inline(fake_cli, "/model", has_images=True) is False
def test_process_command_model_without_args_opens_modal_picker_and_captures_draft():
    """`/model` with no args opens the modal picker and stashes the user's
    in-progress input draft so it can be restored afterwards."""
    from cli import HermesCLI
    cli = _make_modal_cli()
    providers = _make_providers()
    with (
        patch("hermes_cli.model_switch.list_authenticated_providers", return_value=providers),
        patch("cli._cprint"),
    ):
        result = cli.process_command("/model")
    # Command was consumed by the CLI rather than sent to the agent.
    assert result is True
    # Picker opens at the provider stage with the first row highlighted.
    assert cli._model_picker_state is not None
    assert cli._model_picker_state["stage"] == "provider"
    assert cli._model_picker_state["selected"] == 0
    # The draft text and cursor are snapshotted, then the buffer is cleared.
    assert cli._modal_input_snapshot == {"text": "draft text", "cursor_position": len("draft text")}
    assert cli._app.current_buffer.text == ""
def test_model_picker_provider_then_model_selection_applies_switch_result_and_restores_draft():
    """Full picker flow: provider stage -> model stage -> switch applied,
    picker state cleared, and the stashed input draft restored."""
    from cli import HermesCLI
    cli = _make_modal_cli()
    providers = _make_providers()
    with (
        patch("hermes_cli.model_switch.list_authenticated_providers", return_value=providers),
        patch("cli._cprint"),
    ):
        assert cli.process_command("/model") is True
    # Move the highlight to the second row (anthropic) and confirm it.
    cli._model_picker_state["selected"] = 1
    with patch("hermes_cli.models.provider_model_ids", return_value=["claude-opus-4.6", "claude-sonnet-4.6"]):
        HermesCLI._handle_model_picker_selection(cli)
    # Now in the model stage for the chosen provider.
    assert cli._model_picker_state["stage"] == "model"
    assert cli._model_picker_state["provider_data"]["slug"] == "anthropic"
    assert cli._model_picker_state["model_list"] == ["claude-opus-4.6", "claude-sonnet-4.6"]
    cli._model_picker_state["selected"] = 0
    # Canned successful result, shaped like hermes_cli.model_switch.switch_model's.
    switch_result = SimpleNamespace(
        success=True,
        error_message=None,
        new_model="claude-opus-4.6",
        target_provider="anthropic",
        api_key="",
        base_url="",
        api_mode="anthropic_messages",
        provider_label="Anthropic",
        model_info=None,
        warning_message=None,
        provider_changed=True,
    )
    with (
        patch("hermes_cli.model_switch.switch_model", return_value=switch_result) as switch_mock,
        patch("cli._cprint"),
    ):
        HermesCLI._handle_model_picker_selection(cli)
    # Picker closed, model/provider applied, draft restored to the buffer.
    assert cli._model_picker_state is None
    assert cli.model == "claude-opus-4.6"
    assert cli.provider == "anthropic"
    assert cli.requested_provider == "anthropic"
    assert cli._app.current_buffer.text == "draft text"
    switch_mock.assert_called_once()
    assert switch_mock.call_args.kwargs["explicit_provider"] == "anthropic"

View file

@ -0,0 +1,241 @@
"""Regression test: openai-codex must appear in /model picker when
credentials are only in the Codex CLI shared file (~/.codex/auth.json)
and haven't been migrated to the Hermes auth store yet.
Root cause: list_authenticated_providers() checked the raw Hermes auth
store but didn't know about the Codex CLI fallback import path.
Fix: _seed_from_singletons() now imports from the Codex CLI when the
Hermes auth store has no openai-codex tokens, and
list_authenticated_providers() falls back to load_pool() for OAuth
providers.
"""
import base64
import json
import os
import sys
import time
from pathlib import Path
from unittest.mock import patch
import pytest
def _make_fake_jwt(expiry_offset: int = 3600) -> str:
"""Build a fake JWT with a future expiry."""
header = base64.urlsafe_b64encode(b'{"alg":"RS256"}').rstrip(b"=").decode()
exp = int(time.time()) + expiry_offset
payload_bytes = json.dumps({"exp": exp, "sub": "test"}).encode()
payload = base64.urlsafe_b64encode(payload_bytes).rstrip(b"=").decode()
return f"{header}.{payload}.fakesig"
@pytest.fixture()
def codex_cli_only_env(tmp_path, monkeypatch):
    """Set up an environment where Codex tokens exist only in ~/.codex/auth.json,
    NOT in the Hermes auth store.

    Returns the Hermes home path so tests can inspect the auth store after
    the code under test runs.
    """
    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    codex_home = tmp_path / ".codex"
    codex_home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    monkeypatch.setenv("CODEX_HOME", str(codex_home))
    # Empty Hermes auth store
    (hermes_home / "auth.json").write_text(
        json.dumps({"version": 2, "providers": {}})
    )
    # Valid Codex CLI tokens (JWT with a future expiry)
    fake_jwt = _make_fake_jwt()
    (codex_home / "auth.json").write_text(
        json.dumps({
            "tokens": {
                "access_token": fake_jwt,
                "refresh_token": "fake-refresh-token",
            }
        })
    )
    # Clear provider env vars so only OAuth is a detection path
    for var in [
        "OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
        "NOUS_API_KEY", "DEEPSEEK_API_KEY", "COPILOT_GITHUB_TOKEN",
        "GH_TOKEN", "GEMINI_API_KEY",
    ]:
        monkeypatch.delenv(var, raising=False)
    return hermes_home
def test_codex_cli_tokens_detected_by_model_picker(codex_cli_only_env):
    """openai-codex should appear when tokens only exist in ~/.codex/auth.json."""
    from hermes_cli.model_switch import list_authenticated_providers

    rows = list_authenticated_providers(
        current_provider="openai-codex",
        max_models=10,
    )

    found_slugs = [row["slug"] for row in rows]
    assert "openai-codex" in found_slugs, (
        f"openai-codex not found in /model picker providers: {found_slugs}"
    )
    codex_row = next(row for row in rows if row["slug"] == "openai-codex")
    assert codex_row["is_current"] is True
    assert codex_row["total_models"] > 0
def test_codex_cli_tokens_migrated_after_detection(codex_cli_only_env):
    """After the /model picker detects Codex CLI tokens, they should be
    migrated into the Hermes auth store for subsequent fast lookups."""
    from hermes_cli.model_switch import list_authenticated_providers

    # First call triggers migration
    list_authenticated_providers(current_provider="openai-codex")

    # The Hermes store (inside the fixture's hermes home) must now hold them.
    store = json.loads((codex_cli_only_env / "auth.json").read_text())
    migrated = store.get("providers", {})
    assert "openai-codex" in migrated, (
        f"openai-codex not migrated to Hermes auth store: {list(migrated.keys())}"
    )
    tokens = migrated["openai-codex"].get("tokens", {})
    assert tokens.get("access_token"), "access_token missing after migration"
    assert tokens.get("refresh_token"), "refresh_token missing after migration"
@pytest.fixture()
def hermes_auth_only_env(tmp_path, monkeypatch):
    """Tokens already in Hermes auth store (no Codex CLI needed)."""
    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    # Point CODEX_HOME to nonexistent dir to prove it's not needed
    monkeypatch.setenv("CODEX_HOME", str(tmp_path / "no_codex"))
    # Pre-seed the Hermes auth store with valid-looking OAuth tokens.
    (hermes_home / "auth.json").write_text(json.dumps({
        "version": 2,
        "providers": {
            "openai-codex": {
                "tokens": {
                    "access_token": _make_fake_jwt(),
                    "refresh_token": "fake-refresh",
                },
                "last_refresh": "2026-04-12T00:00:00Z",
            }
        },
    }))
    # Remove API-key env vars so the auth store is the only detection path.
    for var in [
        "OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
        "NOUS_API_KEY", "DEEPSEEK_API_KEY",
    ]:
        monkeypatch.delenv(var, raising=False)
    return hermes_home
def test_normal_path_still_works(hermes_auth_only_env):
    """openai-codex appears when tokens are already in Hermes auth store."""
    from hermes_cli.model_switch import list_authenticated_providers

    rows = list_authenticated_providers(current_provider="openai-codex", max_models=10)

    assert "openai-codex" in [row["slug"] for row in rows]
@pytest.fixture()
def claude_code_only_env(tmp_path, monkeypatch):
    """Set up an environment where Anthropic credentials only exist in
    ~/.claude/.credentials.json (Claude Code) not in env vars or Hermes
    auth store."""
    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    # No Codex CLI
    monkeypatch.setenv("CODEX_HOME", str(tmp_path / "no_codex"))
    # Empty Hermes auth store.
    (hermes_home / "auth.json").write_text(
        json.dumps({"version": 2, "providers": {}})
    )
    # Claude Code credentials in the correct format
    claude_dir = tmp_path / ".claude"
    claude_dir.mkdir()
    (claude_dir / ".credentials.json").write_text(json.dumps({
        "claudeAiOauth": {
            "accessToken": _make_fake_jwt(),
            "refreshToken": "fake-refresh",
            # expiresAt is epoch milliseconds, one hour in the future.
            "expiresAt": int(time.time() * 1000) + 3_600_000,
        }
    }))
    # Patch Path.home() so the adapter finds the file
    monkeypatch.setattr(Path, "home", classmethod(lambda cls: tmp_path))
    # Strip every env var that could provide Anthropic credentials instead.
    for var in [
        "OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN",
        "NOUS_API_KEY", "DEEPSEEK_API_KEY",
    ]:
        monkeypatch.delenv(var, raising=False)
    return hermes_home
def test_claude_code_file_detected_by_model_picker(claude_code_only_env):
    """anthropic should appear when credentials only exist in ~/.claude/.credentials.json."""
    from hermes_cli.model_switch import list_authenticated_providers

    rows = list_authenticated_providers(current_provider="anthropic", max_models=10)

    found_slugs = [row["slug"] for row in rows]
    assert "anthropic" in found_slugs, (
        f"anthropic not found in /model picker providers: {found_slugs}"
    )
    anthropic_row = next(row for row in rows if row["slug"] == "anthropic")
    assert anthropic_row["is_current"] is True
    assert anthropic_row["total_models"] > 0
def test_no_codex_when_no_credentials(tmp_path, monkeypatch):
    """openai-codex should NOT appear when no credentials exist anywhere."""
    home = tmp_path / ".hermes"
    home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(home))
    monkeypatch.setenv("CODEX_HOME", str(tmp_path / "no_codex"))
    (home / "auth.json").write_text(
        json.dumps({"version": 2, "providers": {}})
    )
    # Strip every API-key env var that could make a provider appear.
    for var in [
        "OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
        "NOUS_API_KEY", "DEEPSEEK_API_KEY", "COPILOT_GITHUB_TOKEN",
        "GH_TOKEN", "GEMINI_API_KEY",
    ]:
        monkeypatch.delenv(var, raising=False)

    from hermes_cli.model_switch import list_authenticated_providers

    rows = list_authenticated_providers(current_provider="openrouter", max_models=10)
    found_slugs = [row["slug"] for row in rows]
    assert "openai-codex" not in found_slugs, (
        "openai-codex should not appear without any credentials"
    )

View file

@ -68,6 +68,7 @@ class TestLoadConfigDefaults:
assert "max_turns" not in config
assert "terminal" in config
assert config["terminal"]["backend"] == "local"
assert config["display"]["interim_assistant_messages"] is True
def test_legacy_root_level_max_turns_migrates_to_agent_config(self, tmp_path):
with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
@ -421,3 +422,25 @@ class TestAnthropicTokenMigration:
}):
migrate_config(interactive=False, quiet=True)
assert load_env().get("ANTHROPIC_TOKEN") == "current-token"
class TestInterimAssistantMessageConfig:
    """Test the explicit gateway interim-message config gate."""
    def test_default_config_enables_interim_assistant_messages(self):
        # The gate ships enabled by default.
        assert DEFAULT_CONFIG["display"]["interim_assistant_messages"] is True
    def test_migrate_to_v15_adds_interim_assistant_message_gate(self, tmp_path):
        # Start from a v14 config that already customises another display key.
        config_path = tmp_path / "config.yaml"
        config_path.write_text(
            yaml.safe_dump({"_config_version": 14, "display": {"tool_progress": "off"}}),
            encoding="utf-8",
        )
        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            migrate_config(interactive=False, quiet=True)
        raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
        # NOTE(review): the test name says "v15" but migration runs through
        # to the latest version (16 here) — presumably v15 added the gate and
        # v16 is unrelated; confirm against migrate_config's version history.
        assert raw["_config_version"] == 16
        # The pre-existing customisation survives and the new gate is added.
        assert raw["display"]["tool_progress"] == "off"
        assert raw["display"]["interim_assistant_messages"] is True

View file

@ -0,0 +1,342 @@
"""Tests for container-aware CLI routing (NixOS container mode).
When container.enable = true in the NixOS module, the activation script
writes a .container-mode metadata file. The host CLI detects this and
execs into the container instead of running locally.
"""
import os
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from hermes_cli.config import (
_is_inside_container,
get_container_exec_info,
)
# =============================================================================
# _is_inside_container
# =============================================================================
def test_is_inside_container_dockerenv():
    """Detects /.dockerenv marker file."""
    with patch("os.path.exists") as exists_mock:
        # Only the Docker marker path "exists".
        exists_mock.side_effect = lambda p: p == "/.dockerenv"
        assert _is_inside_container() is True
def test_is_inside_container_containerenv():
    """Detects Podman's /run/.containerenv marker."""
    with patch("os.path.exists") as exists_mock:
        # Only the Podman marker path "exists".
        exists_mock.side_effect = lambda p: p == "/run/.containerenv"
        assert _is_inside_container() is True
def test_is_inside_container_cgroup_docker():
    """Detects 'docker' in /proc/1/cgroup."""
    with patch("os.path.exists", return_value=False), \
        patch("builtins.open", create=True) as mock_open:
        # Configure the mock so open(...) yields an object usable both as a
        # plain file and as a context manager returning itself.
        # NOTE(review): __enter__ is assigned `lambda s: s`, which requires
        # the mock to be passed as an argument when entered; this only works
        # as intended if the code under test reads without `with`, or the
        # mock machinery binds the call — confirm against
        # _is_inside_container's implementation.
        mock_open.return_value.__enter__ = lambda s: s
        mock_open.return_value.__exit__ = MagicMock(return_value=False)
        mock_open.return_value.read = MagicMock(
            return_value="12:memory:/docker/abc123\n"
        )
        assert _is_inside_container() is True
def test_is_inside_container_false_on_host():
    """Returns False when none of the container indicators are present."""
    # No marker files and /proc/1/cgroup unreadable -> plain host machine.
    with patch("os.path.exists", return_value=False), \
        patch("builtins.open", side_effect=OSError("no such file")):
        assert _is_inside_container() is False
# =============================================================================
# get_container_exec_info
# =============================================================================
@pytest.fixture
def container_env(tmp_path, monkeypatch):
    """Set up a fake HERMES_HOME with .container-mode file."""
    home = tmp_path / ".hermes"
    home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(home))
    monkeypatch.delenv("HERMES_DEV", raising=False)
    # Metadata exactly as the NixOS activation script would write it.
    (home / ".container-mode").write_text(
        "# Written by NixOS activation script. Do not edit manually.\n"
        "backend=podman\n"
        "container_name=hermes-agent\n"
        "exec_user=hermes\n"
        "hermes_bin=/data/current-package/bin/hermes\n"
    )
    return home
def test_get_container_exec_info_returns_metadata(container_env):
    """Reads .container-mode and returns all fields including exec_user."""
    with patch("hermes_cli.config._is_inside_container", return_value=False):
        info = get_container_exec_info()
    assert info is not None
    # Every field from the metadata file must round-trip.
    expected = {
        "backend": "podman",
        "container_name": "hermes-agent",
        "exec_user": "hermes",
        "hermes_bin": "/data/current-package/bin/hermes",
    }
    for key, value in expected.items():
        assert info[key] == value
def test_get_container_exec_info_none_inside_container(container_env):
"""Returns None when we're already inside a container."""
with patch("hermes_cli.config._is_inside_container", return_value=True):
info = get_container_exec_info()
assert info is None
def test_get_container_exec_info_none_without_file(tmp_path, monkeypatch):
"""Returns None when .container-mode doesn't exist (native mode)."""
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.delenv("HERMES_DEV", raising=False)
with patch("hermes_cli.config._is_inside_container", return_value=False):
info = get_container_exec_info()
assert info is None
def test_get_container_exec_info_skipped_when_hermes_dev(container_env, monkeypatch):
    """Returns None when HERMES_DEV=1 is set (dev mode bypass)."""
    monkeypatch.setenv("HERMES_DEV", "1")
    with patch("hermes_cli.config._is_inside_container", return_value=False):
        assert get_container_exec_info() is None
def test_get_container_exec_info_not_skipped_when_hermes_dev_zero(container_env, monkeypatch):
    """HERMES_DEV=0 does NOT trigger bypass — only '1' does."""
    monkeypatch.setenv("HERMES_DEV", "0")
    with patch("hermes_cli.config._is_inside_container", return_value=False):
        assert get_container_exec_info() is not None
def test_get_container_exec_info_defaults():
    """Falls back to defaults for missing keys."""
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        home = Path(tmpdir) / ".hermes"
        home.mkdir()
        # Marker file exists but carries no key=value entries at all.
        (home / ".container-mode").write_text("# minimal file with no keys\n")
        with patch("hermes_cli.config._is_inside_container", return_value=False), \
             patch("hermes_cli.config.get_hermes_home", return_value=home), \
             patch.dict(os.environ, {}, clear=False):
            os.environ.pop("HERMES_DEV", None)
            info = get_container_exec_info()
        assert info is not None
        defaults = {
            "backend": "docker",
            "container_name": "hermes-agent",
            "exec_user": "hermes",
            "hermes_bin": "/data/current-package/bin/hermes",
        }
        for key, value in defaults.items():
            assert info[key] == value
def test_get_container_exec_info_docker_backend(container_env):
    """Correctly reads docker backend with custom exec_user."""
    marker = (
        "backend=docker\n"
        "container_name=hermes-custom\n"
        "exec_user=myuser\n"
        "hermes_bin=/opt/hermes/bin/hermes\n"
    )
    (container_env / ".container-mode").write_text(marker)
    with patch("hermes_cli.config._is_inside_container", return_value=False):
        info = get_container_exec_info()
    assert (info["backend"], info["container_name"]) == ("docker", "hermes-custom")
    assert (info["exec_user"], info["hermes_bin"]) == ("myuser", "/opt/hermes/bin/hermes")
def test_get_container_exec_info_crashes_on_permission_error(container_env):
    """PermissionError propagates instead of being silently swallowed."""
    boom = PermissionError("permission denied")
    with patch("hermes_cli.config._is_inside_container", return_value=False):
        with patch("builtins.open", side_effect=boom):
            with pytest.raises(PermissionError):
                get_container_exec_info()
# =============================================================================
# _exec_in_container
# =============================================================================
@pytest.fixture
def docker_container_info():
    """Container metadata dict shaped like get_container_exec_info output (docker)."""
    return dict(
        backend="docker",
        container_name="hermes-agent",
        exec_user="hermes",
        hermes_bin="/data/current-package/bin/hermes",
    )
@pytest.fixture
def podman_container_info():
    """Container metadata dict shaped like get_container_exec_info output (podman)."""
    return dict(
        backend="podman",
        container_name="hermes-agent",
        exec_user="hermes",
        hermes_bin="/data/current-package/bin/hermes",
    )
def test_exec_in_container_calls_execvp(docker_container_info):
    """Verifies os.execvp is called with correct args: runtime, tty flags,
    user, env vars, container name, binary, and CLI args."""
    from hermes_cli.main import _exec_in_container

    # os.execvp must be mocked — otherwise the test process itself would be
    # replaced by the `docker exec` invocation.
    with patch("shutil.which", return_value="/usr/bin/docker"), \
         patch("subprocess.run") as mock_run, \
         patch("sys.stdin") as mock_stdin, \
         patch("os.execvp") as mock_execvp, \
         patch.dict(os.environ, {"TERM": "xterm-256color", "LANG": "en_US.UTF-8"},
                    clear=False):
        mock_stdin.isatty.return_value = True  # TTY → expect the combined -it flag
        mock_run.return_value = MagicMock(returncode=0)  # container probe succeeds
        _exec_in_container(docker_container_info, ["chat", "-m", "opus"])
        mock_execvp.assert_called_once()
        # execvp(file, argv): argv is the second positional argument.
        cmd = mock_execvp.call_args[0][1]
        assert cmd[0] == "/usr/bin/docker"
        assert cmd[1] == "exec"
        assert "-it" in cmd
        idx_u = cmd.index("-u")
        assert cmd[idx_u + 1] == "hermes"
        # Collect every NAME=value passed via `-e` to check env forwarding.
        e_indices = [i for i, v in enumerate(cmd) if v == "-e"]
        e_values = [cmd[i + 1] for i in e_indices]
        assert "TERM=xterm-256color" in e_values
        assert "LANG=en_US.UTF-8" in e_values
        assert "hermes-agent" in cmd
        assert "/data/current-package/bin/hermes" in cmd
        assert "chat" in cmd
def test_exec_in_container_non_tty_uses_i_only(docker_container_info):
    """Non-TTY mode uses -i instead of -it."""
    from hermes_cli.main import _exec_in_container

    with patch("shutil.which", return_value="/usr/bin/docker"), \
         patch("subprocess.run") as mock_run, \
         patch("sys.stdin") as mock_stdin, \
         patch("os.execvp") as mock_execvp:
        mock_stdin.isatty.return_value = False
        mock_run.return_value = MagicMock(returncode=0)
        _exec_in_container(docker_container_info, ["sessions", "list"])
        argv = mock_execvp.call_args[0][1]
        assert "-i" in argv and "-it" not in argv
def test_exec_in_container_no_runtime_hard_fails(podman_container_info):
    """Hard fails when runtime not found (no fallback)."""
    from hermes_cli.main import _exec_in_container

    with patch("shutil.which", return_value=None), \
         patch("subprocess.run") as mock_run, \
         patch("os.execvp") as mock_execvp, \
         pytest.raises(SystemExit) as excinfo:
        _exec_in_container(podman_container_info, ["chat"])
    assert not mock_run.called
    assert not mock_execvp.called
    assert excinfo.value.code != 0
def test_exec_in_container_sudo_probe_sets_prefix(podman_container_info):
    """When first probe fails and sudo probe succeeds, execvp is called
    with sudo -n prefix."""
    from hermes_cli.main import _exec_in_container

    def which_side_effect(name):
        # Both the container runtime and sudo are "installed" on PATH.
        if name == "podman":
            return "/usr/bin/podman"
        if name == "sudo":
            return "/usr/bin/sudo"
        return None

    with patch("shutil.which", side_effect=which_side_effect), \
         patch("subprocess.run") as mock_run, \
         patch("sys.stdin") as mock_stdin, \
         patch("os.execvp") as mock_execvp:
        mock_stdin.isatty.return_value = True
        # Order of side_effect matters: the implementation probes directly
        # first, then retries the probe under sudo.
        mock_run.side_effect = [
            MagicMock(returncode=1),  # direct probe fails
            MagicMock(returncode=0),  # sudo probe succeeds
        ]
        _exec_in_container(podman_container_info, ["chat"])
        mock_execvp.assert_called_once()
        cmd = mock_execvp.call_args[0][1]
        # Final argv must be prefixed `sudo -n <runtime> exec …`.
        assert cmd[0] == "/usr/bin/sudo"
        assert cmd[1] == "-n"
        assert cmd[2] == "/usr/bin/podman"
        assert cmd[3] == "exec"
def test_exec_in_container_probe_timeout_prints_message(docker_container_info):
    """TimeoutExpired from probe produces a human-readable error, not a
    raw traceback."""
    from hermes_cli.main import _exec_in_container

    timeout_exc = subprocess.TimeoutExpired(cmd=["docker", "inspect"], timeout=15)
    with patch("shutil.which", return_value="/usr/bin/docker"), \
         patch("subprocess.run", side_effect=timeout_exc), \
         patch("os.execvp") as mock_execvp, \
         pytest.raises(SystemExit) as excinfo:
        _exec_in_container(docker_container_info, ["chat"])
    assert not mock_execvp.called
    assert excinfo.value.code == 1
def test_exec_in_container_container_not_running_no_sudo(docker_container_info):
    """When runtime exists but container not found and no sudo available,
    prints helpful error about root containers."""
    from hermes_cli.main import _exec_in_container

    # docker resolves on PATH; sudo (and anything else) does not.
    fake_which = lambda name: "/usr/bin/docker" if name == "docker" else None
    with patch("shutil.which", side_effect=fake_which), \
         patch("subprocess.run") as mock_run, \
         patch("os.execvp") as mock_execvp, \
         pytest.raises(SystemExit) as excinfo:
        mock_run.return_value = MagicMock(returncode=1)
        _exec_in_container(docker_container_info, ["chat"])
    assert not mock_execvp.called
    assert excinfo.value.code == 1

View file

@ -122,3 +122,54 @@ class TestCustomProviderModelSwitch:
model = config.get("model")
assert isinstance(model, dict)
assert model["default"] == "model-X"
def test_api_mode_set_from_provider_info(self, config_home):
    """When custom_providers entry has api_mode, it should be applied."""
    import yaml
    from hermes_cli.main import _model_flow_named_custom

    provider_info = {
        "name": "Anthropic Proxy",
        "base_url": "https://proxy.example.com/anthropic",
        "api_key": "***",
        "model": "claude-3",
        "api_mode": "anthropic_messages",
    }
    # simple_term_menu → None forces the plain input() fallback menu;
    # input "1" selects the first (and only) fetched model.
    with patch("hermes_cli.models.fetch_api_models", return_value=["claude-3"]), \
         patch.dict("sys.modules", {"simple_term_menu": None}), \
         patch("builtins.input", return_value="1"), \
         patch("builtins.print"):
        _model_flow_named_custom({}, provider_info)
    config = yaml.safe_load((config_home / "config.yaml").read_text()) or {}
    model = config.get("model")
    assert isinstance(model, dict)
    assert model.get("api_mode") == "anthropic_messages"
def test_api_mode_cleared_when_not_specified(self, config_home):
    """When custom_providers entry has no api_mode, stale api_mode is removed."""
    import yaml
    from hermes_cli.main import _model_flow_named_custom

    # Pre-seed a stale api_mode so the flow has something to clear.
    stale = {"model": {"api_mode": "anthropic_messages"}}
    (config_home / "config.yaml").write_text(yaml.dump(stale))
    provider_info = {
        "name": "My vLLM",
        "base_url": "https://vllm.example.com/v1",
        "api_key": "***",
        "model": "llama-3",
    }
    with patch("hermes_cli.models.fetch_api_models", return_value=["llama-3"]), \
         patch.dict("sys.modules", {"simple_term_menu": None}), \
         patch("builtins.input", return_value="1"), \
         patch("builtins.print"):
        _model_flow_named_custom({}, provider_info)
    saved = yaml.safe_load((config_home / "config.yaml").read_text()) or {}
    model = saved.get("model")
    assert isinstance(model, dict)
    assert "api_mode" not in model, "Stale api_mode should be removed"

View file

@ -1,288 +1,255 @@
"""Tests for hermes_cli/logs.py — log viewing and filtering."""
"""Tests for hermes_cli.logs — log viewing and filtering."""
import os
import textwrap
from datetime import datetime, timedelta
from io import StringIO
from pathlib import Path
from unittest.mock import patch
import pytest
from hermes_cli.logs import (
LOG_FILES,
_extract_level,
_extract_logger_name,
_line_matches_component,
_matches_filters,
_parse_line_timestamp,
_parse_since,
_read_last_n_lines,
list_logs,
tail_log,
_read_tail,
)
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def log_dir(tmp_path, monkeypatch):
    """Create a fake HERMES_HOME with a logs/ directory."""
    # NOTE(review): tmp_path and monkeypatch are unused here — HERMES_HOME is
    # assumed to already point at a temp dir (presumably set by a session-wide
    # conftest fixture). Confirm, or drop the unused parameters.
    home = Path(os.environ["HERMES_HOME"])
    logs = home / "logs"
    logs.mkdir(parents=True, exist_ok=True)
    return logs
@pytest.fixture
def sample_agent_log(log_dir):
    """Write a realistic agent.log with mixed levels and sessions."""
    # Two sessions (sess_aaa / sess_bbb), all four levels, and credential-pool
    # rotation lines — enough variety to exercise every filter path.
    lines = textwrap.dedent("""\
        2026-04-05 10:00:00,000 INFO run_agent: conversation turn: session=sess_aaa model=claude provider=openrouter platform=cli history=0 msg='hello'
        2026-04-05 10:00:01,000 INFO run_agent: tool terminal completed (0.50s, 200 chars)
        2026-04-05 10:00:02,000 INFO run_agent: API call #1: model=claude provider=openrouter in=1000 out=200 total=1200 latency=1.5s
        2026-04-05 10:00:03,000 WARNING run_agent: Tool web_search returned error (2.00s): timeout
        2026-04-05 10:00:04,000 INFO run_agent: conversation turn: session=sess_bbb model=gpt-5 provider=openai platform=telegram history=5 msg='fix bug'
        2026-04-05 10:00:05,000 ERROR run_agent: API call failed after 3 retries. rate limited
        2026-04-05 10:00:06,000 INFO run_agent: tool read_file completed (0.01s, 500 chars)
        2026-04-05 10:00:07,000 DEBUG run_agent: verbose internal detail
        2026-04-05 10:00:08,000 INFO credential_pool: credential pool: marking key-1 exhausted (status=429), rotating
        2026-04-05 10:00:09,000 INFO credential_pool: credential pool: rotated to key-2
        """)
    path = log_dir / "agent.log"
    path.write_text(lines)
    return path
@pytest.fixture
def sample_errors_log(log_dir):
    """errors.log containing exactly one WARNING and one ERROR line."""
    content = textwrap.dedent("""\
        2026-04-05 10:00:03,000 WARNING run_agent: Tool web_search returned error (2.00s): timeout
        2026-04-05 10:00:05,000 ERROR run_agent: API call failed after 3 retries. rate limited
        """)
    target = log_dir / "errors.log"
    target.write_text(content)
    return target
# ---------------------------------------------------------------------------
# _parse_since
# Timestamp parsing
# ---------------------------------------------------------------------------
class TestParseSince:
def test_hours(self):
cutoff = _parse_since("2h")
assert cutoff is not None
assert (datetime.now() - cutoff).total_seconds() == pytest.approx(7200, abs=5)
assert abs((datetime.now() - cutoff).total_seconds() - 7200) < 2
def test_minutes(self):
cutoff = _parse_since("30m")
assert cutoff is not None
assert (datetime.now() - cutoff).total_seconds() == pytest.approx(1800, abs=5)
assert abs((datetime.now() - cutoff).total_seconds() - 1800) < 2
def test_days(self):
cutoff = _parse_since("1d")
assert cutoff is not None
assert (datetime.now() - cutoff).total_seconds() == pytest.approx(86400, abs=5)
assert abs((datetime.now() - cutoff).total_seconds() - 86400) < 2
def test_seconds(self):
cutoff = _parse_since("60s")
cutoff = _parse_since("120s")
assert cutoff is not None
assert (datetime.now() - cutoff).total_seconds() == pytest.approx(60, abs=5)
assert abs((datetime.now() - cutoff).total_seconds() - 120) < 2
def test_invalid_returns_none(self):
assert _parse_since("abc") is None
assert _parse_since("") is None
assert _parse_since("10x") is None
def test_whitespace_handling(self):
cutoff = _parse_since(" 1h ")
def test_whitespace_tolerance(self):
cutoff = _parse_since(" 5m ")
assert cutoff is not None
# ---------------------------------------------------------------------------
# _parse_line_timestamp
# ---------------------------------------------------------------------------
class TestParseLineTimestamp:
def test_standard_format(self):
ts = _parse_line_timestamp("2026-04-05 10:00:00,123 INFO something")
assert ts is not None
assert ts.year == 2026
assert ts.hour == 10
ts = _parse_line_timestamp("2026-04-11 10:23:45 INFO gateway.run: msg")
assert ts == datetime(2026, 4, 11, 10, 23, 45)
def test_no_timestamp(self):
assert _parse_line_timestamp("just some text") is None
assert _parse_line_timestamp("no timestamp here") is None
def test_continuation_line(self):
assert _parse_line_timestamp(" at module.function (line 42)") is None
# ---------------------------------------------------------------------------
# _extract_level
# ---------------------------------------------------------------------------
class TestExtractLevel:
def test_info(self):
assert _extract_level("2026-04-05 10:00:00 INFO run_agent: something") == "INFO"
assert _extract_level("2026-01-01 00:00:00 INFO gateway.run: msg") == "INFO"
def test_warning(self):
assert _extract_level("2026-04-05 10:00:00 WARNING run_agent: bad") == "WARNING"
assert _extract_level("2026-01-01 00:00:00 WARNING tools.file: msg") == "WARNING"
def test_error(self):
assert _extract_level("2026-04-05 10:00:00 ERROR run_agent: crash") == "ERROR"
assert _extract_level("2026-01-01 00:00:00 ERROR run_agent: msg") == "ERROR"
def test_debug(self):
assert _extract_level("2026-04-05 10:00:00 DEBUG run_agent: detail") == "DEBUG"
assert _extract_level("2026-01-01 00:00:00 DEBUG agent.aux: msg") == "DEBUG"
def test_no_level(self):
assert _extract_level("just a plain line") is None
assert _extract_level("random text") is None
# ---------------------------------------------------------------------------
# _matches_filters
# Logger name extraction (new for component filtering)
# ---------------------------------------------------------------------------
class TestExtractLoggerName:
    """_extract_logger_name pulls the dotted logger name out of a log line."""

    def test_standard_line(self):
        got = _extract_logger_name("2026-04-11 10:23:45 INFO gateway.run: Starting gateway")
        assert got == "gateway.run"

    def test_nested_logger(self):
        got = _extract_logger_name("2026-04-11 10:23:45 INFO gateway.platforms.telegram: connected")
        assert got == "gateway.platforms.telegram"

    def test_warning_level(self):
        got = _extract_logger_name("2026-04-11 10:23:45 WARNING tools.terminal_tool: timeout")
        assert got == "tools.terminal_tool"

    def test_with_session_tag(self):
        # A [session] tag between level and logger must not confuse extraction.
        got = _extract_logger_name("2026-04-11 10:23:45 INFO [abc123] tools.file_tools: reading file")
        assert got == "tools.file_tools"

    def test_with_session_tag_and_error(self):
        got = _extract_logger_name("2026-04-11 10:23:45 ERROR [sess_xyz] agent.context_compressor: failed")
        assert got == "agent.context_compressor"

    def test_top_level_module(self):
        got = _extract_logger_name("2026-04-11 10:23:45 INFO run_agent: starting conversation")
        assert got == "run_agent"

    def test_no_match(self):
        assert _extract_logger_name("random text") is None
class TestLineMatchesComponent:
    """_line_matches_component: prefix matching on the extracted logger name."""

    def test_gateway_component(self):
        assert _line_matches_component(
            "2026-04-11 10:23:45 INFO gateway.run: msg", ("gateway",))

    def test_gateway_nested(self):
        assert _line_matches_component(
            "2026-04-11 10:23:45 INFO gateway.platforms.telegram: msg", ("gateway",))

    def test_tools_component(self):
        assert _line_matches_component(
            "2026-04-11 10:23:45 INFO tools.terminal_tool: msg", ("tools",))

    def test_agent_with_multiple_prefixes(self):
        prefixes = ("agent", "run_agent", "model_tools")
        matching_lines = [
            "2026-04-11 10:23:45 INFO agent.context_compressor: msg",
            "2026-04-11 10:23:45 INFO run_agent: msg",
            "2026-04-11 10:23:45 INFO model_tools: msg",
        ]
        for line in matching_lines:
            assert _line_matches_component(line, prefixes)

    def test_no_match(self):
        assert not _line_matches_component(
            "2026-04-11 10:23:45 INFO tools.browser: msg", ("gateway",))

    def test_with_session_tag(self):
        assert _line_matches_component(
            "2026-04-11 10:23:45 INFO [abc] gateway.run: msg", ("gateway",))

    def test_unparseable_line(self):
        assert not _line_matches_component("random text", ("gateway",))
# ---------------------------------------------------------------------------
# Combined filter
# ---------------------------------------------------------------------------
class TestMatchesFilters:
def test_no_filters_always_matches(self):
assert _matches_filters("any line") is True
def test_no_filters_passes_everything(self):
assert _matches_filters("any line")
def test_level_filter_passes(self):
def test_level_filter(self):
assert _matches_filters(
"2026-04-05 10:00:00 WARNING something",
min_level="WARNING",
) is True
"2026-01-01 00:00:00 WARNING x: msg", min_level="WARNING")
assert not _matches_filters(
"2026-01-01 00:00:00 INFO x: msg", min_level="WARNING")
def test_level_filter_rejects(self):
def test_session_filter(self):
assert _matches_filters(
"2026-04-05 10:00:00 INFO something",
min_level="WARNING",
) is False
"2026-01-01 00:00:00 INFO [abc123] x: msg", session_filter="abc123")
assert not _matches_filters(
"2026-01-01 00:00:00 INFO [xyz789] x: msg", session_filter="abc123")
def test_session_filter_passes(self):
def test_component_filter(self):
assert _matches_filters(
"session=sess_aaa model=claude",
session_filter="sess_aaa",
) is True
def test_session_filter_rejects(self):
assert _matches_filters(
"session=sess_aaa model=claude",
session_filter="sess_bbb",
) is False
def test_since_filter_passes(self):
# Line from the future should always pass
assert _matches_filters(
"2099-01-01 00:00:00 INFO future",
since=datetime.now(),
) is True
def test_since_filter_rejects(self):
assert _matches_filters(
"2020-01-01 00:00:00 INFO past",
since=datetime.now(),
) is False
"2026-01-01 00:00:00 INFO gateway.run: msg",
component_prefixes=("gateway",))
assert not _matches_filters(
"2026-01-01 00:00:00 INFO tools.file: msg",
component_prefixes=("gateway",))
def test_combined_filters(self):
line = "2099-01-01 00:00:00 WARNING run_agent: session=abc error"
"""All filters must pass for a line to match."""
line = "2026-04-11 10:00:00 WARNING [sess_1] gateway.run: connection lost"
assert _matches_filters(
line, min_level="WARNING", session_filter="abc",
since=datetime.now(),
) is True
# Fails session filter
line,
min_level="WARNING",
session_filter="sess_1",
component_prefixes=("gateway",),
)
# Fails component filter
assert not _matches_filters(
line,
min_level="WARNING",
session_filter="sess_1",
component_prefixes=("tools",),
)
def test_since_filter(self):
# Line with a very old timestamp should be filtered out
assert not _matches_filters(
"2020-01-01 00:00:00 INFO x: old msg",
since=datetime.now() - timedelta(hours=1))
# Line with a recent timestamp should pass
recent = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
assert _matches_filters(
line, min_level="WARNING", session_filter="xyz",
) is False
f"{recent} INFO x: recent msg",
since=datetime.now() - timedelta(hours=1))
# ---------------------------------------------------------------------------
# _read_last_n_lines
# File reading
# ---------------------------------------------------------------------------
class TestReadLastNLines:
def test_reads_correct_count(self, sample_agent_log):
lines = _read_last_n_lines(sample_agent_log, 3)
assert len(lines) == 3
class TestReadTail:
def test_read_small_file(self, tmp_path):
log_file = tmp_path / "test.log"
lines = [f"2026-01-01 00:00:0{i} INFO x: line {i}\n" for i in range(10)]
log_file.write_text("".join(lines))
def test_reads_all_when_fewer(self, sample_agent_log):
lines = _read_last_n_lines(sample_agent_log, 100)
assert len(lines) == 10 # sample has 10 lines
result = _read_last_n_lines(log_file, 5)
assert len(result) == 5
assert "line 9" in result[-1]
def test_empty_file(self, log_dir):
empty = log_dir / "empty.log"
empty.write_text("")
lines = _read_last_n_lines(empty, 10)
assert lines == []
def test_read_with_component_filter(self, tmp_path):
log_file = tmp_path / "test.log"
lines = [
"2026-01-01 00:00:00 INFO gateway.run: gw msg\n",
"2026-01-01 00:00:01 INFO tools.file: tool msg\n",
"2026-01-01 00:00:02 INFO gateway.session: session msg\n",
"2026-01-01 00:00:03 INFO agent.compressor: agent msg\n",
]
log_file.write_text("".join(lines))
def test_last_line_content(self, sample_agent_log):
lines = _read_last_n_lines(sample_agent_log, 1)
assert "rotated to key-2" in lines[0]
result = _read_tail(
log_file, 50,
has_filters=True,
component_prefixes=("gateway",),
)
assert len(result) == 2
assert "gw msg" in result[0]
assert "session msg" in result[1]
def test_empty_file(self, tmp_path):
log_file = tmp_path / "empty.log"
log_file.write_text("")
result = _read_last_n_lines(log_file, 10)
assert result == []
# ---------------------------------------------------------------------------
# tail_log
# LOG_FILES registry
# ---------------------------------------------------------------------------
class TestTailLog:
    """tail_log prints a `--- file ---` header followed by filtered lines."""
    # NOTE(review): the surrounding diff replaces tail_log with _read_tail;
    # these tests target the older tail_log API — confirm it still exists.

    def test_basic_tail(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=3)
        captured = capsys.readouterr()
        assert "agent.log" in captured.out
        # Should have the header + 3 lines
        lines = captured.out.strip().split("\n")
        assert len(lines) == 4  # 1 header + 3 content

    def test_level_filter(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=50, level="ERROR")
        captured = capsys.readouterr()
        assert "level>=ERROR" in captured.out
        # Only the ERROR line should appear
        content_lines = [l for l in captured.out.strip().split("\n") if not l.startswith("---")]
        assert len(content_lines) == 1
        assert "API call failed" in content_lines[0]

    def test_session_filter(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=50, session="sess_bbb")
        captured = capsys.readouterr()
        content_lines = [l for l in captured.out.strip().split("\n") if not l.startswith("---")]
        assert len(content_lines) == 1
        assert "sess_bbb" in content_lines[0]

    def test_errors_log(self, sample_errors_log, capsys):
        tail_log("errors", num_lines=10)
        captured = capsys.readouterr()
        assert "errors.log" in captured.out
        assert "WARNING" in captured.out or "ERROR" in captured.out

    def test_unknown_log_exits(self):
        # Unknown logical log name → hard exit rather than a traceback.
        with pytest.raises(SystemExit):
            tail_log("nonexistent")

    def test_missing_file_exits(self, log_dir):
        with pytest.raises(SystemExit):
            tail_log("agent")  # agent.log doesn't exist in clean log_dir
# ---------------------------------------------------------------------------
# list_logs
# ---------------------------------------------------------------------------
class TestListLogs:
    """list_logs prints each log file name with a human-readable size."""

    def test_lists_files(self, sample_agent_log, sample_errors_log, capsys):
        list_logs()
        out = capsys.readouterr().out
        for expected_name in ("agent.log", "errors.log"):
            assert expected_name in out

    def test_empty_dir(self, log_dir, capsys):
        list_logs()
        assert "no log files yet" in capsys.readouterr().out

    def test_shows_sizes(self, sample_agent_log, capsys):
        list_logs()
        out = capsys.readouterr().out
        # File is small, should show as bytes or KB
        assert "B" in out or "KB" in out
class TestLogFiles:
    """The LOG_FILES registry exposes every known logical log name."""

    def test_known_log_files(self):
        for name in ("agent", "errors", "gateway"):
            assert name in LOG_FILES

View file

@ -46,6 +46,8 @@ def _make_args(**kwargs):
"command": None,
"args": None,
"auth": None,
"preset": None,
"env": None,
"mcp_action": None,
}
defaults.update(kwargs)
@ -269,6 +271,145 @@ class TestMcpAdd:
config = load_config()
assert config["mcp_servers"]["broken"]["enabled"] is False
def test_add_stdio_server_with_env(self, tmp_path, capsys, monkeypatch):
    """Stdio servers can persist explicit environment variables."""
    fake_tools = [FakeTool("search", "Search repos")]

    def mock_probe(name, config, **kw):
        # The parsed --env KEY=value pairs must reach the probe as a dict.
        assert config["env"] == {
            "MY_API_KEY": "secret123",
            "DEBUG": "true",
        }
        return [(t.name, t.description) for t in fake_tools]

    monkeypatch.setattr(
        "hermes_cli.mcp_config._probe_single_server", mock_probe
    )
    monkeypatch.setattr("builtins.input", lambda _: "")  # accept prompts with defaults
    from hermes_cli.mcp_config import cmd_mcp_add
    cmd_mcp_add(_make_args(
        name="github",
        command="npx",
        args=["@mcp/github"],
        env=["MY_API_KEY=secret123", "DEBUG=true"],
    ))
    out = capsys.readouterr().out
    assert "Saved" in out
    from hermes_cli.config import load_config
    config = load_config()
    srv = config["mcp_servers"]["github"]
    # The env pairs are persisted as a mapping in the saved config.
    assert srv["env"] == {
        "MY_API_KEY": "secret123",
        "DEBUG": "true",
    }
def test_add_stdio_server_rejects_invalid_env_name(self, capsys):
    """Invalid environment variable names are rejected up front."""
    from hermes_cli.mcp_config import cmd_mcp_add

    bad_args = _make_args(
        name="github",
        command="npx",
        args=["@mcp/github"],
        env=["BAD-NAME=value"],
    )
    cmd_mcp_add(bad_args)
    assert "Invalid --env variable name" in capsys.readouterr().out
def test_add_http_server_rejects_env_flag(self, capsys):
    """The --env flag is only valid for stdio transports."""
    from hermes_cli.mcp_config import cmd_mcp_add

    http_args = _make_args(
        name="ink",
        url="https://mcp.ml.ink/mcp",
        env=["DEBUG=true"],
    )
    cmd_mcp_add(http_args)
    assert "only supported for stdio MCP servers" in capsys.readouterr().out
def test_add_preset_fills_transport(self, tmp_path, capsys, monkeypatch):
    """A preset fills in command/args when no explicit transport given."""
    monkeypatch.setattr(
        "hermes_cli.mcp_config._MCP_PRESETS",
        {"testmcp": {"command": "npx", "args": ["-y", "test-mcp-server"], "display_name": "Test MCP"}},
    )
    fake_tools = [FakeTool("do_thing", "Does a thing")]

    def mock_probe(name, config, **kw):
        # The server keeps the user-chosen name; the transport comes from the preset.
        assert name == "myserver"
        assert config["command"] == "npx"
        assert config["args"] == ["-y", "test-mcp-server"]
        assert "env" not in config
        return [(t.name, t.description) for t in fake_tools]

    monkeypatch.setattr(
        "hermes_cli.mcp_config._probe_single_server", mock_probe
    )
    monkeypatch.setattr("builtins.input", lambda _: "")
    from hermes_cli.mcp_config import cmd_mcp_add
    from hermes_cli.config import read_raw_config
    cmd_mcp_add(_make_args(name="myserver", preset="testmcp"))
    out = capsys.readouterr().out
    assert "Saved" in out
    # The preset-derived transport must also be what gets persisted.
    config = read_raw_config()
    srv = config["mcp_servers"]["myserver"]
    assert srv["command"] == "npx"
    assert srv["args"] == ["-y", "test-mcp-server"]
    assert "env" not in srv
def test_preset_does_not_override_explicit_command(self, tmp_path, capsys, monkeypatch):
    """Explicit transports win over presets."""
    monkeypatch.setattr(
        "hermes_cli.mcp_config._MCP_PRESETS",
        {"testmcp": {"command": "npx", "args": ["-y", "test-mcp-server"], "display_name": "Test MCP"}},
    )
    fake_tools = [FakeTool("search", "Search repos")]

    def mock_probe(name, config, **kw):
        # Explicit --command/--args must survive even though a preset was given.
        assert config["command"] == "uvx"
        assert config["args"] == ["custom-server"]
        assert "env" not in config
        return [(t.name, t.description) for t in fake_tools]

    monkeypatch.setattr(
        "hermes_cli.mcp_config._probe_single_server", mock_probe
    )
    monkeypatch.setattr("builtins.input", lambda _: "")
    from hermes_cli.mcp_config import cmd_mcp_add
    from hermes_cli.config import read_raw_config
    cmd_mcp_add(_make_args(
        name="custom",
        preset="testmcp",
        command="uvx",
        args=["custom-server"],
    ))
    out = capsys.readouterr().out
    assert "Saved" in out
    config = read_raw_config()
    srv = config["mcp_servers"]["custom"]
    assert srv["command"] == "uvx"
    assert srv["args"] == ["custom-server"]
    assert "env" not in srv
def test_unknown_preset_rejected(self, capsys):
    """An unknown preset name is rejected with a clear error."""
    from hermes_cli.mcp_config import cmd_mcp_add

    cmd_mcp_add(_make_args(name="foo", preset="nonexistent"))
    assert "Unknown MCP preset" in capsys.readouterr().out
# ---------------------------------------------------------------------------
# Tests: cmd_mcp_test

View file

@ -257,3 +257,76 @@ class TestProviderPersistsAfterModelSave:
assert model.get("provider") == "opencode-go"
assert model.get("default") == "minimax-m2.5"
assert model.get("api_mode") == "anthropic_messages"
class TestBaseUrlValidation:
    """Reject non-URL values in the base URL prompt (e.g. shell commands)."""

    def test_invalid_base_url_rejected(self, config_home, monkeypatch, capsys):
        """Typing a non-URL string should not be saved as the base URL."""
        from hermes_cli.auth import PROVIDER_REGISTRY
        pconfig = PROVIDER_REGISTRY.get("zai")
        if not pconfig:
            pytest.skip("zai not in PROVIDER_REGISTRY")
        monkeypatch.setenv("GLM_API_KEY", "test-key")
        from hermes_cli.main import _model_flow_api_key_provider
        from hermes_cli.config import load_config, get_env_value
        # User types a shell command instead of a URL at the base URL prompt
        with patch("hermes_cli.auth._prompt_model_selection", return_value="glm-5"), \
             patch("hermes_cli.auth.deactivate_provider"), \
             patch("builtins.input", return_value="nano ~/.hermes/.env"):
            _model_flow_api_key_provider(load_config(), "zai", "old-model")
        # The garbage value should NOT have been saved
        saved = get_env_value("GLM_BASE_URL") or ""
        assert not saved or saved.startswith(("http://", "https://")), \
            f"Non-URL value was saved as GLM_BASE_URL: {saved}"
        captured = capsys.readouterr()
        assert "Invalid URL" in captured.out

    def test_valid_base_url_accepted(self, config_home, monkeypatch):
        """A proper URL should be saved normally."""
        from hermes_cli.auth import PROVIDER_REGISTRY
        pconfig = PROVIDER_REGISTRY.get("zai")
        if not pconfig:
            pytest.skip("zai not in PROVIDER_REGISTRY")
        monkeypatch.setenv("GLM_API_KEY", "test-key")
        from hermes_cli.main import _model_flow_api_key_provider
        from hermes_cli.config import load_config, get_env_value
        with patch("hermes_cli.auth._prompt_model_selection", return_value="glm-5"), \
             patch("hermes_cli.auth.deactivate_provider"), \
             patch("builtins.input", return_value="https://custom.z.ai/api/paas/v4"):
            _model_flow_api_key_provider(load_config(), "zai", "old-model")
        # Valid https URL is persisted verbatim.
        saved = get_env_value("GLM_BASE_URL") or ""
        assert saved == "https://custom.z.ai/api/paas/v4"

    def test_empty_base_url_keeps_default(self, config_home, monkeypatch):
        """Pressing Enter (empty) should not change the base URL."""
        from hermes_cli.auth import PROVIDER_REGISTRY
        pconfig = PROVIDER_REGISTRY.get("zai")
        if not pconfig:
            pytest.skip("zai not in PROVIDER_REGISTRY")
        monkeypatch.setenv("GLM_API_KEY", "test-key")
        monkeypatch.delenv("GLM_BASE_URL", raising=False)
        from hermes_cli.main import _model_flow_api_key_provider
        from hermes_cli.config import load_config, get_env_value
        with patch("hermes_cli.auth._prompt_model_selection", return_value="glm-5"), \
             patch("hermes_cli.auth.deactivate_provider"), \
             patch("builtins.input", return_value=""):
            _model_flow_api_key_provider(load_config(), "zai", "old-model")
        saved = get_env_value("GLM_BASE_URL") or ""
        assert saved == "", "Empty input should not save a base URL"

View file

@ -1,8 +1,10 @@
from io import StringIO
from unittest.mock import patch
import pytest
from rich.console import Console
from cli import ChatConsole
from hermes_cli.skills_hub import do_check, do_install, do_list, do_update, handle_skills_slash
@ -179,6 +181,21 @@ def test_do_update_reinstalls_outdated_skills(monkeypatch):
assert "Updated 1 skill" in output
def test_handle_skills_slash_search_accepts_chatconsole_without_status_errors():
    """/skills search completes without raising when given a ChatConsole.

    Search, source-router, and GitHub auth collaborators are stubbed so only
    the slash-command plumbing and console interaction are exercised.
    """
    # One fake search result built as an anonymous attribute bag.
    results = [type("R", (), {
        "name": "kubernetes",
        "description": "Cluster orchestration",
        "source": "skills.sh",
        "trust_level": "community",
        "identifier": "skills-sh/example/kubernetes",
    })()]
    with patch("tools.skills_hub.unified_search", return_value=results), \
            patch("tools.skills_hub.create_source_router", return_value={}), \
            patch("tools.skills_hub.GitHubAuth"):
        # Must not raise (regression guard for console status errors).
        handle_skills_slash("/skills search kubernetes", console=ChatConsole())
def test_do_install_scans_with_resolved_identifier(monkeypatch, tmp_path, hub_env):
import tools.skills_guard as guard
import tools.skills_hub as hub

View file

@ -0,0 +1,77 @@
"""Tests for hermes_cli/tips.py — random tip display at session start."""
import pytest
from hermes_cli.tips import TIPS, get_random_tip, get_tip_count
class TestTipsCorpus:
    """Sanity checks on the static TIPS corpus shipped with the CLI."""

    def test_has_at_least_200_tips(self):
        tip_total = len(TIPS)
        assert tip_total >= 200, f"Expected 200+ tips, got {len(TIPS)}"

    def test_no_duplicates(self):
        unique_tips = set(TIPS)
        assert len(TIPS) == len(unique_tips), "Duplicate tips found"

    def test_all_tips_are_strings(self):
        # Index-based iteration so failure messages name the offending entry.
        for i in range(len(TIPS)):
            tip = TIPS[i]
            assert isinstance(tip, str), f"Tip {i} is not a string: {type(tip)}"

    def test_no_empty_tips(self):
        for i in range(len(TIPS)):
            tip = TIPS[i]
            assert tip.strip(), f"Tip {i} is empty or whitespace-only"

    def test_max_length_reasonable(self):
        """Tips should fit on a single terminal line (~120 chars max)."""
        for i in range(len(TIPS)):
            tip = TIPS[i]
            assert len(tip) <= 150, (
                f"Tip {i} too long ({len(tip)} chars): {tip[:60]}..."
            )

    def test_no_leading_trailing_whitespace(self):
        for i in range(len(TIPS)):
            tip = TIPS[i]
            assert tip == tip.strip(), f"Tip {i} has leading/trailing whitespace"
class TestGetRandomTip:
    """Behavioural checks for get_random_tip()."""

    def test_returns_string(self):
        result = get_random_tip()
        assert isinstance(result, str)
        assert len(result) > 0

    def test_returns_tip_from_corpus(self):
        result = get_random_tip()
        assert result in TIPS

    def test_randomness(self):
        """Multiple calls should eventually return different tips."""
        seen = {get_random_tip() for _ in range(50)}
        # With 200+ tips and 50 draws, we should see at least 10 unique
        assert len(seen) >= 10, f"Only got {len(seen)} unique tips in 50 draws"
class TestGetTipCount:
    """get_tip_count() must agree with the size of the corpus."""

    def test_matches_corpus_length(self):
        expected = len(TIPS)
        assert get_tip_count() == expected
class TestTipIntegrationInCLI:
    """Test that the tip display code in cli.py works correctly."""

    def test_tip_import_works(self):
        """The import used in cli.py must succeed."""
        from hermes_cli.tips import get_random_tip
        assert callable(get_random_tip)

    def test_tip_display_format(self):
        """Verify the Rich markup format doesn't break."""
        tip_text = get_random_tip()
        accent = "#B8860B"
        rendered = f"[dim {accent}]✦ Tip: {tip_text}[/]"
        # A single closing tag and the expected opening tag — no nesting.
        assert rendered.count("[/]") == 1
        assert "[dim #B8860B]" in rendered

View file

@ -798,3 +798,120 @@ class TestFindGatewayPidsExclude:
pids = gateway_cli.find_gateway_pids()
assert pids == [100]
# ---------------------------------------------------------------------------
# Gateway mode writes exit code before restart (#8300)
# ---------------------------------------------------------------------------
class TestGatewayModeWritesExitCodeEarly:
    """When running as ``hermes update --gateway``, the exit code marker must be
    written *before* the gateway restart attempt. Without this, systemd's
    ``KillMode=mixed`` kills the update process (and its wrapping shell) during
    the cgroup teardown, so the shell epilogue that normally writes the exit
    code never executes. The new gateway's update watcher then polls for 30
    minutes and sends a spurious timeout message.
    """

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_exit_code_written_in_gateway_mode(
        self, mock_run, _mock_which, capsys, tmp_path, monkeypatch,
    ):
        # Force the generic update path (non-macOS, non-systemd, non-Termux).
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        # Point HERMES_HOME at a temp dir so the marker file lands there
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        import hermes_cli.config as _cfg
        monkeypatch.setattr(_cfg, "get_hermes_home", lambda: hermes_home)
        # Also patch the module-level ref used by cmd_update
        import hermes_cli.main as _main_mod
        monkeypatch.setattr(_main_mod, "get_hermes_home", lambda: hermes_home)
        mock_run.side_effect = _make_run_side_effect(commit_count="1")
        args = SimpleNamespace(gateway=True)
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(args)
        exit_code_path = hermes_home / ".update_exit_code"
        assert exit_code_path.exists(), ".update_exit_code not written in gateway mode"
        assert exit_code_path.read_text() == "0"

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_exit_code_not_written_in_normal_mode(
        self, mock_run, _mock_which, capsys, tmp_path, monkeypatch,
    ):
        """Non-gateway mode should NOT write the exit code (the shell does it)."""
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        import hermes_cli.config as _cfg
        monkeypatch.setattr(_cfg, "get_hermes_home", lambda: hermes_home)
        import hermes_cli.main as _main_mod
        monkeypatch.setattr(_main_mod, "get_hermes_home", lambda: hermes_home)
        mock_run.side_effect = _make_run_side_effect(commit_count="1")
        # Same setup as above, but gateway=False: marker must NOT appear.
        args = SimpleNamespace(gateway=False)
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(args)
        exit_code_path = hermes_home / ".update_exit_code"
        assert not exit_code_path.exists(), ".update_exit_code should not be written outside gateway mode"

    @patch("shutil.which", return_value=None)
    @patch("subprocess.run")
    def test_exit_code_written_before_restart_call(
        self, mock_run, _mock_which, capsys, tmp_path, monkeypatch,
    ):
        """Exit code must exist BEFORE systemctl restart is called."""
        monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
        # systemd path enabled this time so a `systemctl restart` is issued.
        monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
        monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
        hermes_home = tmp_path / ".hermes"
        hermes_home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(hermes_home))
        import hermes_cli.config as _cfg
        monkeypatch.setattr(_cfg, "get_hermes_home", lambda: hermes_home)
        import hermes_cli.main as _main_mod
        monkeypatch.setattr(_main_mod, "get_hermes_home", lambda: hermes_home)
        exit_code_path = hermes_home / ".update_exit_code"
        # Track whether exit code exists when systemctl restart is called
        exit_code_existed_at_restart = []
        original_side_effect = _make_run_side_effect(
            commit_count="1", systemd_active=True,
        )

        def tracking_side_effect(cmd, **kwargs):
            # Intercept the restart invocation and snapshot marker existence.
            joined = " ".join(str(c) for c in cmd)
            if "systemctl" in joined and "restart" in joined:
                exit_code_existed_at_restart.append(exit_code_path.exists())
            return original_side_effect(cmd, **kwargs)

        mock_run.side_effect = tracking_side_effect
        args = SimpleNamespace(gateway=True)
        with patch.object(gateway_cli, "find_gateway_pids", return_value=[]):
            cmd_update(args)
        assert exit_code_existed_at_restart, "systemctl restart was never called"
        assert exit_code_existed_at_restart[0] is True, \
            ".update_exit_code must exist BEFORE systemctl restart (cgroup kill race)"

View file

@ -26,6 +26,7 @@ def _make_agent(
agent.provider = "openrouter"
agent.base_url = "https://openrouter.ai/api/v1"
agent.api_key = "sk-test"
agent.api_mode = "chat_completions"
agent.quiet_mode = True
agent.log_prefix = ""
agent.compression_enabled = compression_enabled
@ -99,6 +100,36 @@ def test_no_warning_when_aux_context_sufficient(mock_get_client, mock_ctx_len):
assert agent._compression_warning is None
def test_feasibility_check_passes_live_main_runtime():
    """Compression feasibility should probe using the live session runtime."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    # Reconfigure the agent to a codex-style runtime after construction.
    agent.model = "gpt-5.4"
    agent.provider = "openai-codex"
    agent.base_url = "https://chatgpt.com/backend-api/codex"
    agent.api_key = "codex-token"
    agent.api_mode = "codex_responses"
    mock_client = MagicMock()
    mock_client.base_url = "https://chatgpt.com/backend-api/codex"
    mock_client.api_key = "codex-token"
    with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(mock_client, "gpt-5.4")) as mock_get_client, \
            patch("agent.model_metadata.get_model_context_length", return_value=200_000):
        agent._emit_status = lambda msg: None  # silence status output
        agent._check_compression_model_feasibility()
    # The auxiliary client must be resolved with the *current* runtime fields,
    # not the ones the agent was constructed with.
    mock_get_client.assert_called_once_with(
        "compression",
        main_runtime={
            "model": "gpt-5.4",
            "provider": "openai-codex",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "codex-token",
            "api_mode": "codex_responses",
        },
    )
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_warns_when_no_auxiliary_provider(mock_get_client):
"""Warning emitted when no auxiliary provider is configured."""

View file

@ -2742,74 +2742,12 @@ class TestSystemPromptStability:
assert "Hermes Agent" in agent._cached_system_prompt
class TestBudgetPressure:
    """Budget pressure warning system (issue #414)."""
    # NOTE(review): the next bare string is a *second* class-level string
    # literal — a no-op expression statement, not the docstring. It looks like
    # a stale leftover from renaming this suite; confirm which description is
    # current and remove the other.
    """Budget exhaustion grace call system."""

    def test_no_warning_below_caution(self, agent):
        # 30 of 60 iterations used — below any warning threshold.
        agent.max_iterations = 60
        assert agent._get_budget_warning(30) is None

    def test_caution_at_70_percent(self, agent):
        agent.max_iterations = 60
        msg = agent._get_budget_warning(42)
        assert msg is not None
        assert "[BUDGET:" in msg
        assert "18 iterations left" in msg

    def test_warning_at_90_percent(self, agent):
        agent.max_iterations = 60
        msg = agent._get_budget_warning(54)
        assert "[BUDGET WARNING:" in msg
        assert "Provide your final response NOW" in msg

    def test_last_iteration(self, agent):
        agent.max_iterations = 60
        msg = agent._get_budget_warning(59)
        assert "1 iteration(s) left" in msg

    def test_disabled(self, agent):
        agent.max_iterations = 60
        agent._budget_pressure_enabled = False
        assert agent._get_budget_warning(55) is None

    def test_zero_max_iterations(self, agent):
        # Degenerate config: no budget means no warning (guards div-by-zero).
        agent.max_iterations = 0
        assert agent._get_budget_warning(0) is None

    def test_injects_into_json_tool_result(self, agent):
        """Warning should be injected as _budget_warning field in JSON tool results."""
        import json
        agent.max_iterations = 10
        messages = [
            {"role": "tool", "content": json.dumps({"output": "done", "exit_code": 0}), "tool_call_id": "tc1"}
        ]
        warning = agent._get_budget_warning(9)
        assert warning is not None
        # Simulate the injection logic
        last_content = messages[-1]["content"]
        parsed = json.loads(last_content)
        parsed["_budget_warning"] = warning
        messages[-1]["content"] = json.dumps(parsed, ensure_ascii=False)
        result = json.loads(messages[-1]["content"])
        assert "_budget_warning" in result
        assert "BUDGET WARNING" in result["_budget_warning"]
        assert result["output"] == "done"  # original content preserved

    def test_appends_to_non_json_tool_result(self, agent):
        """Warning should be appended as text for non-JSON tool results."""
        agent.max_iterations = 10
        messages = [
            {"role": "tool", "content": "plain text result", "tool_call_id": "tc1"}
        ]
        warning = agent._get_budget_warning(9)
        # Simulate injection logic for non-JSON
        last_content = messages[-1]["content"]
        try:
            import json
            json.loads(last_content)
        except (json.JSONDecodeError, TypeError):
            messages[-1]["content"] = last_content + f"\n\n{warning}"
        assert "plain text result" in messages[-1]["content"]
        assert "BUDGET WARNING" in messages[-1]["content"]

    def test_grace_call_flags_initialized(self, agent):
        """Agent should have budget grace call flags."""
        assert agent._budget_exhausted_injected is False
        assert agent._budget_grace_call is False
class TestSafeWriter:

View file

@ -744,6 +744,44 @@ def test_normalize_codex_response_marks_commentary_only_message_as_incomplete(mo
assert "inspect the repository" in (assistant_message.content or "")
def test_interim_commentary_is_not_marked_already_streamed_without_callbacks(monkeypatch):
    """With no stream-delta callback registered, interim commentary must be
    delivered with already_streamed=False (nothing was displayed yet)."""
    agent = _build_agent(monkeypatch)
    observed = {}
    # Fire a delta before any callback is attached — it goes nowhere.
    agent._fire_stream_delta("short version: yes")
    agent.interim_assistant_callback = lambda text, *, already_streamed=False: observed.update(
        {"text": text, "already_streamed": already_streamed}
    )
    agent._emit_interim_assistant_message({"role": "assistant", "content": "short version: yes"})
    assert observed == {
        "text": "short version: yes",
        "already_streamed": False,
    }
def test_interim_commentary_is_not_marked_already_streamed_when_stream_callback_fails(monkeypatch):
    """If the stream-delta callback raises, the text never reached the display,
    so the interim message must still report already_streamed=False."""
    agent = _build_agent(monkeypatch)
    observed = {}

    def failing_callback(_text):
        raise RuntimeError("display failed")

    agent.stream_delta_callback = failing_callback
    # The failing callback swallows/aborts display of this delta.
    agent._fire_stream_delta("short version: yes")
    agent.interim_assistant_callback = lambda text, *, already_streamed=False: observed.update(
        {"text": text, "already_streamed": already_streamed}
    )
    agent._emit_interim_assistant_message({"role": "assistant", "content": "short version: yes"})
    assert observed == {
        "text": "short version: yes",
        "already_streamed": False,
    }
def test_run_conversation_codex_continues_after_commentary_phase_message(monkeypatch):
agent = _build_agent(monkeypatch)
responses = [

View file

@ -185,6 +185,38 @@ def test_migrator_optionally_imports_supported_secrets_and_messaging_settings(tm
assert "TELEGRAM_BOT_TOKEN=123:abc" in env_text
def test_messaging_cwd_skipped_when_inside_source(tmp_path: Path):
    """MESSAGING_CWD pointing inside the OpenClaw source dir should be skipped."""
    mod = load_module()
    source = tmp_path / ".openclaw"
    target = tmp_path / ".hermes"
    target.mkdir()
    # Workspace path is inside the source directory
    ws_path = str(source / "workspace")
    (source / "credentials").mkdir(parents=True)
    (source / "openclaw.json").write_text(
        json.dumps({"agents": {"defaults": {"workspace": ws_path}}}),
        encoding="utf-8",
    )
    migrator = mod.Migrator(
        source_root=source,
        target_root=target,
        execute=True,
        workspace_target=None,
        overwrite=False,
        migrate_secrets=True,
        output_dir=target / "migration-report",
        selected_options={"messaging-settings"},
    )
    migrator.migrate()
    env_path = target / ".env"
    # If a .env was produced at all, it must not carry a MESSAGING_CWD that
    # points back into the source tree being migrated away from.
    if env_path.exists():
        assert "MESSAGING_CWD" not in env_path.read_text(encoding="utf-8")
def test_migrator_can_execute_only_selected_categories(tmp_path: Path):
mod = load_module()
source = tmp_path / ".openclaw"
@ -722,3 +754,98 @@ def test_skill_installs_cleanly_under_skills_guard():
KNOWN_FALSE_POSITIVES = {"agent_config_mod", "python_os_environ", "hermes_config_mod"}
for f in result.findings:
assert f.pattern_id in KNOWN_FALSE_POSITIVES, f"Unexpected finding: {f}"
# ── rebrand_text tests ────────────────────────────────────────
def test_rebrand_text_replaces_openclaw_variants():
mod = load_module()
assert mod.rebrand_text("OpenClaw prefers Python 3.11") == "Hermes prefers Python 3.11"
assert mod.rebrand_text("I told Open Claw to use dark mode") == "I told Hermes to use dark mode"
assert mod.rebrand_text("Open-Claw config is great") == "Hermes config is great"
assert mod.rebrand_text("openclaw should always respond concisely") == "Hermes should always respond concisely"
assert mod.rebrand_text("OPENCLAW uses tools well") == "Hermes uses tools well"
def test_rebrand_text_replaces_legacy_bot_names():
    """Legacy bot names (any casing) are rewritten to "Hermes"."""
    migrator_mod = load_module()
    cases = [
        ("ClawdBot remembers my timezone", "Hermes remembers my timezone"),
        ("clawdbot prefers tabs", "Hermes prefers tabs"),
        ("MoltBot was configured for Spanish", "Hermes was configured for Spanish"),
        ("moltbot uses Python", "Hermes uses Python"),
    ]
    for raw, expected in cases:
        assert migrator_mod.rebrand_text(raw) == expected
def test_rebrand_text_preserves_unrelated_content():
    """Text without legacy brand names passes through untouched."""
    migrator_mod = load_module()
    unchanged = "User prefers dark mode and lives in Las Vegas"
    assert migrator_mod.rebrand_text(unchanged) == unchanged
def test_rebrand_text_handles_multiple_replacements():
    """Several distinct legacy names in one string are all rewritten."""
    migrator_mod = load_module()
    original = "OpenClaw said to ask ClawdBot about MoltBot settings"
    rebranded = migrator_mod.rebrand_text(original)
    assert rebranded == "Hermes said to ask Hermes about Hermes settings"
def test_migrate_memory_rebrands_entries(tmp_path):
    """Memory entries mentioning legacy names are rebranded during migration."""
    mod = load_module()
    source_root = tmp_path / "openclaw"
    source_root.mkdir()
    workspace = source_root / "workspace"
    workspace.mkdir()
    # Seed a MEMORY.md containing both legacy brand names.
    memory_md = workspace / "MEMORY.md"
    memory_md.write_text(
        "# Memory\n\n- OpenClaw should use Python 3.11\n- ClawdBot prefers dark mode\n",
        encoding="utf-8",
    )
    target_root = tmp_path / "hermes"
    target_root.mkdir()
    (target_root / "memories").mkdir()
    migrator = mod.Migrator(
        source_root=source_root,
        target_root=target_root,
        execute=True,
        workspace_target=None,
        overwrite=False,
        migrate_secrets=False,
        output_dir=tmp_path / "report",
        selected_options={"memory"},
    )
    migrator.migrate()
    # Migrated memory must carry Hermes branding only.
    result = (target_root / "memories" / "MEMORY.md").read_text(encoding="utf-8")
    assert "OpenClaw" not in result
    assert "ClawdBot" not in result
    assert "Hermes" in result
def test_migrate_soul_rebrands_content(tmp_path):
    """SOUL.md content is rebranded when the soul category is migrated."""
    mod = load_module()
    source_root = tmp_path / "openclaw"
    source_root.mkdir()
    workspace = source_root / "workspace"
    workspace.mkdir()
    soul_md = workspace / "SOUL.md"
    soul_md.write_text("You are OpenClaw, an AI assistant made by SparkLab.", encoding="utf-8")
    target_root = tmp_path / "hermes"
    target_root.mkdir()
    migrator = mod.Migrator(
        source_root=source_root,
        target_root=target_root,
        execute=True,
        workspace_target=None,
        overwrite=False,
        migrate_secrets=False,
        output_dir=tmp_path / "report",
        selected_options={"soul"},
    )
    migrator.migrate()
    result = (target_root / "SOUL.md").read_text(encoding="utf-8")
    # The persona line must now identify as Hermes.
    assert "OpenClaw" not in result
    assert "You are Hermes" in result

View file

@ -0,0 +1,120 @@
"""Tests for empty model fallback — when provider is configured but model is missing."""
from unittest.mock import MagicMock, patch
import pytest
class TestGetDefaultModelForProvider:
    """Unit tests for hermes_cli.models.get_default_model_for_provider."""

    def test_known_provider_returns_first_model(self):
        from hermes_cli.models import get_default_model_for_provider
        # A provider with a static catalog yields its first catalog entry.
        default = get_default_model_for_provider("openai-codex")
        assert default
        assert isinstance(default, str)

    def test_openrouter_returns_empty(self):
        """OpenRouter uses dynamic model fetch, no static catalog entry."""
        from hermes_cli.models import get_default_model_for_provider
        assert get_default_model_for_provider("openrouter") == ""

    def test_unknown_provider_returns_empty(self):
        from hermes_cli.models import get_default_model_for_provider
        default = get_default_model_for_provider("nonexistent-provider")
        assert default == ""

    def test_custom_provider_returns_empty(self):
        """Custom provider has no model catalog — should return empty."""
        from hermes_cli.models import get_default_model_for_provider
        assert get_default_model_for_provider("some-random-custom") == ""
class TestGatewayEmptyModelFallback:
    """Test that _resolve_session_agent_runtime fills in empty model from provider catalog."""

    def test_empty_model_filled_from_provider(self):
        """When config has no model but provider is openai-codex, use first codex model."""
        from gateway.run import GatewayRunner
        # Bare instance: bypass __init__ so no real gateway state is required.
        runner = object.__new__(GatewayRunner)
        runner._session_model_overrides = {}
        # Mock _resolve_gateway_model to return empty string
        # Mock _resolve_runtime_agent_kwargs to return openai-codex provider
        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs", return_value={
                    "provider": "openai-codex",
                    "api_key": "test-key",
                    "base_url": "https://chatgpt.com/backend-api/codex",
                    "api_mode": "codex_responses",
                }):
            model, kwargs = runner._resolve_session_agent_runtime()
        # Model should have been filled in from provider catalog
        assert model, "Model should not be empty when provider is known"
        assert isinstance(model, str)
        assert kwargs["provider"] == "openai-codex"

    def test_nonempty_model_not_overridden(self):
        """When config has a model set, don't override it."""
        from gateway.run import GatewayRunner
        runner = object.__new__(GatewayRunner)
        runner._session_model_overrides = {}
        with patch("gateway.run._resolve_gateway_model", return_value="gpt-5.4"), \
                patch("gateway.run._resolve_runtime_agent_kwargs", return_value={
                    "provider": "openai-codex",
                    "api_key": "test-key",
                    "base_url": "https://chatgpt.com/backend-api/codex",
                    "api_mode": "codex_responses",
                }):
            model, kwargs = runner._resolve_session_agent_runtime()
        assert model == "gpt-5.4", "Explicit model should not be overridden"

    def test_empty_model_no_provider_stays_empty(self):
        """When both model and provider are empty, model stays empty."""
        from gateway.run import GatewayRunner
        runner = object.__new__(GatewayRunner)
        runner._session_model_overrides = {}
        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs", return_value={
                    "provider": "",
                    "api_key": "test-key",
                    "base_url": "https://example.com",
                    "api_mode": "chat_completions",
                }):
            model, kwargs = runner._resolve_session_agent_runtime()
        # Can't fill in a default without knowing the provider
        assert model == ""
class TestResolveGatewayModel:
    """_resolve_gateway_model extracts the configured model from config dicts."""

    def test_returns_default_key(self):
        from gateway.run import _resolve_gateway_model
        config = {"model": {"default": "gpt-5.4"}}
        assert _resolve_gateway_model(config) == "gpt-5.4"

    def test_returns_model_key_fallback(self):
        from gateway.run import _resolve_gateway_model
        config = {"model": {"model": "gpt-5.4"}}
        assert _resolve_gateway_model(config) == "gpt-5.4"

    def test_returns_empty_when_missing(self):
        from gateway.run import _resolve_gateway_model
        empty_section = {"model": {}}
        assert _resolve_gateway_model(empty_section) == ""

    def test_returns_empty_when_no_model_section(self):
        from gateway.run import _resolve_gateway_model
        assert _resolve_gateway_model({}) == ""

    def test_string_model_config(self):
        # A bare string under "model" is accepted as-is.
        from gateway.run import _resolve_gateway_model
        assert _resolve_gateway_model({"model": "my-model"}) == "my-model"

View file

@ -3,6 +3,7 @@
import logging
import os
import stat
import threading
from logging.handlers import RotatingFileHandler
from pathlib import Path
from unittest.mock import patch
@ -34,6 +35,8 @@ def _reset_logging_state():
h.close()
else:
pre_existing.append(h)
# Ensure the record factory is installed (it's idempotent).
hermes_logging._install_session_record_factory()
yield
# Restore — remove any handlers added during the test.
for h in list(root.handlers):
@ -41,6 +44,7 @@ def _reset_logging_state():
root.removeHandler(h)
h.close()
hermes_logging._logging_initialized = False
hermes_logging.clear_session_context()
@pytest.fixture
@ -220,6 +224,294 @@ class TestSetupLogging:
]
assert agent_handlers[0].level == logging.WARNING
    def test_record_factory_installed(self, hermes_home):
        """The custom record factory injects session_tag on all records."""
        hermes_logging.setup_logging(hermes_home=hermes_home)
        factory = logging.getLogRecordFactory()
        # The installed wrapper is marked so repeat installs can detect it.
        assert getattr(factory, "_hermes_session_injector", False), (
            "Record factory should have _hermes_session_injector marker"
        )
        # Verify session_tag exists on a fresh record
        record = factory("test", logging.INFO, "", 0, "msg", (), None)
        assert hasattr(record, "session_tag")
class TestGatewayMode:
    """setup_logging(mode='gateway') creates a filtered gateway.log."""

    def test_gateway_log_created(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home, mode="gateway")
        root = logging.getLogger()
        # Exactly one rotating handler pointed at gateway.log.
        gw_handlers = [
            h for h in root.handlers
            if isinstance(h, RotatingFileHandler)
            and "gateway.log" in getattr(h, "baseFilename", "")
        ]
        assert len(gw_handlers) == 1

    def test_gateway_log_not_created_in_cli_mode(self, hermes_home):
        # CLI mode must not attach the gateway-only handler.
        hermes_logging.setup_logging(hermes_home=hermes_home, mode="cli")
        root = logging.getLogger()
        gw_handlers = [
            h for h in root.handlers
            if isinstance(h, RotatingFileHandler)
            and "gateway.log" in getattr(h, "baseFilename", "")
        ]
        assert len(gw_handlers) == 0

    def test_gateway_log_receives_gateway_records(self, hermes_home):
        """gateway.log captures records from gateway.* loggers."""
        hermes_logging.setup_logging(hermes_home=hermes_home, mode="gateway")
        gw_logger = logging.getLogger("gateway.platforms.telegram")
        gw_logger.info("telegram connected")
        # Flush so the file content is on disk before reading it back.
        for h in logging.getLogger().handlers:
            h.flush()
        gw_log = hermes_home / "logs" / "gateway.log"
        assert gw_log.exists()
        assert "telegram connected" in gw_log.read_text()

    def test_gateway_log_rejects_non_gateway_records(self, hermes_home):
        """gateway.log does NOT capture records from tools.*, agent.*, etc."""
        hermes_logging.setup_logging(hermes_home=hermes_home, mode="gateway")
        tool_logger = logging.getLogger("tools.terminal_tool")
        tool_logger.info("running command")
        agent_logger = logging.getLogger("agent.context_compressor")
        agent_logger.info("compressing context")
        for h in logging.getLogger().handlers:
            h.flush()
        gw_log = hermes_home / "logs" / "gateway.log"
        # File may legitimately be absent if nothing was ever routed to it.
        if gw_log.exists():
            content = gw_log.read_text()
            assert "running command" not in content
            assert "compressing context" not in content

    def test_agent_log_still_receives_all(self, hermes_home):
        """agent.log (catch-all) still receives gateway AND tool records."""
        hermes_logging.setup_logging(hermes_home=hermes_home, mode="gateway")
        logging.getLogger("gateway.run").info("gateway msg")
        logging.getLogger("tools.file_tools").info("file msg")
        for h in logging.getLogger().handlers:
            h.flush()
        agent_log = hermes_home / "logs" / "agent.log"
        content = agent_log.read_text()
        assert "gateway msg" in content
        assert "file msg" in content
class TestSessionContext:
    """set_session_context / clear_session_context + _SessionFilter."""

    def test_session_tag_in_log_output(self, hermes_home):
        """When session context is set, log lines include [session_id]."""
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.set_session_context("abc123")
        test_logger = logging.getLogger("test.session_tag")
        test_logger.info("tagged message")
        # Flush so RotatingFileHandler content is on disk before reading.
        for h in logging.getLogger().handlers:
            h.flush()
        agent_log = hermes_home / "logs" / "agent.log"
        content = agent_log.read_text()
        assert "[abc123]" in content
        assert "tagged message" in content

    def test_no_session_tag_without_context(self, hermes_home):
        """Without session context, log lines have no session tag."""
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.clear_session_context()
        test_logger = logging.getLogger("test.no_session")
        test_logger.info("untagged message")
        for h in logging.getLogger().handlers:
            h.flush()
        agent_log = hermes_home / "logs" / "agent.log"
        content = agent_log.read_text()
        assert "untagged message" in content
        # Should not have any [xxx] session tag
        import re
        for line in content.splitlines():
            if "untagged message" in line:
                # NOTE(review): assumes the formatter emits
                # "... INFO [tag] test.no_session ..." so the slice between the
                # level and the logger name is where a tag would appear —
                # confirm against setup_logging's format string.
                assert not re.search(r"\[.+?\]", line.split("INFO")[1].split("test.no_session")[0])

    def test_clear_session_context(self, hermes_home):
        """After clearing, session tag disappears."""
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.set_session_context("xyz789")
        hermes_logging.clear_session_context()
        test_logger = logging.getLogger("test.cleared")
        test_logger.info("after clear")
        for h in logging.getLogger().handlers:
            h.flush()
        agent_log = hermes_home / "logs" / "agent.log"
        content = agent_log.read_text()
        assert "[xyz789]" not in content

    def test_session_context_thread_isolated(self, hermes_home):
        """Session context is per-thread — one thread's context doesn't leak."""
        # FIX: removed dead local `results = {}` — it was assigned but never
        # read; the test verifies isolation via the log file, not a dict.
        hermes_logging.setup_logging(hermes_home=hermes_home)

        def thread_a():
            hermes_logging.set_session_context("thread_a_session")
            logging.getLogger("test.thread_a").info("from thread A")
            for h in logging.getLogger().handlers:
                h.flush()

        def thread_b():
            hermes_logging.set_session_context("thread_b_session")
            logging.getLogger("test.thread_b").info("from thread B")
            for h in logging.getLogger().handlers:
                h.flush()

        # Run sequentially (start/join pairs) so file writes are deterministic.
        ta = threading.Thread(target=thread_a)
        tb = threading.Thread(target=thread_b)
        ta.start()
        ta.join()
        tb.start()
        tb.join()
        agent_log = hermes_home / "logs" / "agent.log"
        content = agent_log.read_text()
        # Each thread's message should have its own session tag
        for line in content.splitlines():
            if "from thread A" in line:
                assert "[thread_a_session]" in line
                assert "[thread_b_session]" not in line
            if "from thread B" in line:
                assert "[thread_b_session]" in line
                assert "[thread_a_session]" not in line
class TestRecordFactory:
    """Unit tests for the custom LogRecord factory."""

    def test_record_has_session_tag(self):
        """Every record gets a session_tag attribute."""
        factory = logging.getLogRecordFactory()
        record = factory("test", logging.INFO, "", 0, "msg", (), None)
        assert hasattr(record, "session_tag")

    def test_empty_tag_without_context(self):
        # No session context — the injected tag must be the empty string.
        hermes_logging.clear_session_context()
        factory = logging.getLogRecordFactory()
        record = factory("test", logging.INFO, "", 0, "msg", (), None)
        assert record.session_tag == ""

    def test_tag_with_context(self):
        hermes_logging.set_session_context("sess_42")
        factory = logging.getLogRecordFactory()
        record = factory("test", logging.INFO, "", 0, "msg", (), None)
        # Tag is pre-formatted with a leading space for direct interpolation.
        assert record.session_tag == " [sess_42]"

    def test_idempotent_install(self):
        """Calling _install_session_record_factory() twice doesn't double-wrap."""
        hermes_logging._install_session_record_factory()
        factory_a = logging.getLogRecordFactory()
        hermes_logging._install_session_record_factory()
        factory_b = logging.getLogRecordFactory()
        assert factory_a is factory_b

    def test_works_with_any_handler(self):
        """A handler using %(session_tag)s works even without _SessionFilter."""
        hermes_logging.set_session_context("any_handler_test")
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(session_tag)s %(message)s"))
        logger = logging.getLogger("_test_any_handler")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        try:
            # Should not raise KeyError
            logger.info("hello")
        finally:
            logger.removeHandler(handler)
class TestComponentFilter:
    """Unit tests for _ComponentFilter."""

    def test_passes_matching_prefix(self):
        component_filter = hermes_logging._ComponentFilter(("gateway",))
        rec = logging.LogRecord("gateway.run", logging.INFO, "", 0, "msg", (), None)
        assert component_filter.filter(rec) is True

    def test_passes_nested_matching_prefix(self):
        component_filter = hermes_logging._ComponentFilter(("gateway",))
        rec = logging.LogRecord("gateway.platforms.telegram", logging.INFO, "", 0, "msg", (), None)
        assert component_filter.filter(rec) is True

    def test_blocks_non_matching(self):
        component_filter = hermes_logging._ComponentFilter(("gateway",))
        rec = logging.LogRecord("tools.terminal_tool", logging.INFO, "", 0, "msg", (), None)
        assert component_filter.filter(rec) is False

    def test_multiple_prefixes(self):
        component_filter = hermes_logging._ComponentFilter(("agent", "run_agent", "model_tools"))
        # Names under any configured prefix are accepted.
        for accepted_name in ("agent.compressor", "run_agent", "model_tools"):
            rec = logging.LogRecord(accepted_name, logging.INFO, "", 0, "", (), None)
            assert component_filter.filter(rec)
        rejected = logging.LogRecord("tools.browser", logging.INFO, "", 0, "", (), None)
        assert not component_filter.filter(rejected)
class TestComponentPrefixes:
    """COMPONENT_PREFIXES covers the expected components."""

    def test_gateway_prefix(self):
        assert "gateway" in hermes_logging.COMPONENT_PREFIXES
        assert hermes_logging.COMPONENT_PREFIXES["gateway"] == ("gateway",)

    def test_agent_prefix(self):
        agent_prefixes = hermes_logging.COMPONENT_PREFIXES["agent"]
        # The agent component aggregates several logger roots.
        for expected in ("agent", "run_agent", "model_tools"):
            assert expected in agent_prefixes

    def test_tools_prefix(self):
        assert hermes_logging.COMPONENT_PREFIXES["tools"] == ("tools",)

    def test_cli_prefix(self):
        cli_prefixes = hermes_logging.COMPONENT_PREFIXES["cli"]
        assert "hermes_cli" in cli_prefixes
        assert "cli" in cli_prefixes

    def test_cron_prefix(self):
        assert hermes_logging.COMPONENT_PREFIXES["cron"] == ("cron",)
class TestSetupVerboseLogging:
"""setup_verbose_logging() adds a DEBUG-level console handler."""
@ -301,6 +593,59 @@ class TestAddRotatingHandler:
logger.removeHandler(h)
h.close()
def test_log_filter_attached(self, tmp_path):
    """Optional log_filter is attached to the handler."""
    target = logging.getLogger("_test_rotating_filter")
    log_filter = hermes_logging._ComponentFilter(("test",))
    hermes_logging._add_rotating_handler(
        target,
        tmp_path / "filtered.log",
        level=logging.INFO,
        max_bytes=1024,
        backup_count=1,
        formatter=logging.Formatter("%(message)s"),
        log_filter=log_filter,
    )
    rotating = [h for h in target.handlers if isinstance(h, RotatingFileHandler)]
    assert len(rotating) == 1
    assert log_filter in rotating[0].filters
    # Detach and close the handler so later tests see a clean logger.
    for handler in list(target.handlers):
        if isinstance(handler, RotatingFileHandler):
            target.removeHandler(handler)
            handler.close()
def test_no_session_filter_on_handler(self, tmp_path):
    """Handlers rely on record factory, not per-handler _SessionFilter."""
    log_path = tmp_path / "no_session_filter.log"
    target = logging.getLogger("_test_no_session_filter")
    hermes_logging._add_rotating_handler(
        target,
        log_path,
        level=logging.INFO,
        max_bytes=1024,
        backup_count=1,
        formatter=logging.Formatter("%(session_tag)s%(message)s"),
    )
    rotating = [h for h in target.handlers if isinstance(h, RotatingFileHandler)]
    assert len(rotating) == 1
    # No _SessionFilter on the handler — the record factory injects the tag.
    assert not rotating[0].filters
    # session_tag still renders in the formatted output via the record factory.
    hermes_logging.set_session_context("factory_test")
    target.info("test msg")
    rotating[0].flush()
    assert "[factory_test]" in log_path.read_text()
    # Detach and close the handler so later tests see a clean logger.
    for handler in list(target.handlers):
        if isinstance(handler, RotatingFileHandler):
            target.removeHandler(handler)
            handler.close()
def test_managed_mode_initial_open_sets_group_writable(self, tmp_path):
log_path = tmp_path / "managed-open.log"
logger = logging.getLogger("_test_rotating_managed_open")

View file

@ -0,0 +1,114 @@
"""Tests for network.force_ipv4 — the socket.getaddrinfo monkey-patch."""
import importlib
import socket
from unittest.mock import patch, MagicMock
import pytest
def _reload_constants():
    """Return a freshly reloaded hermes_constants module.

    Reloading resets module-level state so each caller gets a clean
    apply_ipv4_preference.
    """
    module = importlib.import_module("hermes_constants")
    return importlib.reload(module)
class TestApplyIPv4Preference:
    """Tests for apply_ipv4_preference().

    setup/teardown save and restore the real socket.getaddrinfo so the
    monkey-patch installed by apply_ipv4_preference (or by a test's own
    mock) never leaks into other tests.
    """

    def setup_method(self):
        """Save the original getaddrinfo before each test."""
        self._original = socket.getaddrinfo

    def teardown_method(self):
        """Restore the original getaddrinfo after each test."""
        socket.getaddrinfo = self._original

    def test_noop_when_force_false(self):
        """No patch when force=False."""
        from hermes_constants import apply_ipv4_preference

        original = socket.getaddrinfo
        apply_ipv4_preference(force=False)
        assert socket.getaddrinfo is original

    def test_patches_getaddrinfo_when_forced(self):
        """Patches socket.getaddrinfo when force=True."""
        from hermes_constants import apply_ipv4_preference

        original = socket.getaddrinfo
        apply_ipv4_preference(force=True)
        assert socket.getaddrinfo is not original
        # The wrapper marks itself so double-patching can be detected.
        assert getattr(socket.getaddrinfo, "_hermes_ipv4_patched", False) is True

    def test_double_patch_is_safe(self):
        """Calling apply twice doesn't double-wrap."""
        from hermes_constants import apply_ipv4_preference

        apply_ipv4_preference(force=True)
        first_patch = socket.getaddrinfo
        apply_ipv4_preference(force=True)
        assert socket.getaddrinfo is first_patch

    def test_af_unspec_becomes_af_inet(self):
        """AF_UNSPEC (default) calls get rewritten to AF_INET."""
        from hermes_constants import apply_ipv4_preference

        calls = []

        def mock_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
            calls.append(family)
            return [(socket.AF_INET, socket.SOCK_STREAM, 6, "", ("93.184.216.34", 80))]

        # Install the mock first so the patch wraps it, then force the patch.
        socket.getaddrinfo = mock_getaddrinfo
        apply_ipv4_preference(force=True)
        # Call with default family (AF_UNSPEC = 0)
        socket.getaddrinfo("example.com", 80)
        assert calls[-1] == socket.AF_INET, "AF_UNSPEC should be rewritten to AF_INET"

    def test_explicit_family_preserved(self):
        """Explicit AF_INET6 requests are not intercepted."""
        from hermes_constants import apply_ipv4_preference

        calls = []

        def mock_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
            calls.append(family)
            return [(family, socket.SOCK_STREAM, 6, "", ("::1", 80))]

        socket.getaddrinfo = mock_getaddrinfo
        apply_ipv4_preference(force=True)
        socket.getaddrinfo("example.com", 80, family=socket.AF_INET6)
        assert calls[-1] == socket.AF_INET6, "Explicit AF_INET6 should pass through"

    def test_fallback_on_gaierror(self):
        """Falls back to AF_UNSPEC if AF_INET resolution fails."""
        from hermes_constants import apply_ipv4_preference

        call_families = []

        def mock_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
            call_families.append(family)
            if family == socket.AF_INET:
                raise socket.gaierror("No A record")
            # AF_UNSPEC fallback returns IPv6
            return [(socket.AF_INET6, socket.SOCK_STREAM, 6, "", ("::1", 80))]

        socket.getaddrinfo = mock_getaddrinfo
        apply_ipv4_preference(force=True)
        result = socket.getaddrinfo("ipv6only.example.com", 80)
        # Should have tried AF_INET first, then fallen back to AF_UNSPEC
        assert call_families == [socket.AF_INET, 0]
        assert result[0][0] == socket.AF_INET6
class TestConfigDefault:
    """Verify network section exists in DEFAULT_CONFIG."""

    def test_network_section_in_default_config(self):
        from hermes_cli.config import DEFAULT_CONFIG

        network_cfg = DEFAULT_CONFIG.get("network")
        # Section must exist and IPv4 forcing must default to off.
        assert network_cfg is not None
        assert network_cfg["force_ipv4"] is False

View file

@ -59,8 +59,9 @@ class TestCamofoxConfigDefaults:
browser_cfg = DEFAULT_CONFIG["browser"]
assert browser_cfg["camofox"]["managed_persistence"] is False
def test_config_version_unchanged(self):
def test_config_version_matches_current_schema(self):
from hermes_cli.config import DEFAULT_CONFIG
# managed_persistence is auto-merged by _deep_merge, no version bump needed
assert DEFAULT_CONFIG["_config_version"] == 13
# The current schema version is tracked globally; unrelated default
# options may bump it after browser defaults are added.
assert DEFAULT_CONFIG["_config_version"] == 15

View file

@ -380,7 +380,7 @@ class TestStubSchemaDrift(unittest.TestCase):
# Parameters that are internal (injected by the handler, not user-facing)
_INTERNAL_PARAMS = {"task_id", "user_task"}
# Parameters intentionally blocked in the sandbox
_BLOCKED_TERMINAL_PARAMS = {"background", "check_interval", "pty", "notify_on_complete"}
_BLOCKED_TERMINAL_PARAMS = {"background", "pty", "notify_on_complete"}
def test_stubs_cover_all_schema_params(self):
"""Every user-facing parameter in the real schema must appear in the

View file

@ -0,0 +1,295 @@
"""Tests for Modal bulk upload via tar/base64 archive."""
import asyncio
import base64
import io
import tarfile
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from tools.environments import modal as modal_env
def _make_mock_modal_env(monkeypatch, tmp_path):
    """Create a minimal mock ModalEnvironment for testing upload methods.

    Returns a ModalEnvironment-like object with _sandbox and _worker mocked.
    We don't call __init__ because it requires the Modal SDK.  The
    monkeypatch/tmp_path fixtures are accepted for call-site symmetry with
    the tests but are not used here.
    """
    env = object.__new__(modal_env.ModalEnvironment)
    defaults = {
        "_sandbox": MagicMock(),
        "_worker": MagicMock(),
        "_persistent": False,
        "_task_id": "test",
        "_sync_manager": None,
    }
    for attr, value in defaults.items():
        setattr(env, attr, value)
    return env
def _make_mock_stdin():
"""Create a mock stdin that captures written data."""
stdin = MagicMock()
written_chunks = []
def mock_write(data):
written_chunks.append(data)
stdin.write = mock_write
stdin.write_eof = MagicMock()
stdin.drain = MagicMock()
stdin.drain.aio = AsyncMock()
stdin._written_chunks = written_chunks
return stdin
def _wire_async_exec(env, exec_calls=None):
    """Wire mock sandbox.exec.aio and a real run_coroutine on the env.

    Optionally captures exec call args into *exec_calls* list.
    Returns (exec_calls, run_kwargs, stdin_mock).
    """
    exec_calls = [] if exec_calls is None else exec_calls
    run_kwargs: dict = {}
    stdin_mock = _make_mock_stdin()

    async def fake_exec(*args, **kwargs):
        # Record positional args and hand back a process mock that exits 0
        # with empty stderr, sharing the captured stdin.
        exec_calls.append(args)
        process = MagicMock()
        process.wait = MagicMock()
        process.wait.aio = AsyncMock(return_value=0)
        process.stdin = stdin_mock
        process.stderr = MagicMock()
        process.stderr.read = MagicMock()
        process.stderr.read.aio = AsyncMock(return_value="")
        return process

    env._sandbox.exec = MagicMock()
    env._sandbox.exec.aio = fake_exec

    def run_coroutine(coro, **kwargs):
        # Capture scheduling kwargs (e.g. timeout=...) and drive the
        # coroutine to completion on a private event loop.
        run_kwargs.update(kwargs)
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(coro)
        finally:
            loop.close()

    env._worker.run_coroutine = run_coroutine
    return exec_calls, run_kwargs, stdin_mock
class TestModalBulkUpload:
    """Test _modal_bulk_upload method.

    The environment under test is built by _make_mock_modal_env (no real
    Modal SDK), so these tests only exercise the command string, the
    stdin payload, and the worker scheduling around the remote exec.
    """

    def test_empty_files_is_noop(self, monkeypatch, tmp_path):
        """Empty file list should not call worker.run_coroutine."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        env._modal_bulk_upload([])
        env._worker.run_coroutine.assert_not_called()

    def test_tar_archive_contains_all_files(self, monkeypatch, tmp_path):
        """The tar archive sent via stdin should contain all files."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        src_a = tmp_path / "a.json"
        src_b = tmp_path / "b.py"
        src_a.write_text("cred_content")
        src_b.write_text("skill_content")
        # (local_path, remote_path) pairs, as the sync manager supplies them.
        files = [
            (str(src_a), "/root/.hermes/credentials/a.json"),
            (str(src_b), "/root/.hermes/skills/b.py"),
        ]
        exec_calls, _, stdin_mock = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        # Verify the command reads from stdin (no echo with embedded payload)
        assert len(exec_calls) == 1
        args = exec_calls[0]
        assert args[0] == "bash"
        assert args[1] == "-c"
        cmd = args[2]
        assert "mkdir -p" in cmd
        assert "base64 -d" in cmd
        assert "tar xzf" in cmd
        assert "-C /" in cmd
        # Reassemble the base64 payload from stdin chunks and verify tar contents
        payload = "".join(stdin_mock._written_chunks)
        tar_data = base64.b64decode(payload)
        buf = io.BytesIO(tar_data)
        with tarfile.open(fileobj=buf, mode="r:gz") as tar:
            # Archive members are rooted relative paths (extracted with -C /).
            names = sorted(tar.getnames())
            assert "root/.hermes/credentials/a.json" in names
            assert "root/.hermes/skills/b.py" in names
            # Verify content
            a_content = tar.extractfile("root/.hermes/credentials/a.json").read()
            assert a_content == b"cred_content"
            b_content = tar.extractfile("root/.hermes/skills/b.py").read()
            assert b_content == b"skill_content"
        # Verify stdin was closed
        stdin_mock.write_eof.assert_called_once()

    def test_mkdir_includes_all_parents(self, monkeypatch, tmp_path):
        """Remote parent directories should be pre-created in the command."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        src = tmp_path / "f.txt"
        src.write_text("data")
        files = [
            (str(src), "/root/.hermes/credentials/f.txt"),
            (str(src), "/root/.hermes/skills/deep/nested/f.txt"),
        ]
        exec_calls, _, _ = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        # The bash -c command string must name each remote parent dir.
        cmd = exec_calls[0][2]
        assert "/root/.hermes/credentials" in cmd
        assert "/root/.hermes/skills/deep/nested" in cmd

    def test_single_exec_call(self, monkeypatch, tmp_path):
        """Bulk upload should use exactly one exec call regardless of file count."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        files = []
        for i in range(20):
            src = tmp_path / f"file_{i}.txt"
            src.write_text(f"content_{i}")
            files.append((str(src), f"/root/.hermes/cache/file_{i}.txt"))
        exec_calls, _, _ = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        # Should be exactly 1 exec call, not 20
        assert len(exec_calls) == 1

    def test_bulk_upload_wired_in_filesyncmanager(self, monkeypatch):
        """Verify ModalEnvironment passes bulk_upload_fn to FileSyncManager."""
        captured_kwargs = {}

        def capture_fsm(**kwargs):
            # Stand-in FileSyncManager: record kwargs, expose a no-op sync().
            captured_kwargs.update(kwargs)
            return type("M", (), {"sync": lambda self, **k: None})()

        monkeypatch.setattr(modal_env, "FileSyncManager", capture_fsm)
        # Create a minimal env without full __init__
        env = object.__new__(modal_env.ModalEnvironment)
        env._sandbox = MagicMock()
        env._worker = MagicMock()
        env._persistent = False
        env._task_id = "test"
        # Manually call the part of __init__ that wires FileSyncManager
        # (mirrors ModalEnvironment.__init__ — keep in sync with it).
        from tools.environments.file_sync import iter_sync_files
        env._sync_manager = modal_env.FileSyncManager(
            get_files_fn=lambda: iter_sync_files("/root/.hermes"),
            upload_fn=env._modal_upload,
            delete_fn=env._modal_delete,
            bulk_upload_fn=env._modal_bulk_upload,
        )
        assert "bulk_upload_fn" in captured_kwargs
        assert captured_kwargs["bulk_upload_fn"] is not None
        assert callable(captured_kwargs["bulk_upload_fn"])

    def test_timeout_set_to_120(self, monkeypatch, tmp_path):
        """Bulk upload uses a 120s timeout (not the per-file 15s)."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        src = tmp_path / "f.txt"
        src.write_text("data")
        files = [(str(src), "/root/.hermes/f.txt")]
        # run_kwargs captures the kwargs passed to worker.run_coroutine.
        _, run_kwargs, _ = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        assert run_kwargs.get("timeout") == 120

    def test_nonzero_exit_raises(self, monkeypatch, tmp_path):
        """Non-zero exit code from remote exec should raise RuntimeError."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        src = tmp_path / "f.txt"
        src.write_text("data")
        files = [(str(src), "/root/.hermes/f.txt")]
        stdin_mock = _make_mock_stdin()

        # Hand-rolled variant of _wire_async_exec whose process exits 1
        # with stderr output, to exercise the error path.
        async def mock_exec_fn(*args, **kwargs):
            proc = MagicMock()
            proc.wait = MagicMock()
            proc.wait.aio = AsyncMock(return_value=1)  # non-zero exit
            proc.stdin = stdin_mock
            proc.stderr = MagicMock()
            proc.stderr.read = MagicMock()
            proc.stderr.read.aio = AsyncMock(return_value="tar: error")
            return proc

        env._sandbox.exec = MagicMock()
        env._sandbox.exec.aio = mock_exec_fn

        def real_run_coroutine(coro, **kwargs):
            loop = asyncio.new_event_loop()
            try:
                return loop.run_until_complete(coro)
            finally:
                loop.close()

        env._worker.run_coroutine = real_run_coroutine
        with pytest.raises(RuntimeError, match="Modal bulk upload failed"):
            env._modal_bulk_upload(files)

    def test_payload_not_in_command_string(self, monkeypatch, tmp_path):
        """The base64 payload must NOT appear in the bash -c argument.

        This is the core ARG_MAX fix: the payload goes through stdin,
        not embedded in the command string.
        """
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        src = tmp_path / "f.txt"
        src.write_text("some data to upload")
        files = [(str(src), "/root/.hermes/f.txt")]
        exec_calls, _, stdin_mock = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        # The command should NOT contain an echo with the payload
        cmd = exec_calls[0][2]
        assert "echo" not in cmd
        # The payload should go through stdin
        assert len(stdin_mock._written_chunks) > 0

    def test_stdin_chunked_for_large_payloads(self, monkeypatch, tmp_path):
        """Payloads larger than _STDIN_CHUNK_SIZE should be split into multiple writes."""
        env = _make_mock_modal_env(monkeypatch, tmp_path)
        # Use random bytes so gzip cannot compress them -- ensures the
        # base64 payload exceeds one 1 MB chunk.
        import os as _os
        src = tmp_path / "large.bin"
        src.write_bytes(_os.urandom(1024 * 1024 + 512 * 1024))
        files = [(str(src), "/root/.hermes/large.bin")]
        exec_calls, _, stdin_mock = _wire_async_exec(env)
        env._modal_bulk_upload(files)
        # Should have multiple stdin write chunks
        assert len(stdin_mock._written_chunks) >= 2
        # Reassembled payload should still decode to valid tar
        payload = "".join(stdin_mock._written_chunks)
        tar_data = base64.b64decode(payload)
        buf = io.BytesIO(tar_data)
        with tarfile.open(fileobj=buf, mode="r:gz") as tar:
            names = tar.getnames()
            assert "root/.hermes/large.bin" in names

Some files were not shown because too many files have changed in this diff Show more