Merge branch 'feat/ink-refactor' of github.com:NousResearch/hermes-agent into feat/ink-refactor

This commit is contained in:
Brooklyn Nicholson 2026-04-09 19:08:52 -05:00
commit aca479c1ae
110 changed files with 6295 additions and 3193 deletions

View file

@ -1,5 +1,8 @@
FROM debian:13.4
# Disable Python stdout buffering to ensure logs are printed immediately
ENV PYTHONUNBUFFERED=1
# Install system dependencies in one layer, clear APT cache
RUN apt-get update && \
apt-get install -y --no-install-recommends \

View file

@ -33,8 +33,10 @@ Use any model you want — [Nous Portal](https://portal.nousresearch.com), [Open
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
```
Works on Linux, macOS, and WSL2. The installer handles everything — Python, Node.js, dependencies, and the `hermes` command. No prerequisites except git.
Works on Linux, macOS, WSL2, and Android via Termux. The installer handles the platform-specific setup for you.
> **Android / Termux:** The tested manual path is documented in the [Termux guide](https://hermes-agent.nousresearch.com/docs/getting-started/termux). On Termux, Hermes installs a curated `.[termux]` extra because the full `.[all]` extra currently pulls Android-incompatible voice dependencies.
>
> **Windows:** Native Windows is not supported. Please install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and run the command above.
After installation:

View file

@ -1238,10 +1238,27 @@ def build_anthropic_kwargs(
) -> Dict[str, Any]:
"""Build kwargs for anthropic.messages.create().
When *max_tokens* is None, the model's native output limit is used
(e.g. 128K for Opus 4.6, 64K for Sonnet 4.6). If *context_length*
is provided, the effective limit is clamped so it doesn't exceed
the context window.
Naming note — two distinct concepts, easily confused:
max_tokens = OUTPUT token cap for a single response.
Anthropic's API calls this "max_tokens" but it only
limits the *output*. Anthropic's own native SDK
renamed it "max_output_tokens" for clarity.
context_length = TOTAL context window (input tokens + output tokens).
The API enforces: input_tokens + max_tokens ≤ context_length.
Stored on the ContextCompressor; reduced on overflow errors.
When *max_tokens* is None the model's native output ceiling is used
(e.g. 128K for Opus 4.6, 64K for Sonnet 4.6).
When *context_length* is provided and the model's native output ceiling
exceeds it (e.g. a local endpoint with an 8K window), the output cap is
clamped to context_length - 1. This only kicks in for unusually small
context windows; for full-size models the native output cap is always
smaller than the context window so no clamping happens.
NOTE: this clamping does not account for prompt size — if the prompt is
large, Anthropic may still reject the request. The caller must detect
"max_tokens too large given prompt" errors and retry with a smaller cap
(see parse_available_output_tokens_from_error + _ephemeral_max_output_tokens).
When *is_oauth* is True, applies Claude Code compatibility transforms:
system prompt prefix, tool name prefixing, and prompt sanitization.
@ -1256,10 +1273,14 @@ def build_anthropic_kwargs(
anthropic_tools = convert_tools_to_anthropic(tools) if tools else []
model = normalize_model_name(model, preserve_dots=preserve_dots)
# effective_max_tokens = output cap for this call (≠ total context window)
effective_max_tokens = max_tokens or _get_anthropic_max_output(model)
# Clamp to context window if the user set a lower context_length
# (e.g. custom endpoint with limited capacity).
# Clamp output cap to fit inside the total context window.
# Only matters for small custom endpoints where context_length < native
# output ceiling. For standard Anthropic models context_length (e.g.
# 200K) is always larger than the output ceiling (e.g. 128K), so this
# branch is not taken.
if context_length and effective_max_tokens > context_length:
effective_max_tokens = max(context_length - 1, 1)

View file

@ -702,7 +702,7 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
logger.debug("Auxiliary text client: %s (%s) via pool", pconfig.name, model)
extra = {}
if "api.kimi.com" in base_url.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"}
extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"}
elif "api.githubcopilot.com" in base_url.lower():
from hermes_cli.models import copilot_default_headers
@ -721,7 +721,7 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
logger.debug("Auxiliary text client: %s (%s)", pconfig.name, model)
extra = {}
if "api.kimi.com" in base_url.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"}
extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"}
elif "api.githubcopilot.com" in base_url.lower():
from hermes_cli.models import copilot_default_headers
@ -1047,6 +1047,32 @@ def _is_payment_error(exc: Exception) -> bool:
return False
def _is_connection_error(exc: Exception) -> bool:
    """Detect connection/network errors that warrant provider fallback.

    Returns True for errors indicating the provider endpoint is unreachable
    (DNS failure, connection refused, TLS errors, timeouts). These are
    distinct from API errors (4xx/5xx) which indicate the provider IS
    reachable but returned an error.
    """
    from openai import APIConnectionError, APITimeoutError

    # Known SDK exception types are an immediate match.
    if isinstance(exc, (APIConnectionError, APITimeoutError)):
        return True
    # urllib3 / httpx / httpcore connection errors: match on the class name.
    type_name = type(exc).__name__
    for marker in ("Connection", "Timeout", "DNS", "SSL"):
        if marker in type_name:
            return True
    # Last resort: scan the message for well-known network-failure phrases.
    message = str(exc).lower()
    network_phrases = (
        "connection refused", "name or service not known",
        "no route to host", "network is unreachable",
        "timed out", "connection reset",
    )
    return any(phrase in message for phrase in network_phrases)
def _try_payment_fallback(
failed_provider: str,
task: str = None,
@ -1111,7 +1137,7 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
main_model = _read_main_model()
if (main_provider and main_model
and main_provider not in _AGGREGATOR_PROVIDERS
and main_provider not in ("auto", "custom", "")):
and main_provider not in ("auto", "")):
client, resolved = resolve_provider_client(main_provider, main_model)
if client is not None:
logger.info("Auxiliary auto-detect: using main provider %s (%s)",
@ -1169,7 +1195,7 @@ def _to_async_client(sync_client, model: str):
async_kwargs["default_headers"] = copilot_default_headers()
elif "api.kimi.com" in base_lower:
async_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.0"}
async_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.3"}
return AsyncOpenAI(**async_kwargs), model
@ -1289,7 +1315,13 @@ def resolve_provider_client(
)
return None, None
final_model = model or _read_main_model() or "gpt-4o-mini"
client = OpenAI(api_key=custom_key, base_url=custom_base)
extra = {}
if "api.kimi.com" in custom_base.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"}
elif "api.githubcopilot.com" in custom_base.lower():
from hermes_cli.models import copilot_default_headers
extra["default_headers"] = copilot_default_headers()
client = OpenAI(api_key=custom_key, base_url=custom_base, **extra)
return (_to_async_client(client, final_model) if async_mode
else (client, final_model))
# Try custom first, then codex, then API-key providers
@ -1368,7 +1400,7 @@ def resolve_provider_client(
# Provider-specific headers
headers = {}
if "api.kimi.com" in base_url.lower():
headers["User-Agent"] = "KimiCLI/1.0"
headers["User-Agent"] = "KimiCLI/1.3"
elif "api.githubcopilot.com" in base_url.lower():
from hermes_cli.models import copilot_default_headers
@ -2093,7 +2125,18 @@ def call_llm(
# try alternative providers instead of giving up. This handles the
# common case where a user runs out of OpenRouter credits but has
# Codex OAuth or another provider available.
if _is_payment_error(first_err):
#
# ── Connection error fallback ────────────────────────────────
# When a provider endpoint is unreachable (DNS failure, connection
# refused, timeout), try alternative providers. This handles stale
# Codex/OAuth tokens that authenticate but whose endpoint is down,
# and providers the user never configured that got picked up by
# the auto-detection chain.
should_fallback = _is_payment_error(first_err) or _is_connection_error(first_err)
if should_fallback:
reason = "payment error" if _is_payment_error(first_err) else "connection error"
logger.info("Auxiliary %s: %s on %s (%s), trying fallback",
task or "call", reason, resolved_provider, first_err)
fb_client, fb_model, fb_label = _try_payment_fallback(
resolved_provider, task)
if fb_client is not None:

View file

@ -691,33 +691,43 @@ Write only the summary body. Do not include any preamble or prefix."""
)
compressed.append(msg)
_merge_summary_into_tail = False
if summary:
last_head_role = messages[compress_start - 1].get("role", "user") if compress_start > 0 else "user"
first_tail_role = messages[compress_end].get("role", "user") if compress_end < n_messages else "user"
# Pick a role that avoids consecutive same-role with both neighbors.
# Priority: avoid colliding with head (already committed), then tail.
if last_head_role in ("assistant", "tool"):
summary_role = "user"
else:
summary_role = "assistant"
# If the chosen role collides with the tail AND flipping wouldn't
# collide with the head, flip it.
if summary_role == first_tail_role:
flipped = "assistant" if summary_role == "user" else "user"
if flipped != last_head_role:
summary_role = flipped
else:
# Both roles would create consecutive same-role messages
# (e.g. head=assistant, tail=user — neither role works).
# Merge the summary into the first tail message instead
# of inserting a standalone message that breaks alternation.
_merge_summary_into_tail = True
if not _merge_summary_into_tail:
compressed.append({"role": summary_role, "content": summary})
else:
# If LLM summary failed, insert a static fallback so the model
# knows context was lost rather than silently dropping everything.
if not summary:
if not self.quiet_mode:
logger.debug("No summary model available — middle turns dropped without summary")
logger.warning("Summary generation failed — inserting static fallback context marker")
n_dropped = compress_end - compress_start
summary = (
f"{SUMMARY_PREFIX}\n"
f"Summary generation was unavailable. {n_dropped} conversation turns were "
f"removed to free context space but could not be summarized. The removed "
f"turns contained earlier work in this session. Continue based on the "
f"recent messages below and the current state of any files or resources."
)
_merge_summary_into_tail = False
last_head_role = messages[compress_start - 1].get("role", "user") if compress_start > 0 else "user"
first_tail_role = messages[compress_end].get("role", "user") if compress_end < n_messages else "user"
# Pick a role that avoids consecutive same-role with both neighbors.
# Priority: avoid colliding with head (already committed), then tail.
if last_head_role in ("assistant", "tool"):
summary_role = "user"
else:
summary_role = "assistant"
# If the chosen role collides with the tail AND flipping wouldn't
# collide with the head, flip it.
if summary_role == first_tail_role:
flipped = "assistant" if summary_role == "user" else "user"
if flipped != last_head_role:
summary_role = flipped
else:
# Both roles would create consecutive same-role messages
# (e.g. head=assistant, tail=user — neither role works).
# Merge the summary into the first tail message instead
# of inserting a standalone message that breaks alternation.
_merge_summary_into_tail = True
if not _merge_summary_into_tail:
compressed.append({"role": summary_role, "content": summary})
for i in range(compress_end, n_messages):
msg = messages[i].copy()

View file

@ -18,12 +18,14 @@ import hermes_cli.auth as auth_mod
from hermes_cli.auth import (
CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS,
DEFAULT_AGENT_KEY_MIN_TTL_SECONDS,
KIMI_CODE_BASE_URL,
PROVIDER_REGISTRY,
_codex_access_token_is_expiring,
_decode_jwt_claims,
_import_codex_cli_tokens,
_load_auth_store,
_load_provider_state,
_resolve_kimi_base_url,
_resolve_zai_base_url,
read_credential_pool,
write_credential_pool,
@ -1084,7 +1086,9 @@ def _seed_from_env(provider: str, entries: List[PooledCredential]) -> Tuple[bool
active_sources.add(source)
auth_type = AUTH_TYPE_OAUTH if provider == "anthropic" and not token.startswith("sk-ant-api") else AUTH_TYPE_API_KEY
base_url = env_url or pconfig.inference_base_url
if provider == "zai":
if provider == "kimi-coding":
base_url = _resolve_kimi_base_url(token, pconfig.inference_base_url, env_url)
elif provider == "zai":
base_url = _resolve_zai_base_url(token, pconfig.inference_base_url, env_url)
changed |= _upsert_entry(
entries,

View file

@ -596,6 +596,9 @@ def _classify_400(
err_obj = body.get("error", {})
if isinstance(err_obj, dict):
err_body_msg = (err_obj.get("message") or "").strip().lower()
# Responses API (and some providers) use flat body: {"message": "..."}
if not err_body_msg:
err_body_msg = (body.get("message") or "").strip().lower()
is_generic = len(err_body_msg) < 30 or err_body_msg in ("error", "")
is_large = approx_tokens > context_length * 0.4 or approx_tokens > 80000 or num_messages > 80
@ -674,6 +677,27 @@ def _classify_by_message(
should_compress=True,
)
# Usage-limit patterns need the same disambiguation as 402: some providers
# surface "usage limit" errors without an HTTP status code. A transient
# signal ("try again", "resets at", …) means it's a periodic quota, not
# billing exhaustion.
has_usage_limit = any(p in error_msg for p in _USAGE_LIMIT_PATTERNS)
if has_usage_limit:
has_transient_signal = any(p in error_msg for p in _USAGE_LIMIT_TRANSIENT_SIGNALS)
if has_transient_signal:
return result_fn(
FailoverReason.rate_limit,
retryable=True,
should_rotate_credential=True,
should_fallback=True,
)
return result_fn(
FailoverReason.billing,
retryable=False,
should_rotate_credential=True,
should_fallback=True,
)
# Billing patterns
if any(p in error_msg for p in _BILLING_PATTERNS):
return result_fn(

View file

@ -603,6 +603,49 @@ def parse_context_limit_from_error(error_msg: str) -> Optional[int]:
return None
def parse_available_output_tokens_from_error(error_msg: str) -> Optional[int]:
    """Detect an "output cap too large" error and return how many output tokens are available.

    Background — two distinct context errors exist:

    1. "Prompt too long" — the INPUT itself exceeds the context window.
       Fix: compress history and/or halve context_length.
    2. "max_tokens too large" — input is fine, but input + requested_output > window.
       Fix: reduce max_tokens (the output cap) for this call.
       Do NOT touch context_length — the window hasn't shrunk.

    Anthropic's API returns errors like:

        "max_tokens: 32768 > context_window: 200000 - input_tokens: 190000 = available_tokens: 10000"

    Returns the number of output tokens that would fit (e.g. 10000 above), or None if
    the error does not look like a max_tokens-too-large error.
    """
    error_lower = error_msg.lower()
    # Must look like an output-cap error, not a prompt-length error.
    is_output_cap_error = (
        "max_tokens" in error_lower
        and ("available_tokens" in error_lower or "available tokens" in error_lower)
    )
    if not is_output_cap_error:
        return None
    # Extract the available_tokens figure.
    # Anthropic format: "… = available_tokens: 10000"
    patterns = [
        r'available_tokens[:\s]+(\d+)',
        r'available\s+tokens[:\s]+(\d+)',
        # fallback: last number after "=" in expressions like "200000 - 190000 = 10000"
        r'=\s*(\d+)\s*$',
    ]
    for pattern in patterns:
        match = re.search(pattern, error_lower)
        if match:
            tokens = int(match.group(1))
            if tokens >= 1:
                return tokens
    return None
def _model_id_matches(candidate_id: str, lookup_model: str) -> bool:
"""Return True if *candidate_id* (from server) matches *lookup_model* (configured).

View file

@ -1158,7 +1158,7 @@ def main(
providers_order (str): Comma-separated list of OpenRouter providers to try in order (e.g. "anthropic,openai,google")
provider_sort (str): Sort providers by "price", "throughput", or "latency" (OpenRouter only)
max_tokens (int): Maximum tokens for model responses (optional, uses model default if not set)
reasoning_effort (str): OpenRouter reasoning effort level: "xhigh", "high", "medium", "low", "minimal", "none" (default: "medium")
reasoning_effort (str): OpenRouter reasoning effort level: "none", "minimal", "low", "medium", "high", "xhigh" (default: "medium")
reasoning_disabled (bool): Completely disable reasoning/thinking tokens (default: False)
prefill_messages_file (str): Path to JSON file containing prefill messages (list of {role, content} dicts)
max_samples (int): Only process the first N samples from the dataset (optional, processes all if not set)
@ -1227,7 +1227,7 @@ def main(
print("🧠 Reasoning: DISABLED (effort=none)")
elif reasoning_effort:
# Use specified effort level
valid_efforts = ["xhigh", "high", "medium", "low", "minimal", "none"]
valid_efforts = ["none", "minimal", "low", "medium", "high", "xhigh"]
if reasoning_effort not in valid_efforts:
print(f"❌ Error: --reasoning_effort must be one of: {', '.join(valid_efforts)}")
return

View file

@ -48,6 +48,25 @@ model:
# api_key: "your-key-here" # Uncomment to set here instead of .env
base_url: "https://openrouter.ai/api/v1"
# ── Token limits — two settings, easy to confuse ──────────────────────────
#
# context_length: TOTAL context window (input + output tokens combined).
# Controls when Hermes compresses history and validates requests.
# Leave unset — Hermes auto-detects the correct value from the provider.
# Set manually only when auto-detection is wrong (e.g. a local server with
# a custom num_ctx, or a proxy that doesn't expose /v1/models).
#
# context_length: 131072
#
# max_tokens: OUTPUT cap — maximum tokens the model may generate per response.
# Unrelated to how long your conversation history can be.
# The OpenAI-standard name "max_tokens" is a misnomer; Anthropic's native
# API has since renamed it "max_output_tokens" for clarity.
# Leave unset to use the model's native output ceiling (recommended).
# Set only if you want to deliberately limit individual response length.
#
# max_tokens: 8192
# =============================================================================
# OpenRouter Provider Routing (only applies when using OpenRouter)
# =============================================================================

568
cli.py
View file

@ -1046,7 +1046,7 @@ def _cprint(text: str):
# ---------------------------------------------------------------------------
# File-drop detection — extracted as a pure function for testability.
# File-drop / local attachment detection — extracted as pure helpers for tests.
# ---------------------------------------------------------------------------
_IMAGE_EXTENSIONS = frozenset({
@ -1055,12 +1055,103 @@ _IMAGE_EXTENSIONS = frozenset({
})
def _detect_file_drop(user_input: str) -> "dict | None":
"""Detect if *user_input* is a dragged/pasted file path, not a slash command.
from hermes_constants import is_termux as _is_termux_environment
When a user drags a file into the terminal, macOS pastes the absolute path
(e.g. ``/Users/roland/Desktop/file.png``) which starts with ``/`` and would
otherwise be mistaken for a slash command.
def _termux_example_image_path(filename: str = "cat.png") -> str:
"""Return a realistic example media path for the current Termux setup."""
candidates = [
os.path.expanduser("~/storage/shared"),
"/sdcard",
"/storage/emulated/0",
"/storage/self/primary",
]
for root in candidates:
if os.path.isdir(root):
return os.path.join(root, "Pictures", filename)
return os.path.join("~/storage/shared", "Pictures", filename)
def _split_path_input(raw: str) -> tuple[str, str]:
"""Split a leading file path token from trailing free-form text.
Supports quoted paths and backslash-escaped spaces so callers can accept
inputs like:
/tmp/pic.png describe this
~/storage/shared/My\ Photos/cat.png what is this?
"/storage/emulated/0/DCIM/Camera/cat 1.png" summarize
"""
raw = str(raw or "").strip()
if not raw:
return "", ""
if raw[0] in {'"', "'"}:
quote = raw[0]
pos = 1
while pos < len(raw):
ch = raw[pos]
if ch == '\\' and pos + 1 < len(raw):
pos += 2
continue
if ch == quote:
token = raw[1:pos]
remainder = raw[pos + 1 :].strip()
return token, remainder
pos += 1
return raw[1:], ""
pos = 0
while pos < len(raw):
ch = raw[pos]
if ch == '\\' and pos + 1 < len(raw) and raw[pos + 1] == ' ':
pos += 2
elif ch == ' ':
break
else:
pos += 1
token = raw[:pos].replace('\\ ', ' ')
remainder = raw[pos:].strip()
return token, remainder
def _resolve_attachment_path(raw_path: str) -> Path | None:
"""Resolve a user-supplied local attachment path.
Accepts quoted or unquoted paths, expands ``~`` and env vars, and resolves
relative paths from ``TERMINAL_CWD`` when set (matching terminal tool cwd).
Returns ``None`` when the path does not resolve to an existing file.
"""
token = str(raw_path or "").strip()
if not token:
return None
if (token.startswith('"') and token.endswith('"')) or (token.startswith("'") and token.endswith("'")):
token = token[1:-1].strip()
if not token:
return None
expanded = os.path.expandvars(os.path.expanduser(token))
path = Path(expanded)
if not path.is_absolute():
base_dir = Path(os.getenv("TERMINAL_CWD", os.getcwd()))
path = base_dir / path
try:
resolved = path.resolve()
except Exception:
resolved = path
if not resolved.exists() or not resolved.is_file():
return None
return resolved
def _detect_file_drop(user_input: str) -> "dict | None":
"""Detect if *user_input* starts with a real local file path.
This catches dragged/pasted paths before they are mistaken for slash
commands, and also supports Termux-friendly paths like ``~/storage/...``.
Returns a dict on match::
@ -1072,29 +1163,31 @@ def _detect_file_drop(user_input: str) -> "dict | None":
Returns ``None`` when the input is not a real file path.
"""
if not isinstance(user_input, str) or not user_input.startswith("/"):
if not isinstance(user_input, str):
return None
# Walk the string absorbing backslash-escaped spaces ("\ ").
raw = user_input
pos = 0
while pos < len(raw):
ch = raw[pos]
if ch == '\\' and pos + 1 < len(raw) and raw[pos + 1] == ' ':
pos += 2 # skip escaped space
elif ch == ' ':
break
else:
pos += 1
first_token_raw = raw[:pos]
first_token = first_token_raw.replace('\\ ', ' ')
drop_path = Path(first_token)
if not drop_path.exists() or not drop_path.is_file():
stripped = user_input.strip()
if not stripped:
return None
starts_like_path = (
stripped.startswith("/")
or stripped.startswith("~")
or stripped.startswith("./")
or stripped.startswith("../")
or stripped.startswith('"/')
or stripped.startswith('"~')
or stripped.startswith("'/")
or stripped.startswith("'~")
)
if not starts_like_path:
return None
first_token, remainder = _split_path_input(stripped)
drop_path = _resolve_attachment_path(first_token)
if drop_path is None:
return None
remainder = raw[pos:].strip()
return {
"path": drop_path,
"is_image": drop_path.suffix.lower() in _IMAGE_EXTENSIONS,
@ -1102,6 +1195,69 @@ def _detect_file_drop(user_input: str) -> "dict | None":
}
def _format_image_attachment_badges(attached_images: list[Path], image_counter: int, width: int | None = None) -> str:
"""Format the attached-image badge row for the interactive CLI.
Narrow terminals such as Termux should get a compact summary that fits on a
single row, while wider terminals can show the classic per-image badges.
"""
if not attached_images:
return ""
width = width or shutil.get_terminal_size((80, 24)).columns
def _trunc(name: str, limit: int) -> str:
return name if len(name) <= limit else name[: max(1, limit - 3)] + "..."
if width < 52:
if len(attached_images) == 1:
return f"[📎 {_trunc(attached_images[0].name, 20)}]"
return f"[📎 {len(attached_images)} images attached]"
if width < 80:
if len(attached_images) == 1:
return f"[📎 {_trunc(attached_images[0].name, 32)}]"
first = _trunc(attached_images[0].name, 20)
extra = len(attached_images) - 1
return f"[📎 {first}] [+{extra}]"
base = image_counter - len(attached_images) + 1
return " ".join(
f"[📎 Image #{base + i}]"
for i in range(len(attached_images))
)
def _collect_query_images(query: str | None, image_arg: str | None = None) -> tuple[str, list[Path]]:
    """Collect local image attachments for single-query CLI flows."""
    message = query or ""
    images: list[Path] = []
    # A leading dragged/pasted image path in the query text becomes an attachment.
    if isinstance(message, str):
        dropped = _detect_file_drop(message)
        if dropped and dropped.get("is_image"):
            images.append(dropped["path"])
            message = dropped["remainder"] or f"[User attached image: {dropped['path'].name}]"
    # An explicit image argument must resolve to a supported image file.
    if image_arg:
        explicit_path = _resolve_attachment_path(image_arg)
        if explicit_path is None:
            raise ValueError(f"Image file not found: {image_arg}")
        if explicit_path.suffix.lower() not in _IMAGE_EXTENSIONS:
            raise ValueError(f"Not a supported image file: {explicit_path}")
        images.append(explicit_path)
    # De-duplicate while preserving attachment order.
    unique: list[Path] = []
    seen_keys: set[str] = set()
    for candidate in images:
        key = str(candidate)
        if key not in seen_keys:
            seen_keys.add(key)
            unique.append(candidate)
    return message, unique
class ChatConsole:
"""Rich Console adapter for prompt_toolkit's patch_stdout context.
@ -1641,7 +1797,12 @@ class HermesCLI:
return f"[{('' * filled) + ('' * max(0, width - filled))}]"
def _get_status_bar_snapshot(self) -> Dict[str, Any]:
model_name = self.model or "unknown"
# Prefer the agent's model name — it updates on fallback.
# self.model reflects the originally configured model and never
# changes mid-session, so the TUI would show a stale name after
# _try_activate_fallback() switches provider/model.
agent = getattr(self, "agent", None)
model_name = (getattr(agent, "model", None) or self.model or "unknown")
model_short = model_name.split("/")[-1] if "/" in model_name else model_name
if model_short.endswith(".gguf"):
model_short = model_short[:-5]
@ -1667,7 +1828,6 @@ class HermesCLI:
"compressions": 0,
}
agent = getattr(self, "agent", None)
if not agent:
return snapshot
@ -1735,15 +1895,70 @@ class HermesCLI:
width += ch_width
return "".join(out).rstrip() + ellipsis
@staticmethod
def _get_tui_terminal_width(default: tuple[int, int] = (80, 24)) -> int:
"""Return the live prompt_toolkit width, falling back to ``shutil``.
The TUI layout can be narrower than ``shutil.get_terminal_size()`` reports,
especially on Termux/mobile shells, so prefer prompt_toolkit's width whenever
an app is active.
"""
try:
from prompt_toolkit.application import get_app
return get_app().output.get_size().columns
except Exception:
return shutil.get_terminal_size(default).columns
def _use_minimal_tui_chrome(self, width: Optional[int] = None) -> bool:
"""Hide low-value chrome on narrow/mobile terminals to preserve rows."""
if width is None:
width = self._get_tui_terminal_width()
return width < 64
def _tui_input_rule_height(self, position: str, width: Optional[int] = None) -> int:
"""Return the visible height for the top/bottom input separator rules."""
if position not in {"top", "bottom"}:
raise ValueError(f"Unknown input rule position: {position}")
if position == "top":
return 1
return 0 if self._use_minimal_tui_chrome(width=width) else 1
def _agent_spacer_height(self, width: Optional[int] = None) -> int:
"""Return the spacer height shown above the status bar while the agent runs."""
if not getattr(self, "_agent_running", False):
return 0
return 0 if self._use_minimal_tui_chrome(width=width) else 1
def _spinner_widget_height(self, width: Optional[int] = None) -> int:
"""Return the visible height for the spinner/status text line above the status bar."""
if not getattr(self, "_spinner_text", ""):
return 0
return 0 if self._use_minimal_tui_chrome(width=width) else 1
def _get_voice_status_fragments(self, width: Optional[int] = None):
    """Return the voice status bar fragments for the interactive TUI."""
    cols = width or self._get_tui_terminal_width()
    compact = self._use_minimal_tui_chrome(width=cols)
    if self._voice_recording:
        label = " ● REC " if compact else " ● REC Ctrl+B to stop "
        return [("class:voice-status-recording", label)]
    if self._voice_processing:
        label = " ◉ STT " if compact else " ◉ Transcribing... "
        return [("class:voice-status", label)]
    if compact:
        return [("class:voice-status", " 🎤 Ctrl+B ")]
    # Wide idle state: show the active voice-mode toggles inline.
    tts_label = " | TTS on" if self._voice_tts else ""
    cont_label = " | Continuous" if self._voice_continuous else ""
    return [("class:voice-status", f" 🎤 Voice mode{tts_label}{cont_label} — Ctrl+B to record ")]
def _build_status_bar_text(self, width: Optional[int] = None) -> str:
"""Return a compact one-line session status string for the TUI footer."""
try:
snapshot = self._get_status_bar_snapshot()
if width is None:
try:
from prompt_toolkit.application import get_app
width = get_app().output.get_size().columns
except Exception:
width = shutil.get_terminal_size((80, 24)).columns
width = self._get_tui_terminal_width()
percent = snapshot["context_percent"]
percent_label = f"{percent}%" if percent is not None else "--"
duration_label = snapshot["duration"]
@ -1779,11 +1994,7 @@ class HermesCLI:
# values (especially on SSH) that differ from what prompt_toolkit
# actually renders, causing the fragments to overflow to a second
# line and produce duplicated status bar rows over long sessions.
try:
from prompt_toolkit.application import get_app
width = get_app().output.get_size().columns
except Exception:
width = shutil.get_terminal_size((80, 24)).columns
width = self._get_tui_terminal_width()
duration_label = snapshot["duration"]
if width < 52:
@ -2985,6 +3196,14 @@ class HermesCLI:
doesn't fire for image-only clipboard content (e.g., VSCode terminal,
Windows Terminal with WSL2).
"""
if _is_termux_environment():
_cprint(
f" {_DIM}Clipboard image paste is not available on Termux — "
f"use /image <path> or paste a local image path like "
f"{_termux_example_image_path()}{_RST}"
)
return
from hermes_cli.clipboard import has_clipboard_image
if has_clipboard_image():
if self._try_attach_clipboard_image():
@ -2995,6 +3214,7 @@ class HermesCLI:
else:
_cprint(f" {_DIM}(._.) No image found in clipboard{_RST}")
def _write_osc52_clipboard(self, text: str) -> None:
"""Copy *text* to terminal clipboard via OSC 52."""
payload = base64.b64encode(text.encode("utf-8")).decode("ascii")
@ -3051,6 +3271,33 @@ class HermesCLI:
_cprint(f" Clipboard copy failed: {e}")
def _handle_image_command(self, cmd_original: str):
"""Handle /image <path> — attach a local image file for the next prompt."""
raw_args = (cmd_original.split(None, 1)[1].strip() if " " in cmd_original else "")
if not raw_args:
hint = _termux_example_image_path() if _is_termux_environment() else "/path/to/image.png"
_cprint(f" {_DIM}Usage: /image <path> e.g. /image {hint}{_RST}")
return
path_token, _remainder = _split_path_input(raw_args)
image_path = _resolve_attachment_path(path_token)
if image_path is None:
_cprint(f" {_DIM}(>_<) File not found: {path_token}{_RST}")
return
if image_path.suffix.lower() not in _IMAGE_EXTENSIONS:
_cprint(f" {_DIM}(._.) Not a supported image file: {image_path.name}{_RST}")
return
self._attached_images.append(image_path)
_cprint(f" 📎 Attached image: {image_path.name}")
if _remainder:
_cprint(f" {_DIM}Now type your prompt (or use --image in single-query mode): {_remainder}{_RST}")
elif _is_termux_environment():
_cprint(f" {_DIM}Tip: type your next message, or run hermes chat -q --image {_termux_example_image_path(image_path.name)} \"What do you see?\"{_RST}")
def _preprocess_images_with_vision(self, text: str, images: list, *, announce: bool = True) -> str:
"""Analyze attached images via the vision tool and return enriched text.
Instead of embedding raw base64 ``image_url`` content parts in the
@ -3077,7 +3324,8 @@ class HermesCLI:
if not img_path.exists():
continue
size_kb = img_path.stat().st_size // 1024
_cprint(f" {_DIM}👁️ analyzing {img_path.name} ({size_kb}KB)...{_RST}")
if announce:
_cprint(f" {_DIM}👁️ analyzing {img_path.name} ({size_kb}KB)...{_RST}")
try:
result_json = _asyncio.run(
vision_analyze_tool(image_url=str(img_path), user_prompt=analysis_prompt)
@ -3090,21 +3338,24 @@ class HermesCLI:
f"[If you need a closer look, use vision_analyze with "
f"image_url: {img_path}]"
)
_cprint(f" {_DIM}✓ image analyzed{_RST}")
if announce:
_cprint(f" {_DIM}✓ image analyzed{_RST}")
else:
enriched_parts.append(
f"[The user attached an image but it couldn't be analyzed. "
f"You can try examining it with vision_analyze using "
f"image_url: {img_path}]"
)
_cprint(f" {_DIM}⚠ vision analysis failed — path included for retry{_RST}")
if announce:
_cprint(f" {_DIM}⚠ vision analysis failed — path included for retry{_RST}")
except Exception as e:
enriched_parts.append(
f"[The user attached an image but analysis failed ({e}). "
f"You can try examining it with vision_analyze using "
f"image_url: {img_path}]"
)
_cprint(f" {_DIM}⚠ vision analysis error — path included for retry{_RST}")
if announce:
_cprint(f" {_DIM}⚠ vision analysis error — path included for retry{_RST}")
# Combine: vision descriptions first, then the user's original text
user_text = text if isinstance(text, str) and text else ""
@ -3198,7 +3449,10 @@ class HermesCLI:
_cprint(f"\n {_DIM}Tip: Just type your message to chat with Hermes!{_RST}")
_cprint(f" {_DIM}Multi-line: Alt+Enter for a new line{_RST}")
_cprint(f" {_DIM}Paste image: Alt+V (or /paste){_RST}\n")
if _is_termux_environment():
_cprint(f" {_DIM}Attach image: /image {_termux_example_image_path()} or start your prompt with a local image path{_RST}\n")
else:
_cprint(f" {_DIM}Paste image: Alt+V (or /paste){_RST}\n")
def show_tools(self):
"""Display available tools with kawaii ASCII art."""
@ -4102,59 +4356,7 @@ class HermesCLI:
print(" To change model or provider, use: hermes model")
def _handle_prompt_command(self, cmd: str):
"""Handle the /prompt command to view or set system prompt."""
parts = cmd.split(maxsplit=1)
if len(parts) > 1:
# Set new prompt
new_prompt = parts[1].strip()
if new_prompt.lower() == "clear":
self.system_prompt = ""
self.agent = None # Force re-init
if save_config_value("agent.system_prompt", ""):
print("(^_^)b System prompt cleared (saved to config)")
else:
print("(^_^) System prompt cleared (session only)")
else:
self.system_prompt = new_prompt
self.agent = None # Force re-init
if save_config_value("agent.system_prompt", new_prompt):
print("(^_^)b System prompt set (saved to config)")
else:
print("(^_^) System prompt set (session only)")
print(f" \"{new_prompt[:60]}{'...' if len(new_prompt) > 60 else ''}\"")
else:
# Show current prompt
print()
print("+" + "-" * 50 + "+")
print("|" + " " * 15 + "(^_^) System Prompt" + " " * 15 + "|")
print("+" + "-" * 50 + "+")
print()
if self.system_prompt:
# Word wrap the prompt for display
words = self.system_prompt.split()
lines = []
current_line = ""
for word in words:
if len(current_line) + len(word) + 1 <= 50:
current_line += (" " if current_line else "") + word
else:
lines.append(current_line)
current_line = word
if current_line:
lines.append(current_line)
for line in lines:
print(f" {line}")
else:
print(" (no custom prompt set - using default)")
print()
print(" Usage:")
print(" /prompt <text> - Set a custom system prompt")
print(" /prompt clear - Remove custom prompt")
print(" /personality - Use a predefined personality")
print()
@staticmethod
@ -4654,9 +4856,7 @@ class HermesCLI:
self._handle_model_switch(cmd_original)
elif canonical == "provider":
self._show_model_and_providers()
elif canonical == "prompt":
# Use original case so prompt text isn't lowercased
self._handle_prompt_command(cmd_original)
elif canonical == "personality":
# Use original case (handler lowercases the personality name itself)
self._handle_personality_command(cmd_original)
@ -4700,6 +4900,8 @@ class HermesCLI:
self._handle_copy_command(cmd_original)
elif canonical == "paste":
self._handle_paste_command()
elif canonical == "image":
self._handle_image_command(cmd_original)
elif canonical == "reload-mcp":
with self._busy_command(self._slow_command_status(cmd_original)):
self._reload_mcp()
@ -5140,6 +5342,9 @@ class HermesCLI:
def _try_launch_chrome_debug(port: int, system: str) -> bool:
"""Try to launch Chrome/Chromium with remote debugging enabled.
Uses a dedicated user-data-dir so the debug instance doesn't conflict
with an already-running Chrome using the default profile.
Returns True if a launch command was executed (doesn't guarantee success).
"""
import subprocess as _sp
@ -5149,10 +5354,20 @@ class HermesCLI:
if not candidates:
return False
# Dedicated profile dir so debug Chrome won't collide with normal Chrome
data_dir = str(_hermes_home / "chrome-debug")
os.makedirs(data_dir, exist_ok=True)
chrome = candidates[0]
try:
_sp.Popen(
[chrome, f"--remote-debugging-port={port}"],
[
chrome,
f"--remote-debugging-port={port}",
f"--user-data-dir={data_dir}",
"--no-first-run",
"--no-default-browser-check",
],
stdout=_sp.DEVNULL,
stderr=_sp.DEVNULL,
start_new_session=True, # detach from terminal
@ -5227,18 +5442,33 @@ class HermesCLI:
print(f" ✓ Chrome launched and listening on port {_port}")
else:
print(f" ⚠ Chrome launched but port {_port} isn't responding yet")
print(" You may need to close existing Chrome windows first and retry")
print(" Try again in a few seconds — the debug instance may still be starting")
else:
print(" ⚠ Could not auto-launch Chrome")
# Show manual instructions as fallback
_data_dir = str(_hermes_home / "chrome-debug")
sys_name = _plat.system()
if sys_name == "Darwin":
chrome_cmd = 'open -a "Google Chrome" --args --remote-debugging-port=9222'
chrome_cmd = (
'open -a "Google Chrome" --args'
f" --remote-debugging-port=9222"
f' --user-data-dir="{_data_dir}"'
" --no-first-run --no-default-browser-check"
)
elif sys_name == "Windows":
chrome_cmd = 'chrome.exe --remote-debugging-port=9222'
chrome_cmd = (
f'chrome.exe --remote-debugging-port=9222'
f' --user-data-dir="{_data_dir}"'
f" --no-first-run --no-default-browser-check"
)
else:
chrome_cmd = "google-chrome --remote-debugging-port=9222"
print(f" Launch Chrome manually: {chrome_cmd}")
chrome_cmd = (
f"google-chrome --remote-debugging-port=9222"
f' --user-data-dir="{_data_dir}"'
f" --no-first-run --no-default-browser-check"
)
print(f" Launch Chrome manually:")
print(f" {chrome_cmd}")
else:
print(f" ⚠ Port {_port} is not reachable at {cdp_url}")
@ -5411,7 +5641,7 @@ class HermesCLI:
Usage:
/reasoning Show current effort level and display state
/reasoning <level> Set reasoning effort (none, low, medium, high, xhigh)
/reasoning <level> Set reasoning effort (none, minimal, low, medium, high, xhigh)
/reasoning show|on Show model thinking/reasoning in output
/reasoning hide|off Hide model thinking/reasoning from output
"""
@ -5429,7 +5659,7 @@ class HermesCLI:
display_state = "on ✓" if self.show_reasoning else "off"
_cprint(f" {_GOLD}Reasoning effort: {level}{_RST}")
_cprint(f" {_GOLD}Reasoning display: {display_state}{_RST}")
_cprint(f" {_DIM}Usage: /reasoning <none|low|medium|high|xhigh|show|hide>{_RST}")
_cprint(f" {_DIM}Usage: /reasoning <none|minimal|low|medium|high|xhigh|show|hide>{_RST}")
return
arg = parts[1].strip().lower()
@ -5455,7 +5685,7 @@ class HermesCLI:
parsed = _parse_reasoning_config(arg)
if parsed is None:
_cprint(f" {_DIM}(._.) Unknown argument: {arg}{_RST}")
_cprint(f" {_DIM}Valid levels: none, low, minimal, medium, high, xhigh{_RST}")
_cprint(f" {_DIM}Valid levels: none, minimal, low, medium, high, xhigh{_RST}")
_cprint(f" {_DIM}Display: show, hide{_RST}")
return
@ -5867,10 +6097,23 @@ class HermesCLI:
"""Start capturing audio from the microphone."""
if getattr(self, '_should_exit', False):
return
from tools.voice_mode import AudioRecorder, check_voice_requirements
from tools.voice_mode import create_audio_recorder, check_voice_requirements
reqs = check_voice_requirements()
if not reqs["audio_available"]:
if _is_termux_environment():
details = reqs.get("details", "")
if "Termux:API Android app is not installed" in details:
raise RuntimeError(
"Termux:API command package detected, but the Android app is missing.\n"
"Install/update the Termux:API Android app, then retry /voice on.\n"
"Fallback: pkg install python-numpy portaudio && python -m pip install sounddevice"
)
raise RuntimeError(
"Voice mode requires either Termux:API microphone access or Python audio libraries.\n"
"Option 1: pkg install termux-api and install the Termux:API Android app\n"
"Option 2: pkg install python-numpy portaudio && python -m pip install sounddevice"
)
raise RuntimeError(
"Voice mode requires sounddevice and numpy.\n"
"Install with: pip install sounddevice numpy\n"
@ -5899,7 +6142,7 @@ class HermesCLI:
pass
if self._voice_recorder is None:
self._voice_recorder = AudioRecorder()
self._voice_recorder = create_audio_recorder()
# Apply config-driven silence params
self._voice_recorder._silence_threshold = voice_cfg.get("silence_threshold", 200)
@ -5928,7 +6171,13 @@ class HermesCLI:
with self._voice_lock:
self._voice_recording = False
raise
_cprint(f"\n{_GOLD}● Recording...{_RST} {_DIM}(auto-stops on silence | Ctrl+B to stop & exit continuous){_RST}")
if getattr(self._voice_recorder, "supports_silence_autostop", True):
_recording_hint = "auto-stops on silence | Ctrl+B to stop & exit continuous"
elif _is_termux_environment():
_recording_hint = "Termux:API capture | Ctrl+B to stop"
else:
_recording_hint = "Ctrl+B to stop"
_cprint(f"\n{_GOLD}● Recording...{_RST} {_DIM}({_recording_hint}){_RST}")
# Periodically refresh prompt to update audio level indicator
def _refresh_level():
@ -6136,8 +6385,13 @@ class HermesCLI:
for line in reqs["details"].split("\n"):
_cprint(f" {_DIM}{line}{_RST}")
if reqs["missing_packages"]:
_cprint(f"\n {_BOLD}Install: pip install {' '.join(reqs['missing_packages'])}{_RST}")
_cprint(f" {_DIM}Or: pip install hermes-agent[voice]{_RST}")
if _is_termux_environment():
_cprint(f"\n {_BOLD}Option 1: pkg install termux-api{_RST}")
_cprint(f" {_DIM}Then install/update the Termux:API Android app for microphone capture{_RST}")
_cprint(f" {_BOLD}Option 2: pkg install python-numpy portaudio && python -m pip install sounddevice{_RST}")
else:
_cprint(f"\n {_BOLD}Install: pip install {' '.join(reqs['missing_packages'])}{_RST}")
_cprint(f" {_DIM}Or: pip install hermes-agent[voice]{_RST}")
return
with self._voice_lock:
@ -7091,27 +7345,39 @@ class HermesCLI:
def _get_tui_prompt_fragments(self):
"""Return the prompt_toolkit fragments for the current interactive state."""
symbol, state_suffix = self._get_tui_prompt_symbols()
compact = self._use_minimal_tui_chrome(width=self._get_tui_terminal_width())
def _state_fragment(style: str, icon: str, extra: str = ""):
if compact:
text = icon
if extra:
text = f"{text} {extra.strip()}".rstrip()
return [(style, text + " ")]
if extra:
return [(style, f"{icon} {extra} {state_suffix}")]
return [(style, f"{icon} {state_suffix}")]
if self._voice_recording:
bar = self._audio_level_bar()
return [("class:voice-recording", f"{bar} {state_suffix}")]
return _state_fragment("class:voice-recording", "", bar)
if self._voice_processing:
return [("class:voice-processing", f"{state_suffix}")]
return _state_fragment("class:voice-processing", "")
if self._sudo_state:
return [("class:sudo-prompt", f"🔐 {state_suffix}")]
return _state_fragment("class:sudo-prompt", "🔐")
if self._secret_state:
return [("class:sudo-prompt", f"🔑 {state_suffix}")]
return _state_fragment("class:sudo-prompt", "🔑")
if self._approval_state:
return [("class:prompt-working", f"{state_suffix}")]
return _state_fragment("class:prompt-working", "")
if self._clarify_freetext:
return [("class:clarify-selected", f" {state_suffix}")]
return _state_fragment("class:clarify-selected", "")
if self._clarify_state:
return [("class:prompt-working", f"? {state_suffix}")]
return _state_fragment("class:prompt-working", "?")
if self._command_running:
return [("class:prompt-working", f"{self._command_spinner_frame()} {state_suffix}")]
return _state_fragment("class:prompt-working", self._command_spinner_frame())
if self._agent_running:
return [("class:prompt-working", f" {state_suffix}")]
return _state_fragment("class:prompt-working", "")
if self._voice_mode:
return [("class:voice-prompt", f"🎤 {state_suffix}")]
return _state_fragment("class:voice-prompt", "🎤")
return [("class:prompt", symbol)]
def _get_tui_prompt_text(self) -> str:
@ -7967,9 +8233,9 @@ class HermesCLI:
def get_hint_height():
if cli_ref._sudo_state or cli_ref._secret_state or cli_ref._approval_state or cli_ref._clarify_state or cli_ref._command_running:
return 1
# Keep a 1-line spacer while agent runs so output doesn't push
# right up against the top rule of the input area
return 1 if cli_ref._agent_running else 0
# Keep a spacer while the agent runs on roomy terminals, but reclaim
# the row on narrow/mobile screens where every line matters.
return cli_ref._agent_spacer_height()
def get_spinner_text():
txt = cli_ref._spinner_text
@ -7978,7 +8244,7 @@ class HermesCLI:
return [('class:hint', f' {txt}')]
def get_spinner_height():
return 1 if cli_ref._spinner_text else 0
return cli_ref._spinner_widget_height()
spinner_widget = Window(
content=FormattedTextControl(get_spinner_text),
@ -8169,18 +8435,17 @@ class HermesCLI:
filter=Condition(lambda: cli_ref._approval_state is not None),
)
# Horizontal rules above and below the input (bronze, 1 line each).
# The bottom rule moves down as the TextArea grows with newlines.
# Using char='─' instead of hardcoded repetition so the rule
# always spans the full terminal width on any screen size.
# Horizontal rules above and below the input.
# On narrow/mobile terminals we keep the top separator for structure but
# hide the bottom one to recover a full row for conversation content.
input_rule_top = Window(
char='',
height=1,
height=lambda: cli_ref._tui_input_rule_height("top"),
style='class:input-rule',
)
input_rule_bot = Window(
char='',
height=1,
height=lambda: cli_ref._tui_input_rule_height("bottom"),
style='class:input-rule',
)
@ -8190,10 +8455,9 @@ class HermesCLI:
def _get_image_bar():
if not cli_ref._attached_images:
return []
base = cli_ref._image_counter - len(cli_ref._attached_images) + 1
badges = " ".join(
f"[📎 Image #{base + i}]"
for i in range(len(cli_ref._attached_images))
badges = _format_image_attachment_badges(
cli_ref._attached_images,
cli_ref._image_counter,
)
return [("class:image-badge", f" {badges} ")]
@ -8204,13 +8468,7 @@ class HermesCLI:
# Persistent voice mode status bar (visible only when voice mode is on)
def _get_voice_status():
if cli_ref._voice_recording:
return [('class:voice-status-recording', ' ● REC Ctrl+B to stop ')]
if cli_ref._voice_processing:
return [('class:voice-status', ' ◉ Transcribing... ')]
tts = " | TTS on" if cli_ref._voice_tts else ""
cont = " | Continuous" if cli_ref._voice_continuous else ""
return [('class:voice-status', f' 🎤 Voice mode{tts}{cont} — Ctrl+B to record ')]
return cli_ref._get_voice_status_fragments()
voice_status_bar = ConditionalContainer(
Window(
@ -8666,6 +8924,7 @@ class HermesCLI:
def main(
query: str = None,
q: str = None,
image: str = None,
toolsets: str = None,
skills: str | list[str] | tuple[str, ...] = None,
model: str = None,
@ -8691,6 +8950,7 @@ def main(
Args:
query: Single query to execute (then exit). Alias: -q
q: Shorthand for --query
image: Optional local image path to attach to a single query
toolsets: Comma-separated list of toolsets to enable (e.g., "web,terminal")
skills: Comma-separated or repeated list of skills to preload for the session
model: Model to use (default: anthropic/claude-opus-4-20250514)
@ -8711,6 +8971,7 @@ def main(
python cli.py --toolsets web,terminal # Use specific toolsets
python cli.py --skills hermes-agent-dev,github-auth
python cli.py -q "What is Python?" # Single query mode
python cli.py -q "Describe this" --image ~/storage/shared/Pictures/cat.png
python cli.py --list-tools # List tools and exit
python cli.py --resume 20260225_143052_a1b2c3 # Resume session
python cli.py -w # Start in isolated git worktree
@ -8833,13 +9094,21 @@ def main(
atexit.register(_run_cleanup)
# Handle single query mode
if query:
if query or image:
query, single_query_images = _collect_query_images(query, image)
if quiet:
# Quiet mode: suppress banner, spinner, tool previews.
# Only print the final response and parseable session info.
cli.tool_progress_mode = "off"
if cli._ensure_runtime_credentials():
turn_route = cli._resolve_turn_agent_config(query)
effective_query = query
if single_query_images:
effective_query = cli._preprocess_images_with_vision(
query,
single_query_images,
announce=False,
)
turn_route = cli._resolve_turn_agent_config(effective_query)
if turn_route["signature"] != cli._active_agent_route_signature:
cli.agent = None
if cli._init_agent(
@ -8848,8 +9117,9 @@ def main(
route_label=turn_route["label"],
):
cli.agent.quiet_mode = True
cli.agent.suppress_status_output = True
result = cli.agent.run_conversation(
user_message=query,
user_message=effective_query,
conversation_history=cli.conversation_history,
)
response = result.get("final_response", "") if isinstance(result, dict) else str(result)
@ -8864,8 +9134,10 @@ def main(
sys.exit(1)
else:
cli.show_banner()
cli.console.print(f"[bold blue]Query:[/] {query}")
cli.chat(query)
_query_label = query or ("[image attached]" if single_query_images else "")
if _query_label:
cli.console.print(f"[bold blue]Query:[/] {_query_label}")
cli.chat(query, images=single_query_images or None)
cli._print_exit_summary()
return

15
constraints-termux.txt Normal file
View file

@ -0,0 +1,15 @@
# Termux / Android dependency constraints for Hermes Agent.
#
# Usage:
# python -m pip install -e '.[termux]' -c constraints-termux.txt
#
# These pins keep the tested Android install path stable when upstream packages
# move faster than Termux-compatible wheels / sdists.
ipython<10
jedi>=0.18.1,<0.20
parso>=0.8.4,<0.9
stack-data>=0.6,<0.7
pexpect>4.3,<5
matplotlib-inline>=0.1.7,<0.2
asttokens>=2.1,<3

View file

@ -532,6 +532,8 @@ def load_gateway_config() -> GatewayConfig:
bridged["reply_prefix"] = platform_cfg["reply_prefix"]
if "require_mention" in platform_cfg:
bridged["require_mention"] = platform_cfg["require_mention"]
if "free_response_channels" in platform_cfg:
bridged["free_response_channels"] = platform_cfg["free_response_channels"]
if "mention_patterns" in platform_cfg:
bridged["mention_patterns"] = platform_cfg["mention_patterns"]
if not bridged:
@ -546,6 +548,19 @@ def load_gateway_config() -> GatewayConfig:
plat_data["extra"] = extra
extra.update(bridged)
# Slack settings → env vars (env vars take precedence)
slack_cfg = yaml_cfg.get("slack", {})
if isinstance(slack_cfg, dict):
if "require_mention" in slack_cfg and not os.getenv("SLACK_REQUIRE_MENTION"):
os.environ["SLACK_REQUIRE_MENTION"] = str(slack_cfg["require_mention"]).lower()
if "allow_bots" in slack_cfg and not os.getenv("SLACK_ALLOW_BOTS"):
os.environ["SLACK_ALLOW_BOTS"] = str(slack_cfg["allow_bots"]).lower()
frc = slack_cfg.get("free_response_channels")
if frc is not None and not os.getenv("SLACK_FREE_RESPONSE_CHANNELS"):
if isinstance(frc, list):
frc = ",".join(str(v) for v in frc)
os.environ["SLACK_FREE_RESPONSE_CHANNELS"] = str(frc)
# Discord settings → env vars (env vars take precedence)
discord_cfg = yaml_cfg.get("discord", {})
if isinstance(discord_cfg, dict):

View file

@ -10,18 +10,142 @@ import logging
import os
import random
import re
import subprocess
import sys
import uuid
from abc import ABC, abstractmethod
from urllib.parse import urlsplit
logger = logging.getLogger(__name__)
def _detect_macos_system_proxy() -> str | None:
"""Read the macOS system HTTP(S) proxy via ``scutil --proxy``.
Returns an ``http://host:port`` URL string if an HTTP or HTTPS proxy is
enabled, otherwise *None*. Falls back silently on non-macOS or on any
subprocess error.
"""
if sys.platform != "darwin":
return None
try:
out = subprocess.check_output(
["scutil", "--proxy"], timeout=3, text=True, stderr=subprocess.DEVNULL,
)
except Exception:
return None
props: dict[str, str] = {}
for line in out.splitlines():
line = line.strip()
if " : " in line:
key, _, val = line.partition(" : ")
props[key.strip()] = val.strip()
# Prefer HTTPS, fall back to HTTP
for enable_key, host_key, port_key in (
("HTTPSEnable", "HTTPSProxy", "HTTPSPort"),
("HTTPEnable", "HTTPProxy", "HTTPPort"),
):
if props.get(enable_key) == "1":
host = props.get(host_key)
port = props.get(port_key)
if host and port:
return f"http://{host}:{port}"
return None
def resolve_proxy_url(platform_env_var: str | None = None) -> str | None:
"""Return a proxy URL from env vars, or macOS system proxy.
Check order:
0. *platform_env_var* (e.g. ``DISCORD_PROXY``) highest priority
1. HTTPS_PROXY / HTTP_PROXY / ALL_PROXY (and lowercase variants)
2. macOS system proxy via ``scutil --proxy`` (auto-detect)
Returns *None* if no proxy is found.
"""
if platform_env_var:
value = (os.environ.get(platform_env_var) or "").strip()
if value:
return value
for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY",
"https_proxy", "http_proxy", "all_proxy"):
value = (os.environ.get(key) or "").strip()
if value:
return value
return _detect_macos_system_proxy()
def proxy_kwargs_for_bot(proxy_url: str | None) -> dict:
"""Build kwargs for ``commands.Bot()`` / ``discord.Client()`` with proxy.
Returns:
- SOCKS URL ``{"connector": ProxyConnector(..., rdns=True)}``
- HTTP URL ``{"proxy": url}``
- *None* ``{}``
``rdns=True`` forces remote DNS resolution through the proxy required
by many SOCKS implementations (Shadowrocket, Clash) and essential for
bypassing DNS pollution behind the GFW.
"""
if not proxy_url:
return {}
if proxy_url.lower().startswith("socks"):
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy_url, rdns=True)
return {"connector": connector}
except ImportError:
logger.warning(
"aiohttp_socks not installed — SOCKS proxy %s ignored. "
"Run: pip install aiohttp-socks",
proxy_url,
)
return {}
return {"proxy": proxy_url}
def proxy_kwargs_for_aiohttp(proxy_url: str | None) -> tuple[dict, dict]:
"""Build kwargs for standalone ``aiohttp.ClientSession`` with proxy.
Returns ``(session_kwargs, request_kwargs)`` where:
- SOCKS ``({"connector": ProxyConnector(...)}, {})``
- HTTP ``({}, {"proxy": url})``
- None ``({}, {})``
Usage::
sess_kw, req_kw = proxy_kwargs_for_aiohttp(proxy_url)
async with aiohttp.ClientSession(**sess_kw) as session:
async with session.get(url, **req_kw) as resp:
...
"""
if not proxy_url:
return {}, {}
if proxy_url.lower().startswith("socks"):
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy_url, rdns=True)
return {"connector": connector}, {}
except ImportError:
logger.warning(
"aiohttp_socks not installed — SOCKS proxy %s ignored. "
"Run: pip install aiohttp-socks",
proxy_url,
)
return {}, {}
return {}, {"proxy": proxy_url}
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable, Awaitable, Tuple
from enum import Enum
import sys
from pathlib import Path as _Path
sys.path.insert(0, str(_Path(__file__).resolve().parents[2]))

View file

@ -529,10 +529,17 @@ class DiscordAdapter(BasePlatformAdapter):
intents.members = any(not entry.isdigit() for entry in self._allowed_user_ids)
intents.voice_states = True
# Create bot
# Resolve proxy (DISCORD_PROXY > generic env vars > macOS system proxy)
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_bot
proxy_url = resolve_proxy_url(platform_env_var="DISCORD_PROXY")
if proxy_url:
logger.info("[%s] Using proxy for Discord: %s", self.name, proxy_url)
# Create bot — proxy= for HTTP, connector= for SOCKS
self._client = commands.Bot(
command_prefix="!", # Not really used, we handle raw messages
intents=intents,
**proxy_kwargs_for_bot(proxy_url),
)
adapter_self = self # capture for closure
@ -1307,8 +1314,11 @@ class DiscordAdapter(BasePlatformAdapter):
# Download the image and send as a Discord file attachment
# (Discord renders attachments inline, unlike plain URLs)
async with aiohttp.ClientSession() as session:
async with session.get(image_url, timeout=aiohttp.ClientTimeout(total=30)) as resp:
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp
_proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY")
_sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy)
async with aiohttp.ClientSession(**_sess_kw) as session:
async with session.get(image_url, timeout=aiohttp.ClientTimeout(total=30), **_req_kw) as resp:
if resp.status != 200:
raise Exception(f"Failed to download image: HTTP {resp.status}")
@ -1585,7 +1595,7 @@ class DiscordAdapter(BasePlatformAdapter):
await self._run_simple_slash(interaction, f"/model {name}".strip())
@tree.command(name="reasoning", description="Show or change reasoning effort")
@discord.app_commands.describe(effort="Reasoning effort: xhigh, high, medium, low, minimal, or none.")
@discord.app_commands.describe(effort="Reasoning effort: none, minimal, low, medium, high, or xhigh.")
async def slash_reasoning(interaction: discord.Interaction, effort: str = ""):
await self._run_simple_slash(interaction, f"/reasoning {effort}".strip())
@ -2391,10 +2401,14 @@ class DiscordAdapter(BasePlatformAdapter):
else:
try:
import aiohttp
async with aiohttp.ClientSession() as session:
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp
_proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY")
_sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy)
async with aiohttp.ClientSession(**_sess_kw) as session:
async with session.get(
att.url,
timeout=aiohttp.ClientTimeout(total=30),
**_req_kw,
) as resp:
if resp.status != 200:
raise Exception(f"HTTP {resp.status}")

View file

@ -14,6 +14,7 @@ import logging
import os
import re
import time
from dataclasses import dataclass, field
from typing import Dict, Optional, Any, Tuple
try:
@ -45,6 +46,14 @@ from gateway.platforms.base import (
logger = logging.getLogger(__name__)
@dataclass
class _ThreadContextCache:
    """Cache entry for fetched thread context."""

    # Rendered thread context text returned by the fetch.
    content: str
    # Monotonic timestamp of when the fetch happened; compared against the
    # adapter's _THREAD_CACHE_TTL to expire stale entries.
    fetched_at: float = field(default_factory=time.monotonic)
    # Presumably the number of messages captured in this snapshot — confirm
    # against the _fetch_thread_context caller before relying on it.
    message_count: int = 0
def check_slack_requirements() -> bool:
    """Report whether the optional Slack dependencies are available."""
    return bool(SLACK_AVAILABLE)
@ -101,6 +110,9 @@ class SlackAdapter(BasePlatformAdapter):
# session + memory scoping.
self._assistant_threads: Dict[Tuple[str, str], Dict[str, str]] = {}
self._ASSISTANT_THREADS_MAX = 5000
# Cache for _fetch_thread_context results: cache_key → _ThreadContextCache
self._thread_context_cache: Dict[str, _ThreadContextCache] = {}
self._THREAD_CACHE_TTL = 60.0
async def connect(self) -> bool:
"""Connect to Slack via Socket Mode."""
@ -281,6 +293,7 @@ class SlackAdapter(BasePlatformAdapter):
kwargs = {
"channel": chat_id,
"text": chunk,
"mrkdwn": True,
}
if thread_ts:
kwargs["thread_ts"] = thread_ts
@ -323,9 +336,7 @@ class SlackAdapter(BasePlatformAdapter):
if not self._app:
return SendResult(success=False, error="Not connected")
try:
# Convert standard markdown → Slack mrkdwn
formatted = self.format_message(content)
await self._get_client(chat_id).chat_update(
channel=chat_id,
ts=message_id,
@ -457,13 +468,36 @@ class SlackAdapter(BasePlatformAdapter):
text = re.sub(r'(`[^`]+`)', lambda m: _ph(m.group(0)), text)
# 3) Convert markdown links [text](url) → <url|text>
def _convert_markdown_link(m):
label = m.group(1)
url = m.group(2).strip()
if url.startswith('<') and url.endswith('>'):
url = url[1:-1].strip()
return _ph(f'<{url}|{label}>')
text = re.sub(
r'\[([^\]]+)\]\(([^)]+)\)',
lambda m: _ph(f'<{m.group(2)}|{m.group(1)}>'),
r'\[([^\]]+)\]\(([^()]*(?:\([^()]*\)[^()]*)*)\)',
_convert_markdown_link,
text,
)
# 4) Convert headers (## Title) → *Title* (bold)
# 4) Protect existing Slack entities/manual links so escaping and later
# formatting passes don't break them.
text = re.sub(
r'(<(?:[@#!]|(?:https?|mailto|tel):)[^>\n]+>)',
lambda m: _ph(m.group(1)),
text,
)
# 5) Protect blockquote markers before escaping
text = re.sub(r'^(>+\s)', lambda m: _ph(m.group(0)), text, flags=re.MULTILINE)
# 6) Escape Slack control characters in remaining plain text.
# Unescape first so already-escaped input doesn't get double-escaped.
text = text.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
# 7) Convert headers (## Title) → *Title* (bold)
def _convert_header(m):
inner = m.group(1).strip()
# Strip redundant bold markers inside a header
@ -474,34 +508,39 @@ class SlackAdapter(BasePlatformAdapter):
r'^#{1,6}\s+(.+)$', _convert_header, text, flags=re.MULTILINE
)
# 5) Convert bold: **text** → *text* (Slack bold)
# 8) Convert bold+italic: ***text*** → *_text_* (Slack bold wrapping italic)
text = re.sub(
r'\*\*\*(.+?)\*\*\*',
lambda m: _ph(f'*_{m.group(1)}_*'),
text,
)
# 9) Convert bold: **text** → *text* (Slack bold)
text = re.sub(
r'\*\*(.+?)\*\*',
lambda m: _ph(f'*{m.group(1)}*'),
text,
)
# 6) Convert italic: _text_ stays as _text_ (already Slack italic)
# Single *text* → _text_ (Slack italic)
# 10) Convert italic: _text_ stays as _text_ (already Slack italic)
# Single *text* → _text_ (Slack italic)
text = re.sub(
r'(?<!\*)\*([^*\n]+)\*(?!\*)',
lambda m: _ph(f'_{m.group(1)}_'),
text,
)
# 7) Convert strikethrough: ~~text~~ → ~text~
# 11) Convert strikethrough: ~~text~~ → ~text~
text = re.sub(
r'~~(.+?)~~',
lambda m: _ph(f'~{m.group(1)}~'),
text,
)
# 8) Convert blockquotes: > text → > text (same syntax, just ensure
# no extra escaping happens to the > character)
# Slack uses the same > prefix, so this is a no-op for content.
# 12) Blockquotes: > prefix is already protected by step 5 above.
# 9) Restore placeholders in reverse order
for key in reversed(list(placeholders.keys())):
# 13) Restore placeholders in reverse order
for key in reversed(placeholders):
text = text.replace(key, placeholders[key])
return text
@ -914,9 +953,26 @@ class SlackAdapter(BasePlatformAdapter):
if v > cutoff
}
# Ignore bot messages (including our own)
# Bot message filtering (SLACK_ALLOW_BOTS / config allow_bots):
# "none" — ignore all bot messages (default, backward-compatible)
# "mentions" — accept bot messages only when they @mention us
# "all" — accept all bot messages (except our own)
if event.get("bot_id") or event.get("subtype") == "bot_message":
return
allow_bots = self.config.extra.get("allow_bots", "")
if not allow_bots:
allow_bots = os.getenv("SLACK_ALLOW_BOTS", "none")
allow_bots = str(allow_bots).lower().strip()
if allow_bots == "none":
return
elif allow_bots == "mentions":
text_check = event.get("text", "")
if self._bot_user_id and f"<@{self._bot_user_id}>" not in text_check:
return
# "all" falls through to process the message
# Always ignore our own messages to prevent echo loops
msg_user = event.get("user", "")
if msg_user and self._bot_user_id and msg_user == self._bot_user_id:
return
# Ignore message edits and deletions
subtype = event.get("subtype")
@ -948,7 +1004,7 @@ class SlackAdapter(BasePlatformAdapter):
channel_type = event.get("channel_type", "")
if not channel_type and channel_id.startswith("D"):
channel_type = "im"
is_dm = channel_type == "im"
is_dm = channel_type in ("im", "mpim") # Both 1:1 and group DMs
# Build thread_ts for session keying.
# In channels: fall back to ts so each top-level @mention starts a
@ -961,6 +1017,8 @@ class SlackAdapter(BasePlatformAdapter):
thread_ts = event.get("thread_ts") or ts # ts fallback for channels
# In channels, respond if:
# 0. Channel is in free_response_channels, OR require_mention is
# disabled — always process regardless of mention.
# 1. The bot is @mentioned in this message, OR
# 2. The message is a reply in a thread the bot started/participated in, OR
# 3. The message is in a thread where the bot was previously @mentioned, OR
@ -970,24 +1028,29 @@ class SlackAdapter(BasePlatformAdapter):
event_thread_ts = event.get("thread_ts")
is_thread_reply = bool(event_thread_ts and event_thread_ts != ts)
if not is_dm and bot_uid and not is_mentioned:
reply_to_bot_thread = (
is_thread_reply and event_thread_ts in self._bot_message_ts
)
in_mentioned_thread = (
event_thread_ts is not None
and event_thread_ts in self._mentioned_threads
)
has_session = (
is_thread_reply
and self._has_active_session_for_thread(
channel_id=channel_id,
thread_ts=event_thread_ts,
user_id=user_id,
if not is_dm and bot_uid:
if channel_id in self._slack_free_response_channels():
pass # Free-response channel — always process
elif not self._slack_require_mention():
pass # Mention requirement disabled globally for Slack
elif not is_mentioned:
reply_to_bot_thread = (
is_thread_reply and event_thread_ts in self._bot_message_ts
)
)
if not reply_to_bot_thread and not in_mentioned_thread and not has_session:
return
in_mentioned_thread = (
event_thread_ts is not None
and event_thread_ts in self._mentioned_threads
)
has_session = (
is_thread_reply
and self._has_active_session_for_thread(
channel_id=channel_id,
thread_ts=event_thread_ts,
user_id=user_id,
)
)
if not reply_to_bot_thread and not in_mentioned_thread and not has_session:
return
if is_mentioned:
# Strip the bot mention from the text
@ -1128,14 +1191,19 @@ class SlackAdapter(BasePlatformAdapter):
reply_to_message_id=thread_ts if thread_ts != ts else None,
)
# Add 👀 reaction to acknowledge receipt
await self._add_reaction(channel_id, ts, "eyes")
# Only react when bot is directly addressed (DM or @mention).
# In listen-all channels (require_mention=false), reacting to every
# casual message would be noisy.
_should_react = is_dm or is_mentioned
if _should_react:
await self._add_reaction(channel_id, ts, "eyes")
await self.handle_message(msg_event)
# Replace 👀 with ✅ when done
await self._remove_reaction(channel_id, ts, "eyes")
await self._add_reaction(channel_id, ts, "white_check_mark")
if _should_react:
await self._remove_reaction(channel_id, ts, "eyes")
await self._add_reaction(channel_id, ts, "white_check_mark")
# ----- Approval button support (Block Kit) -----
@ -1229,6 +1297,20 @@ class SlackAdapter(BasePlatformAdapter):
msg_ts = message.get("ts", "")
channel_id = body.get("channel", {}).get("id", "")
user_name = body.get("user", {}).get("name", "unknown")
user_id = body.get("user", {}).get("id", "")
# Only authorized users may click approval buttons. Button clicks
# bypass the normal message auth flow in gateway/run.py, so we must
# check here as well.
allowed_csv = os.getenv("SLACK_ALLOWED_USERS", "").strip()
if allowed_csv:
allowed_ids = {uid.strip() for uid in allowed_csv.split(",") if uid.strip()}
if "*" not in allowed_ids and user_id not in allowed_ids:
logger.warning(
"[Slack] Unauthorized approval click by %s (%s) — ignoring",
user_name, user_id,
)
return
# Map action_id to approval choice
choice_map = {
@ -1239,10 +1321,9 @@ class SlackAdapter(BasePlatformAdapter):
}
choice = choice_map.get(action_id, "deny")
# Prevent double-clicks
if self._approval_resolved.get(msg_ts, False):
# Prevent double-clicks — atomic pop; first caller gets False, others get True (default)
if self._approval_resolved.pop(msg_ts, True):
return
self._approval_resolved[msg_ts] = True
# Update the message to show the decision and remove buttons
label_map = {
@ -1297,8 +1378,7 @@ class SlackAdapter(BasePlatformAdapter):
except Exception as exc:
logger.error("Failed to resolve gateway approval from Slack button: %s", exc)
# Clean up stale approval state
self._approval_resolved.pop(msg_ts, None)
# (approval state already consumed by atomic pop above)
# ----- Thread context fetching -----
@ -1309,57 +1389,104 @@ class SlackAdapter(BasePlatformAdapter):
"""Fetch recent thread messages to provide context when the bot is
mentioned mid-thread for the first time.
Returns a formatted string with thread history, or empty string on
failure or if the thread is empty (just the parent message).
This method is only called when there is NO active session for the
thread (guarded at the call site by _has_active_session_for_thread).
That guard ensures thread messages are prepended only on the very
first turn; after that, the session history already holds them, so
there is no duplication across subsequent turns.
Results are cached for _THREAD_CACHE_TTL seconds per thread to avoid
hammering conversations.replies (Tier 3, ~50 req/min).
Returns a formatted string with prior thread history, or empty string
on failure or if the thread has no prior messages.
"""
cache_key = f"{channel_id}:{thread_ts}"
now = time.monotonic()
cached = self._thread_context_cache.get(cache_key)
if cached and (now - cached.fetched_at) < self._THREAD_CACHE_TTL:
return cached.content
try:
client = self._get_client(channel_id)
result = await client.conversations_replies(
channel=channel_id,
ts=thread_ts,
limit=limit + 1, # +1 because it includes the current message
inclusive=True,
)
# Retry with exponential backoff for Tier-3 rate limits (429).
result = None
for attempt in range(3):
try:
result = await client.conversations_replies(
channel=channel_id,
ts=thread_ts,
limit=limit + 1, # +1 because it includes the current message
inclusive=True,
)
break
except Exception as exc:
# Check for rate-limit error from slack_sdk
err_str = str(exc).lower()
is_rate_limit = (
"ratelimited" in err_str
or "429" in err_str
or "rate_limited" in err_str
)
if is_rate_limit and attempt < 2:
retry_after = 1.0 * (2 ** attempt) # 1s, 2s
logger.warning(
"[Slack] conversations.replies rate limited; retrying in %.1fs (attempt %d/3)",
retry_after, attempt + 1,
)
await asyncio.sleep(retry_after)
continue
raise
if result is None:
return ""
messages = result.get("messages", [])
if not messages:
return ""
bot_uid = self._team_bot_user_ids.get(team_id, self._bot_user_id)
context_parts = []
for msg in messages:
msg_ts = msg.get("ts", "")
# Skip the current message (the one that triggered this fetch)
# Exclude the current triggering message — it will be delivered
# as the user message itself, so including it here would duplicate it.
if msg_ts == current_ts:
continue
# Skip bot messages from ourselves
# Exclude our own bot messages to avoid circular context.
if msg.get("bot_id") or msg.get("subtype") == "bot_message":
continue
msg_user = msg.get("user", "unknown")
msg_text = msg.get("text", "").strip()
if not msg_text:
continue
# Strip bot mentions from context messages
bot_uid = self._team_bot_user_ids.get(team_id, self._bot_user_id)
if bot_uid:
msg_text = msg_text.replace(f"<@{bot_uid}>", "").strip()
# Mark the thread parent
msg_user = msg.get("user", "unknown")
is_parent = msg_ts == thread_ts
prefix = "[thread parent] " if is_parent else ""
# Resolve user name (cached)
name = await self._resolve_user_name(msg_user, chat_id=channel_id)
context_parts.append(f"{prefix}{name}: {msg_text}")
if not context_parts:
return ""
content = ""
if context_parts:
content = (
"[Thread context — prior messages in this thread (not yet in conversation history):]\n"
+ "\n".join(context_parts)
+ "\n[End of thread context]\n\n"
)
return (
"[Thread context — previous messages in this thread:]\n"
+ "\n".join(context_parts)
+ "\n[End of thread context]\n\n"
self._thread_context_cache[cache_key] = _ThreadContextCache(
content=content,
fetched_at=now,
message_count=len(context_parts),
)
return content
except Exception as e:
logger.warning("[Slack] Failed to fetch thread context: %s", e)
return ""
@ -1515,3 +1642,30 @@ class SlackAdapter(BasePlatformAdapter):
continue
raise
raise last_exc
# ── Channel mention gating ─────────────────────────────────────────────
def _slack_require_mention(self) -> bool:
"""Return whether channel messages require an explicit bot mention.
Uses explicit-false parsing (like Discord/Matrix) rather than
truthy parsing, since the safe default is True (gating on).
Unrecognised or empty values keep gating enabled.
"""
configured = self.config.extra.get("require_mention")
if configured is not None:
if isinstance(configured, str):
return configured.lower() not in ("false", "0", "no", "off")
return bool(configured)
return os.getenv("SLACK_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no", "off")
def _slack_free_response_channels(self) -> set:
"""Return channel IDs where no @mention is required."""
raw = self.config.extra.get("free_response_channels")
if raw is None:
raw = os.getenv("SLACK_FREE_RESPONSE_CHANNELS", "")
if isinstance(raw, list):
return {str(part).strip() for part in raw if str(part).strip()}
if isinstance(raw, str) and raw.strip():
return {part.strip() for part in raw.split(",") if part.strip()}
return set()

View file

@ -1398,6 +1398,15 @@ class TelegramAdapter(BasePlatformAdapter):
await query.answer(text="Invalid approval data.")
return
# Only authorized users may click approval buttons.
caller_id = str(getattr(query.from_user, "id", ""))
allowed_csv = os.getenv("TELEGRAM_ALLOWED_USERS", "").strip()
if allowed_csv:
allowed_ids = {uid.strip() for uid in allowed_csv.split(",") if uid.strip()}
if "*" not in allowed_ids and caller_id not in allowed_ids:
await query.answer(text="⛔ You are not authorized to approve commands.")
return
session_key = self._approval_state.pop(approval_id, None)
if not session_key:
await query.answer(text="This approval has already been resolved.")

View file

@ -45,11 +45,9 @@ _SEED_FALLBACK_IPS: list[str] = ["149.154.167.220"]
def _resolve_proxy_url() -> str | None:
for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY", "https_proxy", "http_proxy", "all_proxy"):
value = (os.environ.get(key) or "").strip()
if value:
return value
return None
# Delegate to shared implementation (env vars + macOS system proxy detection)
from gateway.platforms.base import resolve_proxy_url
return resolve_proxy_url()
class TelegramFallbackTransport(httpx.AsyncBaseTransport):

View file

@ -925,8 +925,8 @@ class GatewayRunner:
def _load_reasoning_config() -> dict | None:
"""Load reasoning effort from config.yaml.
Reads agent.reasoning_effort from config.yaml. Valid: "xhigh",
"high", "medium", "low", "minimal", "none". Returns None to use
Reads agent.reasoning_effort from config.yaml. Valid: "none",
"minimal", "low", "medium", "high", "xhigh". Returns None to use
default (medium).
"""
from hermes_constants import parse_reasoning_effort
@ -4937,7 +4937,7 @@ class GatewayRunner:
Usage:
/reasoning Show current effort level and display state
/reasoning <level> Set reasoning effort (none, low, medium, high, xhigh)
/reasoning <level> Set reasoning effort (none, minimal, low, medium, high, xhigh)
/reasoning show|on Show model reasoning in responses
/reasoning hide|off Hide model reasoning from responses
"""
@ -4982,7 +4982,7 @@ class GatewayRunner:
"🧠 **Reasoning Settings**\n\n"
f"**Effort:** `{level}`\n"
f"**Display:** {display_state}\n\n"
"_Usage:_ `/reasoning <none|low|medium|high|xhigh|show|hide>`"
"_Usage:_ `/reasoning <none|minimal|low|medium|high|xhigh|show|hide>`"
)
# Display toggle
@ -5000,12 +5000,12 @@ class GatewayRunner:
effort = args.strip()
if effort == "none":
parsed = {"enabled": False}
elif effort in ("xhigh", "high", "medium", "low", "minimal"):
elif effort in ("minimal", "low", "medium", "high", "xhigh"):
parsed = {"enabled": True, "effort": effort}
else:
return (
f"⚠️ Unknown argument: `{effort}`\n\n"
"**Valid levels:** none, low, minimal, medium, high, xhigh\n"
"**Valid levels:** none, minimal, low, medium, high, xhigh\n"
"**Display:** show, hide"
)

View file

@ -136,7 +136,34 @@ class GatewayStreamConsumer:
if should_edit and self._accumulated:
# Split overflow: if accumulated text exceeds the platform
# limit, finalize the current message and start a new one.
# limit, split into properly sized chunks.
if (
len(self._accumulated) > _safe_limit
and self._message_id is None
):
# No existing message to edit (first message or after a
# segment break). Use truncate_message — the same
# helper the non-streaming path uses — to split with
# proper word/code-fence boundaries and chunk
# indicators like "(1/2)".
chunks = self.adapter.truncate_message(
self._accumulated, _safe_limit
)
for chunk in chunks:
await self._send_new_chunk(chunk, self._message_id)
self._accumulated = ""
self._last_sent_text = ""
self._last_edit_time = time.monotonic()
if got_done:
return
if got_segment_break:
self._message_id = None
self._fallback_final_send = False
self._fallback_prefix = ""
continue
# Existing message: edit it with the first chunk, then
# start a new message for the overflow remainder.
while (
len(self._accumulated) > _safe_limit
and self._message_id is not None
@ -226,6 +253,34 @@ class GatewayStreamConsumer:
# Strip trailing whitespace/newlines but preserve leading content
return cleaned.rstrip()
async def _send_new_chunk(self, text: str, reply_to_id: Optional[str]) -> Optional[str]:
"""Send a new message chunk, optionally threaded to a previous message.
Returns the message_id so callers can thread subsequent chunks.
"""
text = self._clean_for_display(text)
if not text.strip():
return reply_to_id
try:
meta = dict(self.metadata) if self.metadata else {}
result = await self.adapter.send(
chat_id=self.chat_id,
content=text,
reply_to=reply_to_id,
metadata=meta,
)
if result.success and result.message_id:
self._message_id = str(result.message_id)
self._already_sent = True
self._last_sent_text = text
return str(result.message_id)
else:
self._edit_supported = False
return reply_to_id
except Exception as e:
logger.error("Stream send chunk error: %s", e)
return reply_to_id
def _visible_prefix(self) -> str:
"""Return the visible text already shown in the streamed message."""
prefix = self._last_sent_text or ""

View file

@ -250,7 +250,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
# Kimi Code Endpoint Detection
# =============================================================================
# Kimi Code (platform.kimi.ai) issues keys prefixed "sk-kimi-" that only work
# Kimi Code (kimi.com/code) issues keys prefixed "sk-kimi-" that only work
# on api.kimi.com/coding/v1. Legacy keys from platform.moonshot.ai work on
# api.moonshot.ai/v1 (the default). Auto-detect when user hasn't set
# KIMI_BASE_URL explicitly.
@ -3017,12 +3017,15 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
_save_provider_state(auth_store, "nous", auth_state)
saved_to = _save_auth_store(auth_store)
config_path = _update_config_for_provider("nous", inference_base_url)
print()
print("Login successful!")
print(f" Auth state: {saved_to}")
print(f" Config updated: {config_path} (model.provider=nous)")
# Resolve model BEFORE writing provider to config.yaml so we never
# leave the config in a half-updated state (provider=nous but model
# still set to the previous provider's model, e.g. opus from
# OpenRouter). The auth.json active_provider was already set above.
selected_model = None
try:
runtime_key = auth_state.get("agent_key") or auth_state.get("access_token")
if not isinstance(runtime_key, str) or not runtime_key:
@ -3056,9 +3059,6 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
unavailable_models=unavailable_models,
portal_url=_portal,
)
if selected_model:
_save_model_choice(selected_model)
print(f"Default model set to: {selected_model}")
elif unavailable_models:
_url = (_portal or DEFAULT_NOUS_PORTAL_URL).rstrip("/")
print("No free models currently available.")
@ -3070,6 +3070,15 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
print()
print(f"Login succeeded, but could not fetch available models. Reason: {message}")
# Write provider + model atomically so config is never mismatched.
config_path = _update_config_for_provider(
"nous", inference_base_url, default_model=selected_model,
)
if selected_model:
_save_model_choice(selected_model)
print(f"Default model set to: {selected_model}")
print(f" Config updated: {config_path} (model.provider=nous)")
except KeyboardInterrupt:
print("\nLogin cancelled.")
raise SystemExit(130)

View file

@ -89,8 +89,7 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("model", "Switch model for this session", "Configuration", args_hint="[model] [--global]"),
CommandDef("provider", "Show available providers and current provider",
"Configuration"),
CommandDef("prompt", "View/set custom system prompt", "Configuration",
cli_only=True, args_hint="[text]", subcommands=("clear",)),
CommandDef("personality", "Set a predefined personality", "Configuration",
args_hint="[name]"),
CommandDef("statusbar", "Toggle the context/model status bar", "Configuration",
@ -102,7 +101,7 @@ COMMAND_REGISTRY: list[CommandDef] = [
"Configuration"),
CommandDef("reasoning", "Manage reasoning effort and display", "Configuration",
args_hint="[level|show|hide]",
subcommands=("none", "low", "minimal", "medium", "high", "xhigh", "show", "hide", "on", "off")),
subcommands=("none", "minimal", "low", "medium", "high", "xhigh", "show", "hide", "on", "off")),
CommandDef("skin", "Show or change the display skin/theme", "Configuration",
args_hint="[name]"),
CommandDef("voice", "Toggle voice mode", "Configuration",
@ -140,6 +139,8 @@ COMMAND_REGISTRY: list[CommandDef] = [
cli_only=True, args_hint="[number]"),
CommandDef("paste", "Check clipboard for an image and attach it", "Info",
cli_only=True),
CommandDef("image", "Attach a local image file for your next prompt", "Info",
cli_only=True, args_hint="<path>"),
CommandDef("update", "Update Hermes Agent to the latest version", "Info",
gateway_only=True),

View file

@ -197,14 +197,44 @@ def _ensure_default_soul_md(home: Path) -> None:
def ensure_hermes_home():
"""Ensure ~/.hermes directory structure exists with secure permissions."""
"""Ensure ~/.hermes directory structure exists with secure permissions.
In managed mode (NixOS), dirs are created by the activation script with
setgid + group-writable (2770). We skip mkdir and set umask(0o007) so
any files created (e.g. SOUL.md) are group-writable (0660).
"""
home = get_hermes_home()
home.mkdir(parents=True, exist_ok=True)
_secure_dir(home)
if is_managed():
old_umask = os.umask(0o007)
try:
_ensure_hermes_home_managed(home)
finally:
os.umask(old_umask)
else:
home.mkdir(parents=True, exist_ok=True)
_secure_dir(home)
for subdir in ("cron", "sessions", "logs", "memories"):
d = home / subdir
d.mkdir(parents=True, exist_ok=True)
_secure_dir(d)
_ensure_default_soul_md(home)
def _ensure_hermes_home_managed(home: Path):
"""Managed-mode variant: verify dirs exist (activation creates them), seed SOUL.md."""
if not home.is_dir():
raise RuntimeError(
f"HERMES_HOME {home} does not exist. "
"Run 'sudo nixos-rebuild switch' first."
)
for subdir in ("cron", "sessions", "logs", "memories"):
d = home / subdir
d.mkdir(parents=True, exist_ok=True)
_secure_dir(d)
if not d.is_dir():
raise RuntimeError(
f"{d} does not exist. "
"Run 'sudo nixos-rebuild switch' first."
)
# Inside umask(0o007) scope — SOUL.md will be created as 0660
_ensure_default_soul_md(home)

View file

@ -54,6 +54,32 @@ _PROVIDER_ENV_HINTS = (
)
from hermes_constants import is_termux as _is_termux
def _python_install_cmd() -> str:
    """Return the pip-style install command appropriate for this platform.

    Termux's tested path uses plain pip; everywhere else the project
    standard is uv.
    """
    if _is_termux():
        return "python -m pip install"
    return "uv pip install"
def _system_package_install_cmd(pkg: str) -> str:
    """Return the OS-appropriate shell command to install system package *pkg*.

    Termux uses `pkg`, macOS uses Homebrew, and everything else falls
    back to apt (the installer's assumed Linux baseline).
    """
    if _is_termux():
        manager = "pkg install"
    elif sys.platform == "darwin":
        manager = "brew install"
    else:
        manager = "sudo apt install"
    return f"{manager} {pkg}"
def _termux_browser_setup_steps(node_installed: bool) -> list[str]:
steps: list[str] = []
step = 1
if not node_installed:
steps.append(f"{step}) pkg install nodejs")
step += 1
steps.append(f"{step}) npm install -g agent-browser")
steps.append(f"{step + 1}) agent-browser install")
return steps
def _has_provider_env_config(content: str) -> bool:
    """Return True when ~/.hermes/.env contains provider auth/base URL settings.

    A simple substring scan against the known provider env hints; the
    first match wins.
    """
    for hint in _PROVIDER_ENV_HINTS:
        if hint in content:
            return True
    return False
@ -200,7 +226,7 @@ def run_doctor(args):
check_ok(name)
except ImportError:
check_fail(name, "(missing)")
issues.append(f"Install {name}: uv pip install {module}")
issues.append(f"Install {name}: {_python_install_cmd()} {module}")
for module, name in optional_packages:
try:
@ -503,7 +529,7 @@ def run_doctor(args):
check_ok("ripgrep (rg)", "(faster file search)")
else:
check_warn("ripgrep (rg) not found", "(file search uses grep fallback)")
check_info("Install for faster search: sudo apt install ripgrep")
check_info(f"Install for faster search: {_system_package_install_cmd('ripgrep')}")
# Docker (optional)
terminal_env = os.getenv("TERMINAL_ENV", "local")
@ -526,7 +552,10 @@ def run_doctor(args):
if shutil.which("docker"):
check_ok("docker", "(optional)")
else:
check_warn("docker not found", "(optional)")
if _is_termux():
check_info("Docker backend is not available inside Termux (expected on Android)")
else:
check_warn("docker not found", "(optional)")
# SSH (if using ssh backend)
if terminal_env == "ssh":
@ -574,9 +603,23 @@ def run_doctor(args):
if agent_browser_path.exists():
check_ok("agent-browser (Node.js)", "(browser automation)")
else:
check_warn("agent-browser not installed", "(run: npm install)")
if _is_termux():
check_info("agent-browser is not installed (expected in the tested Termux path)")
check_info("Install it manually later with: npm install -g agent-browser && agent-browser install")
check_info("Termux browser setup:")
for step in _termux_browser_setup_steps(node_installed=True):
check_info(step)
else:
check_warn("agent-browser not installed", "(run: npm install)")
else:
check_warn("Node.js not found", "(optional, needed for browser tools)")
if _is_termux():
check_info("Node.js not found (browser tools are optional in the tested Termux path)")
check_info("Install Node.js on Termux with: pkg install nodejs")
check_info("Termux browser setup:")
for step in _termux_browser_setup_steps(node_installed=False):
check_info(step)
else:
check_warn("Node.js not found", "(optional, needed for browser tools)")
# npm audit for all Node.js packages
if shutil.which("npm"):
@ -739,8 +782,9 @@ def run_doctor(args):
__import__("tinker_atropos")
check_ok("tinker-atropos", "(RL training backend)")
except ImportError:
check_warn("tinker-atropos found but not installed", "(run: uv pip install -e ./tinker-atropos)")
issues.append("Install tinker-atropos: uv pip install -e ./tinker-atropos")
install_cmd = f"{_python_install_cmd()} -e ./tinker-atropos"
check_warn("tinker-atropos found but not installed", f"(run: {install_cmd})")
issues.append(f"Install tinker-atropos: {install_cmd}")
else:
check_warn("tinker-atropos requires Python 3.11+", f"(current: {py_version.major}.{py_version.minor})")
else:

View file

@ -39,7 +39,7 @@ def _get_service_pids() -> set:
pids: set = set()
# --- systemd (Linux): user and system scopes ---
if is_linux():
if supports_systemd_services():
for scope_args in [["systemctl", "--user"], ["systemctl"]]:
try:
result = subprocess.run(
@ -225,6 +225,14 @@ def stop_profile_gateway() -> bool:
def is_linux() -> bool:
    """Return True when sys.platform reports a Linux kernel."""
    platform_id = sys.platform
    return platform_id.startswith('linux')
from hermes_constants import is_termux
def supports_systemd_services() -> bool:
    """Return True when systemd service management is usable here.

    Termux reports as Linux but ships no systemd, so it is excluded.
    """
    if not is_linux():
        return False
    return not is_termux()
def is_macos() -> bool:
    """Return True when the interpreter reports macOS (darwin)."""
    platform_id = sys.platform
    return platform_id == 'darwin'
@ -477,13 +485,15 @@ def install_linux_gateway_from_setup(force: bool = False) -> tuple[str | None, b
def get_systemd_linger_status() -> tuple[bool | None, str]:
"""Return whether systemd user lingering is enabled for the current user.
"""Return systemd linger status for the current user.
Returns:
(True, "") when linger is enabled.
(False, "") when linger is disabled.
(None, detail) when the status could not be determined.
"""
if is_termux():
return None, "not supported in Termux"
if not is_linux():
return None, "not supported on this platform"
@ -766,7 +776,7 @@ def _print_linger_enable_warning(username: str, detail: str | None = None) -> No
def _ensure_linger_enabled() -> None:
"""Enable linger when possible so the user gateway survives logout."""
if not is_linux():
if is_termux() or not is_linux():
return
import getpass
@ -1801,7 +1811,7 @@ def _setup_whatsapp():
def _is_service_installed() -> bool:
"""Check if the gateway is installed as a system service."""
if is_linux():
if supports_systemd_services():
return get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()
elif is_macos():
return get_launchd_plist_path().exists()
@ -1810,7 +1820,7 @@ def _is_service_installed() -> bool:
def _is_service_running() -> bool:
"""Check if the gateway service is currently running."""
if is_linux():
if supports_systemd_services():
user_unit_exists = get_systemd_unit_path(system=False).exists()
system_unit_exists = get_systemd_unit_path(system=True).exists()
@ -1983,7 +1993,7 @@ def gateway_setup():
service_installed = _is_service_installed()
service_running = _is_service_running()
if is_linux() and has_conflicting_systemd_units():
if supports_systemd_services() and has_conflicting_systemd_units():
print_systemd_scope_conflict_warning()
print()
@ -1993,7 +2003,7 @@ def gateway_setup():
print_warning("Gateway service is installed but not running.")
if prompt_yes_no(" Start it now?", True):
try:
if is_linux():
if supports_systemd_services():
systemd_start()
elif is_macos():
launchd_start()
@ -2044,7 +2054,7 @@ def gateway_setup():
if service_running:
if prompt_yes_no(" Restart the gateway to pick up changes?", True):
try:
if is_linux():
if supports_systemd_services():
systemd_restart()
elif is_macos():
launchd_restart()
@ -2056,7 +2066,7 @@ def gateway_setup():
elif service_installed:
if prompt_yes_no(" Start the gateway service?", True):
try:
if is_linux():
if supports_systemd_services():
systemd_start()
elif is_macos():
launchd_start()
@ -2064,13 +2074,13 @@ def gateway_setup():
print_error(f" Start failed: {e}")
else:
print()
if is_linux() or is_macos():
platform_name = "systemd" if is_linux() else "launchd"
if supports_systemd_services() or is_macos():
platform_name = "systemd" if supports_systemd_services() else "launchd"
if prompt_yes_no(f" Install the gateway as a {platform_name} service? (runs in background, starts on boot)", True):
try:
installed_scope = None
did_install = False
if is_linux():
if supports_systemd_services():
installed_scope, did_install = install_linux_gateway_from_setup(force=False)
else:
launchd_install(force=False)
@ -2078,7 +2088,7 @@ def gateway_setup():
print()
if did_install and prompt_yes_no(" Start the service now?", True):
try:
if is_linux():
if supports_systemd_services():
systemd_start(system=installed_scope == "system")
else:
launchd_start()
@ -2089,12 +2099,18 @@ def gateway_setup():
print_info(" You can try manually: hermes gateway install")
else:
print_info(" You can install later: hermes gateway install")
if is_linux():
if supports_systemd_services():
print_info(" Or as a boot-time service: sudo hermes gateway install --system")
print_info(" Or run in foreground: hermes gateway")
else:
print_info(" Service install not supported on this platform.")
print_info(" Run in foreground: hermes gateway")
if is_termux():
from hermes_constants import display_hermes_home as _dhh
print_info(" Termux does not use systemd/launchd services.")
print_info(" Run in foreground: hermes gateway")
print_info(f" Or start it manually in the background (best effort): nohup hermes gateway >{_dhh()}/logs/gateway.log 2>&1 &")
else:
print_info(" Service install not supported on this platform.")
print_info(" Run in foreground: hermes gateway")
else:
print()
print_info("No platforms configured. Run 'hermes gateway setup' when ready.")
@ -2130,7 +2146,11 @@ def gateway_command(args):
force = getattr(args, 'force', False)
system = getattr(args, 'system', False)
run_as_user = getattr(args, 'run_as_user', None)
if is_linux():
if is_termux():
print("Gateway service installation is not supported on Termux.")
print("Run manually: hermes gateway")
sys.exit(1)
if supports_systemd_services():
systemd_install(force=force, system=system, run_as_user=run_as_user)
elif is_macos():
launchd_install(force)
@ -2144,7 +2164,11 @@ def gateway_command(args):
managed_error("uninstall gateway service (managed by NixOS)")
return
system = getattr(args, 'system', False)
if is_linux():
if is_termux():
print("Gateway service uninstall is not supported on Termux because there is no managed service to remove.")
print("Stop manual runs with: hermes gateway stop")
sys.exit(1)
if supports_systemd_services():
systemd_uninstall(system=system)
elif is_macos():
launchd_uninstall()
@ -2154,7 +2178,11 @@ def gateway_command(args):
elif subcmd == "start":
system = getattr(args, 'system', False)
if is_linux():
if is_termux():
print("Gateway service start is not supported on Termux because there is no system service manager.")
print("Run manually: hermes gateway")
sys.exit(1)
if supports_systemd_services():
systemd_start(system=system)
elif is_macos():
launchd_start()
@ -2169,7 +2197,7 @@ def gateway_command(args):
if stop_all:
# --all: kill every gateway process on the machine
service_available = False
if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
try:
systemd_stop(system=system)
service_available = True
@ -2190,7 +2218,7 @@ def gateway_command(args):
else:
# Default: stop only the current profile's gateway
service_available = False
if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
try:
systemd_stop(system=system)
service_available = True
@ -2218,7 +2246,7 @@ def gateway_command(args):
system = getattr(args, 'system', False)
service_configured = False
if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
service_configured = True
try:
systemd_restart(system=system)
@ -2235,7 +2263,7 @@ def gateway_command(args):
if not service_available:
# systemd/launchd restart failed — check if linger is the issue
if is_linux():
if supports_systemd_services():
linger_ok, _detail = get_systemd_linger_status()
if linger_ok is not True:
import getpass
@ -2272,7 +2300,7 @@ def gateway_command(args):
system = getattr(args, 'system', False)
# Check for service first
if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()):
systemd_status(deep, system=system)
elif is_macos() and get_launchd_plist_path().exists():
launchd_status(deep)
@ -2289,9 +2317,13 @@ def gateway_command(args):
for line in runtime_lines:
print(f" {line}")
print()
print("To install as a service:")
print(" hermes gateway install")
print(" sudo hermes gateway install --system")
if is_termux():
print("Termux note:")
print(" Android may stop background jobs when Termux is suspended")
else:
print("To install as a service:")
print(" hermes gateway install")
print(" sudo hermes gateway install --system")
else:
print("✗ Gateway is not running")
runtime_lines = _runtime_health_lines()
@ -2303,5 +2335,8 @@ def gateway_command(args):
print()
print("To start:")
print(" hermes gateway # Run in foreground")
print(" hermes gateway install # Install as user service")
print(" sudo hermes gateway install --system # Install as boot-time system service")
if is_termux():
print(" nohup hermes gateway > ~/.hermes/logs/gateway.log 2>&1 & # Best-effort background start")
else:
print(" hermes gateway install # Install as user service")
print(" sudo hermes gateway install --system # Install as boot-time system service")

View file

@ -781,6 +781,7 @@ def cmd_chat(args):
"verbose": args.verbose,
"quiet": getattr(args, "quiet", False),
"query": args.query,
"image": getattr(args, "image", None),
"resume": getattr(args, "resume", None),
"worktree": getattr(args, "worktree", False),
"checkpoints": getattr(args, "checkpoints", False),
@ -1946,7 +1947,10 @@ def _set_reasoning_effort(config, effort: str) -> None:
def _prompt_reasoning_effort_selection(efforts, current_effort=""):
"""Prompt for a reasoning effort. Returns effort, 'none', or None to keep current."""
ordered = list(dict.fromkeys(str(effort).strip().lower() for effort in efforts if str(effort).strip()))
deduped = list(dict.fromkeys(str(effort).strip().lower() for effort in efforts if str(effort).strip()))
canonical_order = ("minimal", "low", "medium", "high", "xhigh")
ordered = [effort for effort in canonical_order if effort in deduped]
ordered.extend(effort for effort in deduped if effort not in canonical_order)
if not ordered:
return None
@ -3895,7 +3899,7 @@ def cmd_update(args):
# running gateway needs restarting to pick up the new code.
try:
from hermes_cli.gateway import (
is_macos, is_linux, _ensure_user_systemd_env,
is_macos, supports_systemd_services, _ensure_user_systemd_env,
find_gateway_pids,
_get_service_pids,
)
@ -3906,7 +3910,7 @@ def cmd_update(args):
# --- Systemd services (Linux) ---
# Discover all hermes-gateway* units (default + profiles)
if is_linux():
if supports_systemd_services():
try:
_ensure_user_systemd_env()
except Exception:
@ -4429,6 +4433,10 @@ For more help on a command:
"-q", "--query",
help="Single query (non-interactive mode)"
)
chat_parser.add_argument(
"--image",
help="Optional local image path to attach to a single query"
)
chat_parser.add_argument(
"-m", "--model",
help="Model to use (e.g., anthropic/claude-sonnet-4)"

View file

@ -733,6 +733,7 @@ def list_authenticated_providers(
fetch_models_dev,
get_provider_info as _mdev_pinfo,
)
from hermes_cli.auth import PROVIDER_REGISTRY
from hermes_cli.models import OPENROUTER_MODELS, _PROVIDER_MODELS
results: List[dict] = []
@ -753,9 +754,16 @@ def list_authenticated_providers(
if not isinstance(pdata, dict):
continue
env_vars = pdata.get("env", [])
if not isinstance(env_vars, list):
continue
# Prefer auth.py PROVIDER_REGISTRY for env var names — it's our
# source of truth. models.dev can have wrong mappings (e.g.
# minimax-cn → MINIMAX_API_KEY instead of MINIMAX_CN_API_KEY).
pconfig = PROVIDER_REGISTRY.get(hermes_id)
if pconfig and pconfig.api_key_env_vars:
env_vars = list(pconfig.api_key_env_vars)
else:
env_vars = pdata.get("env", [])
if not isinstance(env_vars, list):
continue
# Check if any env var is set
has_creds = any(os.environ.get(ev) for ev in env_vars)

View file

@ -2572,9 +2572,120 @@ _OPENCLAW_SCRIPT = (
)
def _load_openclaw_migration_module():
"""Load the openclaw_to_hermes migration script as a module.
Returns the loaded module, or None if the script can't be loaded.
"""
if not _OPENCLAW_SCRIPT.exists():
return None
spec = importlib.util.spec_from_file_location(
"openclaw_to_hermes", _OPENCLAW_SCRIPT
)
if spec is None or spec.loader is None:
return None
mod = importlib.util.module_from_spec(spec)
# Register in sys.modules so @dataclass can resolve the module
# (Python 3.11+ requires this for dynamically loaded modules)
import sys as _sys
_sys.modules[spec.name] = mod
try:
spec.loader.exec_module(mod)
except Exception:
_sys.modules.pop(spec.name, None)
raise
return mod
# Item kinds that represent high-impact changes warranting explicit warnings.
# Gateway tokens/channels can hijack messaging platforms from the old agent.
# Config values may have different semantics between OpenClaw and Hermes.
# Instruction/context files (.md) can contain incompatible setup procedures.
_HIGH_IMPACT_KIND_KEYWORDS = {
"gateway": "⚠ Gateway/messaging — this will configure Hermes to use your OpenClaw messaging channels",
"telegram": "⚠ Telegram — this will point Hermes at your OpenClaw Telegram bot",
"slack": "⚠ Slack — this will point Hermes at your OpenClaw Slack workspace",
"discord": "⚠ Discord — this will point Hermes at your OpenClaw Discord bot",
"whatsapp": "⚠ WhatsApp — this will point Hermes at your OpenClaw WhatsApp connection",
"config": "⚠ Config values — OpenClaw settings may not map 1:1 to Hermes equivalents",
"soul": "⚠ Instruction file — may contain OpenClaw-specific setup/restart procedures",
"memory": "⚠ Memory/context file — may reference OpenClaw-specific infrastructure",
"context": "⚠ Context file — may contain OpenClaw-specific instructions",
}
def _print_migration_preview(report: dict):
"""Print a detailed dry-run preview of what migration would do.
Groups items by category and adds explicit warnings for high-impact
changes like gateway token takeover and config value differences.
"""
items = report.get("items", [])
if not items:
print_info("Nothing to migrate.")
return
migrated_items = [i for i in items if i.get("status") == "migrated"]
conflict_items = [i for i in items if i.get("status") == "conflict"]
skipped_items = [i for i in items if i.get("status") == "skipped"]
warnings_shown = set()
if migrated_items:
print(color(" Would import:", Colors.GREEN))
for item in migrated_items:
kind = item.get("kind", "unknown")
dest = item.get("destination", "")
if dest:
dest_short = str(dest).replace(str(Path.home()), "~")
print(f" {kind:<22s}{dest_short}")
else:
print(f" {kind}")
# Check for high-impact items and collect warnings
kind_lower = kind.lower()
dest_lower = str(dest).lower()
for keyword, warning in _HIGH_IMPACT_KIND_KEYWORDS.items():
if keyword in kind_lower or keyword in dest_lower:
warnings_shown.add(warning)
print()
if conflict_items:
print(color(" Would overwrite (conflicts with existing Hermes config):", Colors.YELLOW))
for item in conflict_items:
kind = item.get("kind", "unknown")
reason = item.get("reason", "already exists")
print(f" {kind:<22s} {reason}")
print()
if skipped_items:
print(color(" Would skip:", Colors.DIM))
for item in skipped_items:
kind = item.get("kind", "unknown")
reason = item.get("reason", "")
print(f" {kind:<22s} {reason}")
print()
# Print collected warnings
if warnings_shown:
print(color(" ── Warnings ──", Colors.YELLOW))
for warning in sorted(warnings_shown):
print(color(f" {warning}", Colors.YELLOW))
print()
print(color(" Note: OpenClaw config values may have different semantics in Hermes.", Colors.YELLOW))
print(color(" For example, OpenClaw's tool_call_execution: \"auto\" ≠ Hermes's yolo mode.", Colors.YELLOW))
print(color(" Instruction files (.md) from OpenClaw may contain incompatible procedures.", Colors.YELLOW))
print()
def _offer_openclaw_migration(hermes_home: Path) -> bool:
"""Detect ~/.openclaw and offer to migrate during first-time setup.
Runs a dry-run first to show the user exactly what would be imported,
overwritten, or taken over. Only executes after explicit confirmation.
Returns True if migration ran successfully, False otherwise.
"""
openclaw_dir = Path.home() / ".openclaw"
@ -2587,12 +2698,12 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool:
print()
print_header("OpenClaw Installation Detected")
print_info(f"Found OpenClaw data at {openclaw_dir}")
print_info("Hermes can import your settings, memories, skills, and API keys.")
print_info("Hermes can preview what would be imported before making any changes.")
print()
if not prompt_yes_no("Would you like to import from OpenClaw?", default=True):
if not prompt_yes_no("Would you like to see what can be imported?", default=True):
print_info(
"Skipping migration. You can run it later via the openclaw-migration skill."
"Skipping migration. You can run it later with: hermes claw migrate --dry-run"
)
return False
@ -2601,34 +2712,71 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool:
if not config_path.exists():
save_config(load_config())
# Dynamically load the migration script
# Load the migration module
try:
spec = importlib.util.spec_from_file_location(
"openclaw_to_hermes", _OPENCLAW_SCRIPT
)
if spec is None or spec.loader is None:
mod = _load_openclaw_migration_module()
if mod is None:
print_warning("Could not load migration script.")
return False
except Exception as e:
print_warning(f"Could not load migration script: {e}")
logger.debug("OpenClaw migration module load error", exc_info=True)
return False
mod = importlib.util.module_from_spec(spec)
# Register in sys.modules so @dataclass can resolve the module
# (Python 3.11+ requires this for dynamically loaded modules)
import sys as _sys
_sys.modules[spec.name] = mod
try:
spec.loader.exec_module(mod)
except Exception:
_sys.modules.pop(spec.name, None)
raise
# Run migration with the "full" preset, execute mode, no overwrite
# ── Phase 1: Dry-run preview ──
try:
selected = mod.resolve_selected_options(None, None, preset="full")
dry_migrator = mod.Migrator(
source_root=openclaw_dir.resolve(),
target_root=hermes_home.resolve(),
execute=False, # dry-run — no files modified
workspace_target=None,
overwrite=True, # show everything including conflicts
migrate_secrets=True,
output_dir=None,
selected_options=selected,
preset_name="full",
)
preview_report = dry_migrator.migrate()
except Exception as e:
print_warning(f"Migration preview failed: {e}")
logger.debug("OpenClaw migration preview error", exc_info=True)
return False
# Display the full preview
preview_summary = preview_report.get("summary", {})
preview_count = preview_summary.get("migrated", 0)
if preview_count == 0:
print()
print_info("Nothing to import from OpenClaw.")
return False
print()
print_header(f"Migration Preview — {preview_count} item(s) would be imported")
print_info("No changes have been made yet. Review the list below:")
print()
_print_migration_preview(preview_report)
# ── Phase 2: Confirm and execute ──
if not prompt_yes_no("Proceed with migration?", default=False):
print_info(
"Migration cancelled. You can run it later with: hermes claw migrate"
)
print_info(
"Use --dry-run to preview again, or --preset minimal for a lighter import."
)
return False
# Execute the migration — overwrite=False so existing Hermes configs are
# preserved. The user saw the preview; conflicts are skipped by default.
try:
migrator = mod.Migrator(
source_root=openclaw_dir.resolve(),
target_root=hermes_home.resolve(),
execute=True,
workspace_target=None,
overwrite=True,
overwrite=False, # preserve existing Hermes config
migrate_secrets=True,
output_dir=None,
selected_options=selected,
@ -2640,7 +2788,7 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool:
logger.debug("OpenClaw migration error", exc_info=True)
return False
# Print summary
# Print final summary
summary = report.get("summary", {})
migrated = summary.get("migrated", 0)
skipped = summary.get("skipped", 0)
@ -2651,7 +2799,7 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool:
if migrated:
print_success(f"Imported {migrated} item(s) from OpenClaw.")
if conflicts:
print_info(f"Skipped {conflicts} item(s) that already exist in Hermes.")
print_info(f"Skipped {conflicts} item(s) that already exist in Hermes (use hermes claw migrate --overwrite to force).")
if skipped:
print_info(f"Skipped {skipped} item(s) (not found or unchanged).")
if errors:

View file

@ -79,6 +79,9 @@ def _effective_provider_label() -> str:
return provider_label(effective)
from hermes_constants import is_termux as _is_termux
def show_status(args):
"""Show status of all Hermes Agent components."""
show_all = getattr(args, 'all', False)
@ -325,7 +328,25 @@ def show_status(args):
print()
print(color("◆ Gateway Service", Colors.CYAN, Colors.BOLD))
if sys.platform.startswith('linux'):
if _is_termux():
try:
from hermes_cli.gateway import find_gateway_pids
gateway_pids = find_gateway_pids()
except Exception:
gateway_pids = []
is_running = bool(gateway_pids)
print(f" Status: {check_mark(is_running)} {'running' if is_running else 'stopped'}")
print(" Manager: Termux / manual process")
if gateway_pids:
rendered = ", ".join(str(pid) for pid in gateway_pids[:3])
if len(gateway_pids) > 3:
rendered += ", ..."
print(f" PID(s): {rendered}")
else:
print(" Start with: hermes gateway")
print(" Note: Android may stop background jobs when Termux is suspended")
elif sys.platform.startswith('linux'):
try:
from hermes_cli.gateway import get_service_name
_gw_svc = get_service_name()
@ -339,7 +360,7 @@ def show_status(args):
timeout=5
)
is_active = result.stdout.strip() == "active"
except subprocess.TimeoutExpired:
except (FileNotFoundError, subprocess.TimeoutExpired):
is_active = False
print(f" Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
print(" Manager: systemd (user)")

View file

@ -122,6 +122,10 @@ def uninstall_gateway_service():
if platform.system() != "Linux":
return False
prefix = os.getenv("PREFIX", "")
if os.getenv("TERMUX_VERSION") or "com.termux/files/usr" in prefix:
return False
try:
from hermes_cli.gateway import get_service_name

View file

@ -72,13 +72,13 @@ def display_hermes_home() -> str:
return str(home)
VALID_REASONING_EFFORTS = ("xhigh", "high", "medium", "low", "minimal")
VALID_REASONING_EFFORTS = ("minimal", "low", "medium", "high", "xhigh")
def parse_reasoning_effort(effort: str) -> dict | None:
"""Parse a reasoning effort level into a config dict.
Valid levels: "xhigh", "high", "medium", "low", "minimal", "none".
Valid levels: "none", "minimal", "low", "medium", "high", "xhigh".
Returns None when the input is empty or unrecognized (caller uses default).
Returns {"enabled": False} for "none".
Returns {"enabled": True, "effort": <level>} for valid effort levels.
@ -93,6 +93,16 @@ def parse_reasoning_effort(effort: str) -> dict | None:
return None
def is_termux() -> bool:
"""Return True when running inside a Termux (Android) environment.
Checks ``TERMUX_VERSION`` (set by Termux) or the Termux-specific
``PREFIX`` path. Import-safe no heavy deps.
"""
prefix = os.getenv("PREFIX", "")
return bool(os.getenv("TERMUX_VERSION") or "com.termux/files/usr" in prefix)
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
OPENROUTER_MODELS_URL = f"{OPENROUTER_BASE_URL}/models"
OPENROUTER_CHAT_URL = f"{OPENROUTER_BASE_URL}/chat/completions"

View file

@ -13,6 +13,7 @@ secrets are never written to disk.
"""
import logging
import os
from logging.handlers import RotatingFileHandler
from pathlib import Path
from typing import Optional
@ -177,6 +178,38 @@ def setup_verbose_logging() -> None:
# Internal helpers
# ---------------------------------------------------------------------------
class _ManagedRotatingFileHandler(RotatingFileHandler):
"""RotatingFileHandler that ensures group-writable perms in managed mode.
In managed mode (NixOS), the stateDir uses setgid (2770) so new files
inherit the hermes group. However, both _open() (initial creation) and
doRollover() create files via open(), which uses the process umask
typically 0022, producing 0644. This subclass applies chmod 0660 after
both operations so the gateway and interactive users can share log files.
"""
def __init__(self, *args, **kwargs):
from hermes_cli.config import is_managed
self._managed = is_managed()
super().__init__(*args, **kwargs)
def _chmod_if_managed(self):
if self._managed:
try:
os.chmod(self.baseFilename, 0o660)
except OSError:
pass
def _open(self):
stream = super()._open()
self._chmod_if_managed()
return stream
def doRollover(self):
super().doRollover()
self._chmod_if_managed()
def _add_rotating_handler(
logger: logging.Logger,
path: Path,
@ -198,7 +231,7 @@ def _add_rotating_handler(
return # already attached
path.parent.mkdir(parents=True, exist_ok=True)
handler = RotatingFileHandler(
handler = _ManagedRotatingFileHandler(
str(path), maxBytes=max_bytes, backupCount=backup_count,
)
handler.setLevel(level)

View file

@ -944,7 +944,8 @@ class SessionDB:
try:
msg["tool_calls"] = json.loads(msg["tool_calls"])
except (json.JSONDecodeError, TypeError):
pass
logger.warning("Failed to deserialize tool_calls in get_messages, falling back to []")
msg["tool_calls"] = []
result.append(msg)
return result
@ -972,7 +973,8 @@ class SessionDB:
try:
msg["tool_calls"] = json.loads(row["tool_calls"])
except (json.JSONDecodeError, TypeError):
pass
logger.warning("Failed to deserialize tool_calls in conversation replay, falling back to []")
msg["tool_calls"] = []
# Restore reasoning fields on assistant messages so providers
# that replay reasoning (OpenRouter, OpenAI, Nous) receive
# coherent multi-turn reasoning context.
@ -983,12 +985,14 @@ class SessionDB:
try:
msg["reasoning_details"] = json.loads(row["reasoning_details"])
except (json.JSONDecodeError, TypeError):
pass
logger.warning("Failed to deserialize reasoning_details, falling back to None")
msg["reasoning_details"] = None
if row["codex_reasoning_items"]:
try:
msg["codex_reasoning_items"] = json.loads(row["codex_reasoning_items"])
except (json.JSONDecodeError, TypeError):
pass
logger.warning("Failed to deserialize codex_reasoning_items, falling back to None")
msg["codex_reasoning_items"] = None
messages.append(msg)
return messages

View file

@ -560,10 +560,14 @@
# ── Directories ───────────────────────────────────────────────────
{
systemd.tmpfiles.rules = [
"d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes 0750 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir} 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes/cron 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes/sessions 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes/logs 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/.hermes/memories 2770 ${cfg.user} ${cfg.group} - -"
"d ${cfg.stateDir}/home 0750 ${cfg.user} ${cfg.group} - -"
"d ${cfg.workingDirectory} 0750 ${cfg.user} ${cfg.group} - -"
"d ${cfg.workingDirectory} 2770 ${cfg.user} ${cfg.group} - -"
];
}
@ -575,7 +579,21 @@
mkdir -p ${cfg.stateDir}/home
mkdir -p ${cfg.workingDirectory}
chown ${cfg.user}:${cfg.group} ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.stateDir}/home ${cfg.workingDirectory}
chmod 0750 ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.stateDir}/home ${cfg.workingDirectory}
chmod 2770 ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.workingDirectory}
chmod 0750 ${cfg.stateDir}/home
# Create subdirs, set setgid + group-writable, migrate existing files.
# Nix-managed files (config.yaml, .env, .managed) stay 0640/0644.
find ${cfg.stateDir}/.hermes -maxdepth 1 \
\( -name "*.db" -o -name "*.db-wal" -o -name "*.db-shm" -o -name "SOUL.md" \) \
-exec chmod g+rw {} + 2>/dev/null || true
for _subdir in cron sessions logs memories; do
mkdir -p "${cfg.stateDir}/.hermes/$_subdir"
chown ${cfg.user}:${cfg.group} "${cfg.stateDir}/.hermes/$_subdir"
chmod 2770 "${cfg.stateDir}/.hermes/$_subdir"
find "${cfg.stateDir}/.hermes/$_subdir" -type f \
-exec chmod g+rw {} + 2>/dev/null || true
done
# Merge Nix settings into existing config.yaml.
# Preserves user-added keys (skills, streaming, etc.); Nix keys win.
@ -662,6 +680,10 @@ HERMES_NIX_ENV_EOF
Restart = cfg.restart;
RestartSec = cfg.restartSec;
# Shared-state: files created by the gateway should be group-writable
# so interactive users in the hermes group can read/write them.
UMask = "0007";
# Hardening
NoNewPrivileges = true;
ProtectSystem = "strict";

View file

@ -63,6 +63,17 @@ homeassistant = ["aiohttp>=3.9.0,<4"]
sms = ["aiohttp>=3.9.0,<4"]
acp = ["agent-client-protocol>=0.9.0,<1.0"]
mistral = ["mistralai>=2.3.0,<3"]
termux = [
# Tested Android / Termux path: keeps the core CLI feature-rich while
# avoiding extras that currently depend on non-Android wheels (notably
# faster-whisper -> ctranslate2 via the voice extra).
"hermes-agent[cron]",
"hermes-agent[cli]",
"hermes-agent[pty]",
"hermes-agent[mcp]",
"hermes-agent[honcho]",
"hermes-agent[acp]",
]
dingtalk = ["dingtalk-stream>=0.1.0,<1"]
feishu = ["lark-oapi>=1.5.3,<2"]
rl = [

View file

@ -87,6 +87,7 @@ from agent.model_metadata import (
fetch_model_metadata,
estimate_tokens_rough, estimate_messages_tokens_rough, estimate_request_tokens_rough,
get_next_probe_tier, parse_context_limit_from_error,
parse_available_output_tokens_from_error,
save_context_length, is_local_endpoint,
query_ollama_num_ctx,
)
@ -621,6 +622,7 @@ class AIAgent:
self.tool_progress_callback = tool_progress_callback
self.tool_start_callback = tool_start_callback
self.tool_complete_callback = tool_complete_callback
self.suppress_status_output = False
self.thinking_callback = thinking_callback
self.reasoning_callback = reasoning_callback
self._reasoning_deltas_fired = False # Set by _fire_reasoning_delta, reset per API call
@ -1459,7 +1461,14 @@ class AIAgent:
After the main response has been delivered and the remaining tool
calls are post-response housekeeping (``_mute_post_response``),
all non-forced output is suppressed.
``suppress_status_output`` is a stricter CLI automation mode used by
parseable single-query flows such as ``hermes chat -q``. In that mode,
all status/diagnostic prints routed through ``_vprint`` are suppressed
so stdout stays machine-readable.
"""
if getattr(self, "suppress_status_output", False):
return
if not force and getattr(self, "_mute_post_response", False):
return
if not force and self._has_stream_consumers() and not self._executing_tools:
@ -1485,6 +1494,17 @@ class AIAgent:
except (AttributeError, ValueError, OSError):
return False
def _should_emit_quiet_tool_messages(self) -> bool:
"""Return True when quiet-mode tool summaries should print directly.
When the caller provides ``tool_progress_callback`` (for example the CLI
TUI or a gateway progress renderer), that callback owns progress display.
Emitting quiet-mode summary lines here duplicates progress and leaks tool
previews into flows that are expected to stay silent, such as
``hermes chat -q``.
"""
return self.quiet_mode and not self.tool_progress_callback
def _emit_status(self, message: str) -> None:
"""Emit a lifecycle status message to both CLI and gateway channels.
@ -4969,9 +4989,21 @@ class AIAgent:
# Swap OpenAI client and config in-place
self.api_key = fb_client.api_key
self.client = fb_client
# Preserve provider-specific headers that
# resolve_provider_client() may have baked into
# fb_client via the default_headers kwarg. The OpenAI
# SDK stores these in _custom_headers. Without this,
# subsequent request-client rebuilds (via
# _create_request_openai_client) drop the headers,
# causing 403s from providers like Kimi Coding that
# require a User-Agent sentinel.
fb_headers = getattr(fb_client, "_custom_headers", None)
if not fb_headers:
fb_headers = getattr(fb_client, "default_headers", None)
self._client_kwargs = {
"api_key": fb_client.api_key,
"base_url": fb_base_url,
**({"default_headers": dict(fb_headers)} if fb_headers else {}),
}
# Re-evaluate prompt caching for the new provider/model
@ -5386,15 +5418,22 @@ class AIAgent:
if self.api_mode == "anthropic_messages":
from agent.anthropic_adapter import build_anthropic_kwargs
anthropic_messages = self._prepare_anthropic_messages_for_api(api_messages)
# Pass context_length so the adapter can clamp max_tokens if the
# user configured a smaller context window than the model's output limit.
# Pass context_length (total input+output window) so the adapter can
# clamp max_tokens (output cap) when the user configured a smaller
# context window than the model's native output limit.
ctx_len = getattr(self, "context_compressor", None)
ctx_len = ctx_len.context_length if ctx_len else None
# _ephemeral_max_output_tokens is set for one call when the API
# returns "max_tokens too large given prompt" — it caps output to
# the available window space without touching context_length.
ephemeral_out = getattr(self, "_ephemeral_max_output_tokens", None)
if ephemeral_out is not None:
self._ephemeral_max_output_tokens = None # consume immediately
return build_anthropic_kwargs(
model=self.model,
messages=anthropic_messages,
tools=self.tools,
max_tokens=self.max_tokens,
max_tokens=ephemeral_out if ephemeral_out is not None else self.max_tokens,
reasoning_config=self.reasoning_config,
is_oauth=self._is_anthropic_oauth,
preserve_dots=self._anthropic_preserve_dots(),
@ -6328,7 +6367,7 @@ class AIAgent:
# Start spinner for CLI mode (skip when TUI handles tool progress)
spinner = None
if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner():
if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
spinner = KawaiiSpinner(f"{face} ⚡ running {num_tools} tools concurrently", spinner_type='dots', print_fn=self._print_fn)
spinner.start()
@ -6378,7 +6417,7 @@ class AIAgent:
logging.debug(f"Tool result ({len(function_result)} chars): {function_result}")
# Print cute message per tool
if self.quiet_mode:
if self._should_emit_quiet_tool_messages():
cute_msg = _get_cute_tool_message_impl(name, args, tool_duration, result=function_result)
self._safe_print(f" {cute_msg}")
elif not self.quiet_mode:
@ -6535,7 +6574,7 @@ class AIAgent:
store=self._todo_store,
)
tool_duration = time.time() - tool_start_time
if self.quiet_mode:
if self._should_emit_quiet_tool_messages():
self._vprint(f" {_get_cute_tool_message_impl('todo', function_args, tool_duration, result=function_result)}")
elif function_name == "session_search":
if not self._session_db:
@ -6550,7 +6589,7 @@ class AIAgent:
current_session_id=self.session_id,
)
tool_duration = time.time() - tool_start_time
if self.quiet_mode:
if self._should_emit_quiet_tool_messages():
self._vprint(f" {_get_cute_tool_message_impl('session_search', function_args, tool_duration, result=function_result)}")
elif function_name == "memory":
target = function_args.get("target", "memory")
@ -6563,7 +6602,7 @@ class AIAgent:
store=self._memory_store,
)
tool_duration = time.time() - tool_start_time
if self.quiet_mode:
if self._should_emit_quiet_tool_messages():
self._vprint(f" {_get_cute_tool_message_impl('memory', function_args, tool_duration, result=function_result)}")
elif function_name == "clarify":
from tools.clarify_tool import clarify_tool as _clarify_tool
@ -6573,7 +6612,7 @@ class AIAgent:
callback=self.clarify_callback,
)
tool_duration = time.time() - tool_start_time
if self.quiet_mode:
if self._should_emit_quiet_tool_messages():
self._vprint(f" {_get_cute_tool_message_impl('clarify', function_args, tool_duration, result=function_result)}")
elif function_name == "delegate_task":
from tools.delegate_tool import delegate_task as _delegate_task
@ -6584,7 +6623,7 @@ class AIAgent:
goal_preview = (function_args.get("goal") or "")[:30]
spinner_label = f"🔀 {goal_preview}" if goal_preview else "🔀 delegating"
spinner = None
if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner():
if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
spinner = KawaiiSpinner(f"{face} {spinner_label}", spinner_type='dots', print_fn=self._print_fn)
spinner.start()
@ -6606,13 +6645,13 @@ class AIAgent:
cute_msg = _get_cute_tool_message_impl('delegate_task', function_args, tool_duration, result=_delegate_result)
if spinner:
spinner.stop(cute_msg)
elif self.quiet_mode:
elif self._should_emit_quiet_tool_messages():
self._vprint(f" {cute_msg}")
elif self._memory_manager and self._memory_manager.has_tool(function_name):
# Memory provider tools (hindsight_retain, honcho_search, etc.)
# These are not in the tool registry — route through MemoryManager.
spinner = None
if self.quiet_mode and not self.tool_progress_callback:
if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
emoji = _get_tool_emoji(function_name)
preview = _build_tool_preview(function_name, function_args) or function_name
@ -6630,11 +6669,11 @@ class AIAgent:
cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_mem_result)
if spinner:
spinner.stop(cute_msg)
elif self.quiet_mode:
elif self._should_emit_quiet_tool_messages():
self._vprint(f" {cute_msg}")
elif self.quiet_mode:
spinner = None
if not self.tool_progress_callback:
if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner():
face = random.choice(KawaiiSpinner.KAWAII_WAITING)
emoji = _get_tool_emoji(function_name)
preview = _build_tool_preview(function_name, function_args) or function_name
@ -6657,7 +6696,7 @@ class AIAgent:
cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_spinner_result)
if spinner:
spinner.stop(cute_msg)
else:
elif self._should_emit_quiet_tool_messages():
self._vprint(f" {cute_msg}")
else:
try:
@ -8295,6 +8334,48 @@ class AIAgent:
compressor = self.context_compressor
old_ctx = compressor.context_length
# ── Distinguish two very different errors ───────────
# 1. "Prompt too long": the INPUT exceeds the context window.
# Fix: reduce context_length + compress history.
# 2. "max_tokens too large": input is fine, but
# input_tokens + requested max_tokens > context_window.
# Fix: reduce max_tokens (the OUTPUT cap) for this call.
# Do NOT shrink context_length — the window is unchanged.
#
# Note: max_tokens = output token cap (one response).
# context_length = total window (input + output combined).
available_out = parse_available_output_tokens_from_error(error_msg)
if available_out is not None:
# Error is purely about the output cap being too large.
# Cap output to the available space and retry without
# touching context_length or triggering compression.
safe_out = max(1, available_out - 64) # small safety margin
self._ephemeral_max_output_tokens = safe_out
self._vprint(
f"{self.log_prefix}⚠️ Output cap too large for current prompt — "
f"retrying with max_tokens={safe_out:,} "
f"(available_tokens={available_out:,}; context_length unchanged at {old_ctx:,})",
force=True,
)
# Still count against compression_attempts so we don't
# loop forever if the error keeps recurring.
compression_attempts += 1
if compression_attempts > max_compression_attempts:
self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached.", force=True)
self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True)
logging.error(f"{self.log_prefix}Context compression failed after {max_compression_attempts} attempts.")
self._persist_session(messages, conversation_history)
return {
"messages": messages,
"completed": False,
"api_calls": api_call_count,
"error": f"Context length exceeded: max compression attempts ({max_compression_attempts}) reached.",
"partial": True
}
restart_with_compressed_messages = True
break
# Error is about the INPUT being too large — reduce context_length.
# Try to parse the actual limit from the error message
parsed_limit = parse_context_limit_from_error(error_msg)
if parsed_limit and parsed_limit < old_ctx:

View file

@ -2,8 +2,8 @@
# ============================================================================
# Hermes Agent Installer
# ============================================================================
# Installation script for Linux and macOS.
# Uses uv for fast Python provisioning and package management.
# Installation script for Linux, macOS, and Android/Termux.
# Uses uv for desktop/server installs and Python's stdlib venv + pip on Termux.
#
# Usage:
# curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
@ -117,6 +117,36 @@ log_error() {
echo -e "${RED}${NC} $1"
}
is_termux() {
[ -n "${TERMUX_VERSION:-}" ] || [[ "${PREFIX:-}" == *"com.termux/files/usr"* ]]
}
get_command_link_dir() {
if is_termux && [ -n "${PREFIX:-}" ]; then
echo "$PREFIX/bin"
else
echo "$HOME/.local/bin"
fi
}
get_command_link_display_dir() {
if is_termux && [ -n "${PREFIX:-}" ]; then
echo '$PREFIX/bin'
else
echo '~/.local/bin'
fi
}
get_hermes_command_path() {
local link_dir
link_dir="$(get_command_link_dir)"
if [ -x "$link_dir/hermes" ]; then
echo "$link_dir/hermes"
else
echo "hermes"
fi
}
# ============================================================================
# System detection
# ============================================================================
@ -124,12 +154,17 @@ log_error() {
detect_os() {
case "$(uname -s)" in
Linux*)
OS="linux"
if [ -f /etc/os-release ]; then
. /etc/os-release
DISTRO="$ID"
if is_termux; then
OS="android"
DISTRO="termux"
else
DISTRO="unknown"
OS="linux"
if [ -f /etc/os-release ]; then
. /etc/os-release
DISTRO="$ID"
else
DISTRO="unknown"
fi
fi
;;
Darwin*)
@ -158,6 +193,12 @@ detect_os() {
# ============================================================================
install_uv() {
if [ "$DISTRO" = "termux" ]; then
log_info "Termux detected — using Python's stdlib venv + pip instead of uv"
UV_CMD=""
return 0
fi
log_info "Checking for uv package manager..."
# Check common locations for uv
@ -209,6 +250,25 @@ install_uv() {
}
check_python() {
if [ "$DISTRO" = "termux" ]; then
log_info "Checking Termux Python..."
if command -v python >/dev/null 2>&1; then
PYTHON_PATH="$(command -v python)"
if "$PYTHON_PATH" -c 'import sys; raise SystemExit(0 if sys.version_info >= (3, 11) else 1)' 2>/dev/null; then
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
log_success "Python found: $PYTHON_FOUND_VERSION"
return 0
fi
fi
log_info "Installing Python via pkg..."
pkg install -y python >/dev/null
PYTHON_PATH="$(command -v python)"
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
log_success "Python installed: $PYTHON_FOUND_VERSION"
return 0
fi
log_info "Checking Python $PYTHON_VERSION..."
# Let uv handle Python — it can download and manage Python versions
@ -243,6 +303,17 @@ check_git() {
fi
log_error "Git not found"
if [ "$DISTRO" = "termux" ]; then
log_info "Installing Git via pkg..."
pkg install -y git >/dev/null
if command -v git >/dev/null 2>&1; then
GIT_VERSION=$(git --version | awk '{print $3}')
log_success "Git $GIT_VERSION installed"
return 0
fi
fi
log_info "Please install Git:"
case "$OS" in
@ -262,6 +333,9 @@ check_git() {
;;
esac
;;
android)
log_info " pkg install git"
;;
macos)
log_info " xcode-select --install"
log_info " Or: brew install git"
@ -290,11 +364,29 @@ check_node() {
return 0
fi
log_info "Node.js not found — installing Node.js $NODE_VERSION LTS..."
if [ "$DISTRO" = "termux" ]; then
log_info "Node.js not found — installing Node.js via pkg..."
else
log_info "Node.js not found — installing Node.js $NODE_VERSION LTS..."
fi
install_node
}
install_node() {
if [ "$DISTRO" = "termux" ]; then
log_info "Installing Node.js via pkg..."
if pkg install -y nodejs >/dev/null; then
local installed_ver
installed_ver=$(node --version 2>/dev/null)
log_success "Node.js $installed_ver installed via pkg"
HAS_NODE=true
else
log_warn "Failed to install Node.js via pkg"
HAS_NODE=false
fi
return 0
fi
local arch=$(uname -m)
local node_arch
case "$arch" in
@ -413,6 +505,30 @@ install_system_packages() {
need_ffmpeg=true
fi
# Termux always needs the Android build toolchain for the tested pip path,
# even when ripgrep/ffmpeg are already present.
if [ "$DISTRO" = "termux" ]; then
local termux_pkgs=(clang rust make pkg-config libffi openssl)
if [ "$need_ripgrep" = true ]; then
termux_pkgs+=("ripgrep")
fi
if [ "$need_ffmpeg" = true ]; then
termux_pkgs+=("ffmpeg")
fi
log_info "Installing Termux packages: ${termux_pkgs[*]}"
if pkg install -y "${termux_pkgs[@]}" >/dev/null; then
[ "$need_ripgrep" = true ] && HAS_RIPGREP=true && log_success "ripgrep installed"
[ "$need_ffmpeg" = true ] && HAS_FFMPEG=true && log_success "ffmpeg installed"
log_success "Termux build dependencies installed"
return 0
fi
log_warn "Could not auto-install all Termux packages"
log_info "Install manually: pkg install ${termux_pkgs[*]}"
return 0
fi
# Nothing to install — done
if [ "$need_ripgrep" = false ] && [ "$need_ffmpeg" = false ]; then
return 0
@ -550,6 +666,9 @@ show_manual_install_hint() {
*) log_info " Use your package manager or visit the project homepage" ;;
esac
;;
android)
log_info " pkg install $pkg"
;;
macos) log_info " brew install $pkg" ;;
esac
}
@ -646,6 +765,19 @@ setup_venv() {
return 0
fi
if [ "$DISTRO" = "termux" ]; then
log_info "Creating virtual environment with Termux Python..."
if [ -d "venv" ]; then
log_info "Virtual environment already exists, recreating..."
rm -rf venv
fi
"$PYTHON_PATH" -m venv venv
log_success "Virtual environment ready ($(./venv/bin/python --version 2>/dev/null))"
return 0
fi
log_info "Creating virtual environment with Python $PYTHON_VERSION..."
if [ -d "venv" ]; then
@ -662,6 +794,46 @@ setup_venv() {
install_deps() {
log_info "Installing dependencies..."
if [ "$DISTRO" = "termux" ]; then
if [ "$USE_VENV" = true ]; then
export VIRTUAL_ENV="$INSTALL_DIR/venv"
PIP_PYTHON="$INSTALL_DIR/venv/bin/python"
else
PIP_PYTHON="$PYTHON_PATH"
fi
if [ -z "${ANDROID_API_LEVEL:-}" ]; then
ANDROID_API_LEVEL="$(getprop ro.build.version.sdk 2>/dev/null || true)"
if [ -z "$ANDROID_API_LEVEL" ]; then
ANDROID_API_LEVEL=24
fi
export ANDROID_API_LEVEL
log_info "Using ANDROID_API_LEVEL=$ANDROID_API_LEVEL for Android wheel builds"
fi
"$PIP_PYTHON" -m pip install --upgrade pip setuptools wheel >/dev/null
if ! "$PIP_PYTHON" -m pip install -e '.[termux]' -c constraints-termux.txt; then
log_warn "Termux feature install (.[termux]) failed, trying base install..."
if ! "$PIP_PYTHON" -m pip install -e '.' -c constraints-termux.txt; then
log_error "Package installation failed on Termux."
log_info "Ensure these packages are installed: pkg install clang rust make pkg-config libffi openssl"
log_info "Then re-run: cd $INSTALL_DIR && python -m pip install -e '.[termux]' -c constraints-termux.txt"
exit 1
fi
fi
log_success "Main package installed"
log_info "Termux note: browser/WhatsApp tooling is not installed by default; see the Termux guide for optional follow-up steps."
if [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then
log_info "tinker-atropos submodule found — skipping install (optional, for RL training)"
log_info " To install later: $PIP_PYTHON -m pip install -e \"./tinker-atropos\""
fi
log_success "All dependencies installed"
return 0
fi
if [ "$USE_VENV" = true ]; then
# Tell uv to install into our venv (no need to activate)
export VIRTUAL_ENV="$INSTALL_DIR/venv"
@ -743,19 +915,35 @@ setup_path() {
if [ ! -x "$HERMES_BIN" ]; then
log_warn "hermes entry point not found at $HERMES_BIN"
log_info "This usually means the pip install didn't complete successfully."
log_info "Try: cd $INSTALL_DIR && uv pip install -e '.[all]'"
if [ "$DISTRO" = "termux" ]; then
log_info "Try: cd $INSTALL_DIR && python -m pip install -e '.[termux]' -c constraints-termux.txt"
else
log_info "Try: cd $INSTALL_DIR && uv pip install -e '.[all]'"
fi
return 0
fi
# Create symlink in ~/.local/bin (standard user binary location, usually on PATH)
mkdir -p "$HOME/.local/bin"
ln -sf "$HERMES_BIN" "$HOME/.local/bin/hermes"
log_success "Symlinked hermes → ~/.local/bin/hermes"
local command_link_dir
local command_link_display_dir
command_link_dir="$(get_command_link_dir)"
command_link_display_dir="$(get_command_link_display_dir)"
# Create a user-facing shim for the hermes command.
mkdir -p "$command_link_dir"
ln -sf "$HERMES_BIN" "$command_link_dir/hermes"
log_success "Symlinked hermes → $command_link_display_dir/hermes"
if [ "$DISTRO" = "termux" ]; then
export PATH="$command_link_dir:$PATH"
log_info "$command_link_display_dir is the native Termux command path"
log_success "hermes command ready"
return 0
fi
# Check if ~/.local/bin is on PATH; if not, add it to shell config.
# Detect the user's actual login shell (not the shell running this script,
# which is always bash when piped from curl).
if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then
if ! echo "$PATH" | tr ':' '\n' | grep -q "^$command_link_dir$"; then
SHELL_CONFIGS=()
LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")"
case "$LOGIN_SHELL" in
@ -801,7 +989,7 @@ setup_path() {
fi
# Export for current session so hermes works immediately
export PATH="$HOME/.local/bin:$PATH"
export PATH="$command_link_dir:$PATH"
log_success "hermes command ready"
}
@ -878,6 +1066,13 @@ install_node_deps() {
return 0
fi
if [ "$DISTRO" = "termux" ]; then
log_info "Skipping automatic Node/browser dependency setup on Termux"
log_info "Browser automation and WhatsApp bridge are not part of the tested Termux install path yet."
log_info "If you want to experiment manually later, run: cd $INSTALL_DIR && npm install"
return 0
fi
if [ -f "$INSTALL_DIR/package.json" ]; then
log_info "Installing Node.js dependencies (browser tools)..."
cd "$INSTALL_DIR"
@ -1002,8 +1197,7 @@ maybe_start_gateway() {
read -p "Pair WhatsApp now? [Y/n] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then
HERMES_CMD="$HOME/.local/bin/hermes"
[ ! -x "$HERMES_CMD" ] && HERMES_CMD="hermes"
HERMES_CMD="$(get_hermes_command_path)"
$HERMES_CMD whatsapp || true
fi
else
@ -1017,16 +1211,17 @@ maybe_start_gateway() {
fi
echo ""
read -p "Would you like to install the gateway as a background service? [Y/n] " -n 1 -r < /dev/tty
if [ "$DISTRO" = "termux" ]; then
read -p "Would you like to start the gateway in the background? [Y/n] " -n 1 -r < /dev/tty
else
read -p "Would you like to install the gateway as a background service? [Y/n] " -n 1 -r < /dev/tty
fi
echo
if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then
HERMES_CMD="$HOME/.local/bin/hermes"
if [ ! -x "$HERMES_CMD" ]; then
HERMES_CMD="hermes"
fi
HERMES_CMD="$(get_hermes_command_path)"
if command -v systemctl &> /dev/null; then
if [ "$DISTRO" != "termux" ] && command -v systemctl &> /dev/null; then
log_info "Installing systemd service..."
if $HERMES_CMD gateway install 2>/dev/null; then
log_success "Gateway service installed"
@ -1039,12 +1234,19 @@ maybe_start_gateway() {
log_warn "Systemd install failed. You can start manually: hermes gateway"
fi
else
log_info "systemd not available — starting gateway in background..."
if [ "$DISTRO" = "termux" ]; then
log_info "Termux detected — starting gateway in best-effort background mode..."
else
log_info "systemd not available — starting gateway in background..."
fi
nohup $HERMES_CMD gateway > "$HERMES_HOME/logs/gateway.log" 2>&1 &
GATEWAY_PID=$!
log_success "Gateway started (PID $GATEWAY_PID). Logs: ~/.hermes/logs/gateway.log"
log_info "To stop: kill $GATEWAY_PID"
log_info "To restart later: hermes gateway"
if [ "$DISTRO" = "termux" ]; then
log_warn "Android may stop background processes when Termux is suspended or the system reclaims resources."
fi
fi
else
log_info "Skipped. Start the gateway later with: hermes gateway"
@ -1083,24 +1285,33 @@ print_success() {
echo -e "${CYAN}─────────────────────────────────────────────────────────${NC}"
echo ""
echo -e "${YELLOW}⚡ Reload your shell to use 'hermes' command:${NC}"
echo ""
LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")"
if [ "$LOGIN_SHELL" = "zsh" ]; then
echo " source ~/.zshrc"
elif [ "$LOGIN_SHELL" = "bash" ]; then
echo " source ~/.bashrc"
if [ "$DISTRO" = "termux" ]; then
echo -e "${YELLOW}⚡ 'hermes' was linked into $(get_command_link_display_dir), which is already on PATH in Termux.${NC}"
echo ""
else
echo " source ~/.bashrc # or ~/.zshrc"
echo -e "${YELLOW}⚡ Reload your shell to use 'hermes' command:${NC}"
echo ""
LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")"
if [ "$LOGIN_SHELL" = "zsh" ]; then
echo " source ~/.zshrc"
elif [ "$LOGIN_SHELL" = "bash" ]; then
echo " source ~/.bashrc"
else
echo " source ~/.bashrc # or ~/.zshrc"
fi
echo ""
fi
echo ""
# Show Node.js warning if auto-install failed
if [ "$HAS_NODE" = false ]; then
echo -e "${YELLOW}"
echo "Note: Node.js could not be installed automatically."
echo "Browser tools need Node.js. Install manually:"
echo " https://nodejs.org/en/download/"
if [ "$DISTRO" = "termux" ]; then
echo " pkg install nodejs"
else
echo " https://nodejs.org/en/download/"
fi
echo -e "${NC}"
fi
@ -1109,7 +1320,11 @@ print_success() {
echo -e "${YELLOW}"
echo "Note: ripgrep (rg) was not found. File search will use"
echo "grep as a fallback. For faster search in large codebases,"
echo "install ripgrep: sudo apt install ripgrep (or brew install ripgrep)"
if [ "$DISTRO" = "termux" ]; then
echo "install ripgrep: pkg install ripgrep"
else
echo "install ripgrep: sudo apt install ripgrep (or brew install ripgrep)"
fi
echo -e "${NC}"
fi
}

View file

@ -3,17 +3,17 @@
# Hermes Agent Setup Script
# ============================================================================
# Quick setup for developers who cloned the repo manually.
# Uses uv for fast Python provisioning and package management.
# Uses uv for desktop/server setup and Python's stdlib venv + pip on Termux.
#
# Usage:
# ./setup-hermes.sh
#
# This script:
# 1. Installs uv if not present
# 2. Creates a virtual environment with Python 3.11 via uv
# 3. Installs all dependencies (main package + submodules)
# 1. Detects desktop/server vs Android/Termux setup path
# 2. Creates a Python 3.11 virtual environment
# 3. Installs the appropriate dependency set for the platform
# 4. Creates .env from template (if not exists)
# 5. Symlinks the 'hermes' CLI command into ~/.local/bin
# 5. Symlinks the 'hermes' CLI command into a user-facing bin dir
# 6. Runs the setup wizard (optional)
# ============================================================================
@ -31,6 +31,26 @@ cd "$SCRIPT_DIR"
PYTHON_VERSION="3.11"
# True (exit status 0) iff this shell is running inside Termux on Android:
# TERMUX_VERSION is exported, or $PREFIX lives in the com.termux sandbox.
is_termux() {
    [ -n "${TERMUX_VERSION:-}" ] && return 0
    [[ "${PREFIX:-}" == *"com.termux/files/usr"* ]]
}
get_command_link_dir() {
    # Bin directory for the hermes symlink: Termux prefix bin when on
    # Android/Termux, otherwise the standard per-user ~/.local/bin.
    if is_termux && [ -n "${PREFIX:-}" ]; then
        printf '%s\n' "$PREFIX/bin"
    else
        printf '%s\n' "$HOME/.local/bin"
    fi
}
get_command_link_display_dir() {
    # Display string matching get_command_link_dir, kept unexpanded
    # ('$PREFIX/bin' / '~/.local/bin') so log messages stay short.
    local label='~/.local/bin'
    if is_termux && [ -n "${PREFIX:-}" ]; then
        label='$PREFIX/bin'
    fi
    echo "$label"
}
echo ""
echo -e "${CYAN}⚕ Hermes Agent Setup${NC}"
echo ""
@ -42,36 +62,40 @@ echo ""
echo -e "${CYAN}${NC} Checking for uv..."
UV_CMD=""
if command -v uv &> /dev/null; then
UV_CMD="uv"
elif [ -x "$HOME/.local/bin/uv" ]; then
UV_CMD="$HOME/.local/bin/uv"
elif [ -x "$HOME/.cargo/bin/uv" ]; then
UV_CMD="$HOME/.cargo/bin/uv"
fi
if [ -n "$UV_CMD" ]; then
UV_VERSION=$($UV_CMD --version 2>/dev/null)
echo -e "${GREEN}${NC} uv found ($UV_VERSION)"
if is_termux; then
echo -e "${CYAN}${NC} Termux detected — using Python's stdlib venv + pip instead of uv"
else
echo -e "${CYAN}${NC} Installing uv..."
if curl -LsSf https://astral.sh/uv/install.sh | sh 2>/dev/null; then
if [ -x "$HOME/.local/bin/uv" ]; then
UV_CMD="$HOME/.local/bin/uv"
elif [ -x "$HOME/.cargo/bin/uv" ]; then
UV_CMD="$HOME/.cargo/bin/uv"
fi
if [ -n "$UV_CMD" ]; then
UV_VERSION=$($UV_CMD --version 2>/dev/null)
echo -e "${GREEN}${NC} uv installed ($UV_VERSION)"
if command -v uv &> /dev/null; then
UV_CMD="uv"
elif [ -x "$HOME/.local/bin/uv" ]; then
UV_CMD="$HOME/.local/bin/uv"
elif [ -x "$HOME/.cargo/bin/uv" ]; then
UV_CMD="$HOME/.cargo/bin/uv"
fi
if [ -n "$UV_CMD" ]; then
UV_VERSION=$($UV_CMD --version 2>/dev/null)
echo -e "${GREEN}${NC} uv found ($UV_VERSION)"
else
echo -e "${CYAN}${NC} Installing uv..."
if curl -LsSf https://astral.sh/uv/install.sh | sh 2>/dev/null; then
if [ -x "$HOME/.local/bin/uv" ]; then
UV_CMD="$HOME/.local/bin/uv"
elif [ -x "$HOME/.cargo/bin/uv" ]; then
UV_CMD="$HOME/.cargo/bin/uv"
fi
if [ -n "$UV_CMD" ]; then
UV_VERSION=$($UV_CMD --version 2>/dev/null)
echo -e "${GREEN}${NC} uv installed ($UV_VERSION)"
else
echo -e "${RED}${NC} uv installed but not found. Add ~/.local/bin to PATH and retry."
exit 1
fi
else
echo -e "${RED}${NC} uv installed but not found. Add ~/.local/bin to PATH and retry."
echo -e "${RED}${NC} Failed to install uv. Visit https://docs.astral.sh/uv/"
exit 1
fi
else
echo -e "${RED}${NC} Failed to install uv. Visit https://docs.astral.sh/uv/"
exit 1
fi
fi
@ -81,16 +105,34 @@ fi
echo -e "${CYAN}${NC} Checking Python $PYTHON_VERSION..."
if $UV_CMD python find "$PYTHON_VERSION" &> /dev/null; then
PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION")
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
echo -e "${GREEN}${NC} $PYTHON_FOUND_VERSION found"
if is_termux; then
if command -v python >/dev/null 2>&1; then
PYTHON_PATH="$(command -v python)"
if "$PYTHON_PATH" -c 'import sys; raise SystemExit(0 if sys.version_info >= (3, 11) else 1)' 2>/dev/null; then
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
echo -e "${GREEN}${NC} $PYTHON_FOUND_VERSION found"
else
echo -e "${RED}${NC} Termux Python must be 3.11+"
echo " Run: pkg install python"
exit 1
fi
else
echo -e "${RED}${NC} Python not found in Termux"
echo " Run: pkg install python"
exit 1
fi
else
echo -e "${CYAN}${NC} Python $PYTHON_VERSION not found, installing via uv..."
$UV_CMD python install "$PYTHON_VERSION"
PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION")
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
echo -e "${GREEN}${NC} $PYTHON_FOUND_VERSION installed"
if $UV_CMD python find "$PYTHON_VERSION" &> /dev/null; then
PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION")
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
echo -e "${GREEN}${NC} $PYTHON_FOUND_VERSION found"
else
echo -e "${CYAN}${NC} Python $PYTHON_VERSION not found, installing via uv..."
$UV_CMD python install "$PYTHON_VERSION"
PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION")
PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null)
echo -e "${GREEN}${NC} $PYTHON_FOUND_VERSION installed"
fi
fi
# ============================================================================
@ -104,11 +146,16 @@ if [ -d "venv" ]; then
rm -rf venv
fi
$UV_CMD venv venv --python "$PYTHON_VERSION"
echo -e "${GREEN}${NC} venv created (Python $PYTHON_VERSION)"
if is_termux; then
"$PYTHON_PATH" -m venv venv
echo -e "${GREEN}${NC} venv created with stdlib venv"
else
$UV_CMD venv venv --python "$PYTHON_VERSION"
echo -e "${GREEN}${NC} venv created (Python $PYTHON_VERSION)"
fi
# Tell uv to install into this venv (no activation needed for uv)
export VIRTUAL_ENV="$SCRIPT_DIR/venv"
SETUP_PYTHON="$SCRIPT_DIR/venv/bin/python"
# ============================================================================
# Dependencies
@ -116,19 +163,34 @@ export VIRTUAL_ENV="$SCRIPT_DIR/venv"
echo -e "${CYAN}${NC} Installing dependencies..."
# Prefer uv sync with lockfile (hash-verified installs) when available,
# fall back to pip install for compatibility or when lockfile is stale.
if [ -f "uv.lock" ]; then
echo -e "${CYAN}${NC} Using uv.lock for hash-verified installation..."
UV_PROJECT_ENVIRONMENT="$SCRIPT_DIR/venv" $UV_CMD sync --all-extras --locked 2>/dev/null && \
echo -e "${GREEN}${NC} Dependencies installed (lockfile verified)" || {
echo -e "${YELLOW}${NC} Lockfile install failed (may be outdated), falling back to pip install..."
if is_termux; then
export ANDROID_API_LEVEL="$(getprop ro.build.version.sdk 2>/dev/null || printf '%s' "${ANDROID_API_LEVEL:-}")"
echo -e "${CYAN}${NC} Termux detected — installing the tested Android bundle"
"$SETUP_PYTHON" -m pip install --upgrade pip setuptools wheel
if [ -f "constraints-termux.txt" ]; then
"$SETUP_PYTHON" -m pip install -e ".[termux]" -c constraints-termux.txt || {
echo -e "${YELLOW}${NC} Termux bundle install failed, falling back to base install..."
"$SETUP_PYTHON" -m pip install -e "." -c constraints-termux.txt
}
else
"$SETUP_PYTHON" -m pip install -e ".[termux]" || "$SETUP_PYTHON" -m pip install -e "."
fi
echo -e "${GREEN}${NC} Dependencies installed"
else
# Prefer uv sync with lockfile (hash-verified installs) when available,
# fall back to pip install for compatibility or when lockfile is stale.
if [ -f "uv.lock" ]; then
echo -e "${CYAN}${NC} Using uv.lock for hash-verified installation..."
UV_PROJECT_ENVIRONMENT="$SCRIPT_DIR/venv" $UV_CMD sync --all-extras --locked 2>/dev/null && \
echo -e "${GREEN}${NC} Dependencies installed (lockfile verified)" || {
echo -e "${YELLOW}${NC} Lockfile install failed (may be outdated), falling back to pip install..."
$UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "."
echo -e "${GREEN}${NC} Dependencies installed"
}
else
$UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "."
echo -e "${GREEN}${NC} Dependencies installed"
}
else
$UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "."
echo -e "${GREEN}${NC} Dependencies installed"
fi
fi
# ============================================================================
@ -138,7 +200,9 @@ fi
echo -e "${CYAN}${NC} Installing optional submodules..."
# tinker-atropos (RL training backend)
if [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then
if is_termux; then
echo -e "${CYAN}${NC} Skipping tinker-atropos on Termux (not part of the tested Android path)"
elif [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then
$UV_CMD pip install -e "./tinker-atropos" && \
echo -e "${GREEN}${NC} tinker-atropos installed" || \
echo -e "${YELLOW}${NC} tinker-atropos install failed (RL tools may not work)"
@ -160,34 +224,42 @@ else
echo
if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then
INSTALLED=false
# Check if sudo is available
if command -v sudo &> /dev/null && sudo -n true 2>/dev/null; then
if command -v apt &> /dev/null; then
sudo apt install -y ripgrep && INSTALLED=true
elif command -v dnf &> /dev/null; then
sudo dnf install -y ripgrep && INSTALLED=true
if is_termux; then
pkg install -y ripgrep && INSTALLED=true
else
# Check if sudo is available
if command -v sudo &> /dev/null && sudo -n true 2>/dev/null; then
if command -v apt &> /dev/null; then
sudo apt install -y ripgrep && INSTALLED=true
elif command -v dnf &> /dev/null; then
sudo dnf install -y ripgrep && INSTALLED=true
fi
fi
# Try brew (no sudo needed)
if [ "$INSTALLED" = false ] && command -v brew &> /dev/null; then
brew install ripgrep && INSTALLED=true
fi
# Try cargo (no sudo needed)
if [ "$INSTALLED" = false ] && command -v cargo &> /dev/null; then
echo -e "${CYAN}${NC} Trying cargo install (no sudo required)..."
cargo install ripgrep && INSTALLED=true
fi
fi
# Try brew (no sudo needed)
if [ "$INSTALLED" = false ] && command -v brew &> /dev/null; then
brew install ripgrep && INSTALLED=true
fi
# Try cargo (no sudo needed)
if [ "$INSTALLED" = false ] && command -v cargo &> /dev/null; then
echo -e "${CYAN}${NC} Trying cargo install (no sudo required)..."
cargo install ripgrep && INSTALLED=true
fi
if [ "$INSTALLED" = true ]; then
echo -e "${GREEN}${NC} ripgrep installed"
else
echo -e "${YELLOW}${NC} Auto-install failed. Install options:"
echo " sudo apt install ripgrep # Debian/Ubuntu"
echo " brew install ripgrep # macOS"
echo " cargo install ripgrep # With Rust (no sudo)"
if is_termux; then
echo " pkg install ripgrep # Termux / Android"
else
echo " sudo apt install ripgrep # Debian/Ubuntu"
echo " brew install ripgrep # macOS"
echo " cargo install ripgrep # With Rust (no sudo)"
fi
echo " https://github.com/BurntSushi/ripgrep#installation"
fi
fi
@ -207,49 +279,56 @@ else
fi
# ============================================================================
# PATH setup — symlink hermes into ~/.local/bin
# PATH setup — symlink hermes into a user-facing bin dir
# ============================================================================
echo -e "${CYAN}${NC} Setting up hermes command..."
HERMES_BIN="$SCRIPT_DIR/venv/bin/hermes"
mkdir -p "$HOME/.local/bin"
ln -sf "$HERMES_BIN" "$HOME/.local/bin/hermes"
echo -e "${GREEN}${NC} Symlinked hermes → ~/.local/bin/hermes"
COMMAND_LINK_DIR="$(get_command_link_dir)"
COMMAND_LINK_DISPLAY_DIR="$(get_command_link_display_dir)"
mkdir -p "$COMMAND_LINK_DIR"
ln -sf "$HERMES_BIN" "$COMMAND_LINK_DIR/hermes"
echo -e "${GREEN}${NC} Symlinked hermes → $COMMAND_LINK_DISPLAY_DIR/hermes"
# Determine the appropriate shell config file
SHELL_CONFIG=""
if [[ "$SHELL" == *"zsh"* ]]; then
SHELL_CONFIG="$HOME/.zshrc"
elif [[ "$SHELL" == *"bash"* ]]; then
SHELL_CONFIG="$HOME/.bashrc"
[ ! -f "$SHELL_CONFIG" ] && SHELL_CONFIG="$HOME/.bash_profile"
if is_termux; then
export PATH="$COMMAND_LINK_DIR:$PATH"
echo -e "${GREEN}${NC} $COMMAND_LINK_DISPLAY_DIR is already on PATH in Termux"
else
# Fallback to checking existing files
if [ -f "$HOME/.zshrc" ]; then
# Determine the appropriate shell config file
SHELL_CONFIG=""
if [[ "$SHELL" == *"zsh"* ]]; then
SHELL_CONFIG="$HOME/.zshrc"
elif [ -f "$HOME/.bashrc" ]; then
elif [[ "$SHELL" == *"bash"* ]]; then
SHELL_CONFIG="$HOME/.bashrc"
elif [ -f "$HOME/.bash_profile" ]; then
SHELL_CONFIG="$HOME/.bash_profile"
fi
fi
if [ -n "$SHELL_CONFIG" ]; then
# Touch the file just in case it doesn't exist yet but was selected
touch "$SHELL_CONFIG" 2>/dev/null || true
if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then
if ! grep -q '\.local/bin' "$SHELL_CONFIG" 2>/dev/null; then
echo "" >> "$SHELL_CONFIG"
echo "# Hermes Agent — ensure ~/.local/bin is on PATH" >> "$SHELL_CONFIG"
echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$SHELL_CONFIG"
echo -e "${GREEN}${NC} Added ~/.local/bin to PATH in $SHELL_CONFIG"
else
echo -e "${GREEN}${NC} ~/.local/bin already in $SHELL_CONFIG"
fi
[ ! -f "$SHELL_CONFIG" ] && SHELL_CONFIG="$HOME/.bash_profile"
else
echo -e "${GREEN}${NC} ~/.local/bin already on PATH"
# Fallback to checking existing files
if [ -f "$HOME/.zshrc" ]; then
SHELL_CONFIG="$HOME/.zshrc"
elif [ -f "$HOME/.bashrc" ]; then
SHELL_CONFIG="$HOME/.bashrc"
elif [ -f "$HOME/.bash_profile" ]; then
SHELL_CONFIG="$HOME/.bash_profile"
fi
fi
if [ -n "$SHELL_CONFIG" ]; then
# Touch the file just in case it doesn't exist yet but was selected
touch "$SHELL_CONFIG" 2>/dev/null || true
if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then
if ! grep -q '\.local/bin' "$SHELL_CONFIG" 2>/dev/null; then
echo "" >> "$SHELL_CONFIG"
echo "# Hermes Agent — ensure ~/.local/bin is on PATH" >> "$SHELL_CONFIG"
echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$SHELL_CONFIG"
echo -e "${GREEN}${NC} Added ~/.local/bin to PATH in $SHELL_CONFIG"
else
echo -e "${GREEN}${NC} ~/.local/bin already in $SHELL_CONFIG"
fi
else
echo -e "${GREEN}${NC} ~/.local/bin already on PATH"
fi
fi
fi
@ -281,18 +360,31 @@ echo -e "${GREEN}✓ Setup complete!${NC}"
echo ""
echo "Next steps:"
echo ""
echo " 1. Reload your shell:"
echo " source $SHELL_CONFIG"
echo ""
echo " 2. Run the setup wizard to configure API keys:"
echo " hermes setup"
echo ""
echo " 3. Start chatting:"
echo " hermes"
echo ""
if is_termux; then
echo " 1. Run the setup wizard to configure API keys:"
echo " hermes setup"
echo ""
echo " 2. Start chatting:"
echo " hermes"
echo ""
else
echo " 1. Reload your shell:"
echo " source $SHELL_CONFIG"
echo ""
echo " 2. Run the setup wizard to configure API keys:"
echo " hermes setup"
echo ""
echo " 3. Start chatting:"
echo " hermes"
echo ""
fi
echo "Other commands:"
echo " hermes status # Check configuration"
echo " hermes gateway install # Install gateway service (messaging + cron)"
if is_termux; then
echo " hermes gateway # Run gateway in foreground"
else
echo " hermes gateway install # Install gateway service (messaging + cron)"
fi
echo " hermes cron list # View scheduled jobs"
echo " hermes doctor # Diagnose issues"
echo ""

View file

@ -249,9 +249,8 @@ Type these during an interactive chat session.
/config Show config (CLI)
/model [name] Show or change model
/provider Show provider info
/prompt [text] View/set system prompt (CLI)
/personality [name] Set personality
/reasoning [level] Set reasoning (none|low|medium|high|xhigh|show|hide)
/reasoning [level] Set reasoning (none|minimal|low|medium|high|xhigh|show|hide)
/verbose Cycle: off → new → all → verbose
/voice [on|off|tts] Voice mode
/yolo Toggle approval bypass

View file

@ -1,7 +1,7 @@
---
name: google-workspace
description: Gmail, Calendar, Drive, Contacts, Sheets, and Docs integration via Python. Uses OAuth2 with automatic token refresh. No external binaries needed — runs entirely with Google's Python client libraries in the Hermes venv.
version: 1.0.0
description: Gmail, Calendar, Drive, Contacts, Sheets, and Docs integration via gws CLI (googleworkspace/cli). Uses OAuth2 with automatic token refresh via bridge script. Requires gws binary.
version: 2.0.0
author: Nous Research
license: MIT
required_credential_files:
@ -11,14 +11,25 @@ required_credential_files:
description: Google OAuth2 client credentials (downloaded from Google Cloud Console)
metadata:
hermes:
tags: [Google, Gmail, Calendar, Drive, Sheets, Docs, Contacts, Email, OAuth]
tags: [Google, Gmail, Calendar, Drive, Sheets, Docs, Contacts, Email, OAuth, gws]
homepage: https://github.com/NousResearch/hermes-agent
related_skills: [himalaya]
---
# Google Workspace
Gmail, Calendar, Drive, Contacts, Sheets, and Docs — all through Python scripts in this skill. No external binaries to install.
Gmail, Calendar, Drive, Contacts, Sheets, and Docs — powered by `gws` (Google's official Rust CLI). The skill provides a backward-compatible Python wrapper that handles OAuth token refresh and delegates to `gws`.
## Architecture
```
google_api.py → gws_bridge.py → gws CLI
(argparse compat) (token refresh) (Google APIs)
```
- `setup.py` handles OAuth2 (headless-compatible, works on CLI/Telegram/Discord)
- `gws_bridge.py` refreshes the Hermes token and injects it into `gws` via `GOOGLE_WORKSPACE_CLI_TOKEN`
- `google_api.py` provides the same CLI interface as v1 but delegates to `gws`
## References
@ -27,7 +38,22 @@ Gmail, Calendar, Drive, Contacts, Sheets, and Docs — all through Python script
## Scripts
- `scripts/setup.py` — OAuth2 setup (run once to authorize)
- `scripts/google_api.py` — API wrapper CLI (agent uses this for all operations)
- `scripts/gws_bridge.py` — Token refresh bridge to gws CLI
- `scripts/google_api.py` — Backward-compatible API wrapper (delegates to gws)
## Prerequisites
Install `gws`:
```bash
cargo install google-workspace-cli
# or via npm (recommended, downloads prebuilt binary):
npm install -g @googleworkspace/cli
# or via Homebrew:
brew install googleworkspace-cli
```
Verify: `gws --version`
## First-Time Setup
@ -56,42 +82,29 @@ If it prints `AUTHENTICATED`, skip to Usage — setup is already done.
### Step 1: Triage — ask the user what they need
Before starting OAuth setup, ask the user TWO questions:
**Question 1: "What Google services do you need? Just email, or also
Calendar/Drive/Sheets/Docs?"**
- **Email only** → They don't need this skill at all. Use the `himalaya` skill
instead — it works with a Gmail App Password (Settings → Security → App
Passwords) and takes 2 minutes to set up. No Google Cloud project needed.
Load the himalaya skill and follow its setup instructions.
- **Email only** → Use the `himalaya` skill instead — simpler setup.
- **Calendar, Drive, Sheets, Docs (or email + these)** → Continue below.
- **Calendar, Drive, Sheets, Docs (or email + these)** → Continue with this
skill's OAuth setup below.
**Partial scopes**: Users can authorize only a subset of services. The setup
script accepts partial scopes and warns about missing ones.
**Question 2: "Does your Google account use Advanced Protection (hardware
security keys required to sign in)? If you're not sure, you probably don't
— it's something you would have explicitly enrolled in."**
**Question 2: "Does your Google account use Advanced Protection?"**
- **No / Not sure** → Normal setup. Continue below.
- **Yes** → Their Workspace admin must add the OAuth client ID to the org's
allowed apps list before Step 4 will work. Let them know upfront.
- **No / Not sure** → Normal setup.
- **Yes** → Workspace admin must add the OAuth client ID to allowed apps first.
### Step 2: Create OAuth credentials (one-time, ~5 minutes)
Tell the user:
> You need a Google Cloud OAuth client. This is a one-time setup:
>
> 1. Go to https://console.cloud.google.com/apis/credentials
> 2. Create a project (or use an existing one)
> 3. Click "Enable APIs" and enable: Gmail API, Google Calendar API,
> Google Drive API, Google Sheets API, Google Docs API, People API
> 4. Go to Credentials → Create Credentials → OAuth 2.0 Client ID
> 5. Application type: "Desktop app" → Create
> 6. Click "Download JSON" and tell me the file path
Once they provide the path:
> 3. Enable the APIs you need (Gmail, Calendar, Drive, Sheets, Docs, People)
> 4. Credentials → Create Credentials → OAuth 2.0 Client ID → Desktop app
> 5. Download JSON and tell me the file path
```bash
$GSETUP --client-secret /path/to/client_secret.json
@ -103,20 +116,10 @@ $GSETUP --client-secret /path/to/client_secret.json
$GSETUP --auth-url
```
This prints a URL. **Send the URL to the user** and tell them:
> Open this link in your browser, sign in with your Google account, and
> authorize access. After authorizing, you'll be redirected to a page that
> may show an error — that's expected. Copy the ENTIRE URL from your
> browser's address bar and paste it back to me.
Send the URL to the user. After authorizing, they paste back the redirect URL or code.
### Step 4: Exchange the code
The user will paste back either a URL like `http://localhost:1/?code=4/0A...&scope=...`
or just the code string. Either works. The `--auth-url` step stores a temporary
pending OAuth session locally so `--auth-code` can complete the PKCE exchange
later, even on headless systems:
```bash
$GSETUP --auth-code "THE_URL_OR_CODE_THE_USER_PASTED"
```
@ -127,18 +130,11 @@ $GSETUP --auth-code "THE_URL_OR_CODE_THE_USER_PASTED"
$GSETUP --check
```
Should print `AUTHENTICATED`. Setup is complete — token refreshes automatically from now on.
### Notes
- Token is stored at `google_token.json` under the active profile's `HERMES_HOME` and auto-refreshes.
- Pending OAuth session state/verifier are stored temporarily at `google_oauth_pending.json` under the active profile's `HERMES_HOME` until exchange completes.
- Hermes now refuses to overwrite a full Google Workspace token with a narrower re-auth token missing Gmail scopes, so one profile's partial consent cannot silently break email actions later.
- To revoke: `$GSETUP --revoke`
Should print `AUTHENTICATED`. Token refreshes automatically from now on.
## Usage
All commands go through the API script. Set `GAPI` as a shorthand:
All commands go through the API script:
```bash
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
@ -153,40 +149,21 @@ GAPI="$PYTHON_BIN $GWORKSPACE_SKILL_DIR/scripts/google_api.py"
### Gmail
```bash
# Search (returns JSON array with id, from, subject, date, snippet)
$GAPI gmail search "is:unread" --max 10
$GAPI gmail search "from:boss@company.com newer_than:1d"
$GAPI gmail search "has:attachment filename:pdf newer_than:7d"
# Read full message (returns JSON with body text)
$GAPI gmail get MESSAGE_ID
# Send
$GAPI gmail send --to user@example.com --subject "Hello" --body "Message text"
$GAPI gmail send --to user@example.com --subject "Report" --body "<h1>Q4</h1><p>Details...</p>" --html
# Reply (automatically threads and sets In-Reply-To)
$GAPI gmail send --to user@example.com --subject "Report" --body "<h1>Q4</h1>" --html
$GAPI gmail reply MESSAGE_ID --body "Thanks, that works for me."
# Labels
$GAPI gmail labels
$GAPI gmail modify MESSAGE_ID --add-labels LABEL_ID
$GAPI gmail modify MESSAGE_ID --remove-labels UNREAD
```
### Calendar
```bash
# List events (defaults to next 7 days)
$GAPI calendar list
$GAPI calendar list --start 2026-03-01T00:00:00Z --end 2026-03-07T23:59:59Z
# Create event (ISO 8601 with timezone required)
$GAPI calendar create --summary "Team Standup" --start 2026-03-01T10:00:00-06:00 --end 2026-03-01T10:30:00-06:00
$GAPI calendar create --summary "Lunch" --start 2026-03-01T12:00:00Z --end 2026-03-01T13:00:00Z --location "Cafe"
$GAPI calendar create --summary "Review" --start 2026-03-01T14:00:00Z --end 2026-03-01T15:00:00Z --attendees "alice@co.com,bob@co.com"
# Delete event
$GAPI calendar create --summary "Standup" --start 2026-03-01T10:00:00+01:00 --end 2026-03-01T10:30:00+01:00
$GAPI calendar create --summary "Review" --start ... --end ... --attendees "alice@co.com,bob@co.com"
$GAPI calendar delete EVENT_ID
```
@ -206,13 +183,8 @@ $GAPI contacts list --max 20
### Sheets
```bash
# Read
$GAPI sheets get SHEET_ID "Sheet1!A1:D10"
# Write
$GAPI sheets update SHEET_ID "Sheet1!A1:B2" --values '[["Name","Score"],["Alice","95"]]'
# Append rows
$GAPI sheets append SHEET_ID "Sheet1!A:C" --values '[["new","row","data"]]'
```
@ -222,37 +194,52 @@ $GAPI sheets append SHEET_ID "Sheet1!A:C" --values '[["new","row","data"]]'
$GAPI docs get DOC_ID
```
### Direct gws access (advanced)
For operations not covered by the wrapper, use `gws_bridge.py` directly:
```bash
GBRIDGE="$PYTHON_BIN $GWORKSPACE_SKILL_DIR/scripts/gws_bridge.py"
$GBRIDGE calendar +agenda --today --format table
$GBRIDGE gmail +triage --labels --format json
$GBRIDGE drive +upload ./report.pdf
$GBRIDGE sheets +read --spreadsheet SHEET_ID --range "Sheet1!A1:D10"
```
## Output Format
All commands return JSON. Parse with `jq` or read directly. Key fields:
All commands return JSON via `gws --format json`. Key output shapes:
- **Gmail search**: `[{id, threadId, from, to, subject, date, snippet, labels}]`
- **Gmail get**: `{id, threadId, from, to, subject, date, labels, body}`
- **Gmail send/reply**: `{status: "sent", id, threadId}`
- **Calendar list**: `[{id, summary, start, end, location, description, htmlLink}]`
- **Calendar create**: `{status: "created", id, summary, htmlLink}`
- **Drive search**: `[{id, name, mimeType, modifiedTime, webViewLink}]`
- **Contacts list**: `[{name, emails: [...], phones: [...]}]`
- **Sheets get**: `[[cell, cell, ...], ...]`
- **Gmail search/triage**: Array of message summaries (sender, subject, date, snippet)
- **Gmail get/read**: Message object with headers and body text
- **Gmail send/reply**: Confirmation with message ID
- **Calendar list/agenda**: Array of event objects (summary, start, end, location)
- **Calendar create**: Confirmation with event ID and htmlLink
- **Drive search**: Array of file objects (id, name, mimeType, webViewLink)
- **Sheets get/read**: 2D array of cell values
- **Docs get**: Full document JSON (use `body.content` for text extraction)
- **Contacts list**: Array of person objects with names, emails, phones
Parse output with `jq` or read JSON directly.
## Rules
1. **Never send email or create/delete events without confirming with the user first.** Show the draft content and ask for approval.
2. **Check auth before first use** — run `setup.py --check`. If it fails, guide the user through setup.
3. **Use the Gmail search syntax reference** for complex queries — load it with `skill_view("google-workspace", file_path="references/gmail-search-syntax.md")`.
4. **Calendar times must include timezone**always use ISO 8601 with offset (e.g., `2026-03-01T10:00:00-06:00`) or UTC (`Z`).
5. **Respect rate limits** — avoid rapid-fire sequential API calls. Batch reads when possible.
1. **Never send email or create/delete events without confirming with the user first.**
2. **Check auth before first use** — run `setup.py --check`.
3. **Use the Gmail search syntax reference** for complex queries.
4. **Calendar times must include timezone**ISO 8601 with offset or UTC.
5. **Respect rate limits** — avoid rapid-fire sequential API calls.
## Troubleshooting
| Problem | Fix |
|---------|-----|
| `NOT_AUTHENTICATED` | Run setup Steps 2-5 above |
| `REFRESH_FAILED` | Token revoked or expired — redo Steps 3-5 |
| `HttpError 403: Insufficient Permission` | Missing API scope — `$GSETUP --revoke` then redo Steps 3-5 |
| `HttpError 403: Access Not Configured` | API not enabled — user needs to enable it in Google Cloud Console |
| `ModuleNotFoundError` | Run `$GSETUP --install-deps` |
| Advanced Protection blocks auth | Workspace admin must allowlist the OAuth client ID |
| `NOT_AUTHENTICATED` | Run setup Steps 2-5 |
| `REFRESH_FAILED` | Token revoked — redo Steps 3-5 |
| `gws: command not found` | Install: `npm install -g @googleworkspace/cli` |
| `HttpError 403` | Missing scope — `$GSETUP --revoke` then redo Steps 3-5 |
| `HttpError 403: Access Not Configured` | Enable API in Google Cloud Console |
| Advanced Protection blocks auth | Admin must allowlist the OAuth client ID |
## Revoking Access

View file

@ -1,16 +1,17 @@
#!/usr/bin/env python3
"""Google Workspace API CLI for Hermes Agent.
A thin CLI wrapper around Google's Python client libraries.
Authenticates using the token stored by setup.py.
Thin wrapper that delegates to gws (googleworkspace/cli) via gws_bridge.py.
Maintains the same CLI interface for backward compatibility with Hermes skills.
Usage:
python google_api.py gmail search "is:unread" [--max 10]
python google_api.py gmail get MESSAGE_ID
python google_api.py gmail send --to user@example.com --subject "Hi" --body "Hello"
python google_api.py gmail reply MESSAGE_ID --body "Thanks"
python google_api.py calendar list [--from DATE] [--to DATE] [--calendar primary]
python google_api.py calendar list [--start DATE] [--end DATE] [--calendar primary]
python google_api.py calendar create --summary "Meeting" --start DATETIME --end DATETIME
python google_api.py calendar delete EVENT_ID
python google_api.py drive search "budget report" [--max 10]
python google_api.py contacts list [--max 20]
python google_api.py sheets get SHEET_ID RANGE
@ -20,386 +21,193 @@ Usage:
"""
import argparse
import base64
import json
import os
import subprocess
import sys
from datetime import datetime, timedelta, timezone
from email.mime.text import MIMEText
from pathlib import Path
try:
from hermes_constants import display_hermes_home, get_hermes_home
except ModuleNotFoundError:
HERMES_AGENT_ROOT = Path(__file__).resolve().parents[4]
if HERMES_AGENT_ROOT.exists():
sys.path.insert(0, str(HERMES_AGENT_ROOT))
from hermes_constants import display_hermes_home, get_hermes_home
HERMES_HOME = get_hermes_home()
TOKEN_PATH = HERMES_HOME / "google_token.json"
SCOPES = [
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/gmail.send",
"https://www.googleapis.com/auth/gmail.modify",
"https://www.googleapis.com/auth/calendar",
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/contacts.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/documents.readonly",
]
BRIDGE = Path(__file__).parent / "gws_bridge.py"
PYTHON = sys.executable
def _missing_scopes() -> list[str]:
    """Return the required scopes absent from the stored token, sorted."""
    try:
        payload = json.loads(TOKEN_PATH.read_text())
    except Exception:
        # Unreadable or absent token file — report nothing missing here;
        # the not-authenticated case is handled by the auth checks.
        return []
    raw = payload.get("scopes") or payload.get("scope")
    if not raw:
        return []
    # The token may store scopes as a space-separated string or a list.
    if isinstance(raw, str):
        entries = raw.split()
    else:
        entries = raw
    granted = {entry.strip() for entry in entries if entry.strip()}
    return sorted(required for required in SCOPES if required not in granted)
def gws(*args: str) -> None:
    """Invoke gws through the bridge script and propagate its exit status."""
    hermes_home = os.environ.get("HERMES_HOME", str(Path.home() / ".hermes"))
    child_env = dict(os.environ)
    child_env["HERMES_HOME"] = hermes_home
    proc = subprocess.run([PYTHON, str(BRIDGE), *args], env=child_env)
    sys.exit(proc.returncode)
def get_credentials():
    """Load OAuth credentials from the token file, refreshing if expired.

    Exits the process (status 1) when the token file is missing, the token
    is invalid, or the token lacks scopes this skill requires.
    """
    if not TOKEN_PATH.exists():
        print("Not authenticated. Run the setup script first:", file=sys.stderr)
        print(f" python {Path(__file__).parent / 'setup.py'}", file=sys.stderr)
        sys.exit(1)

    # Imported lazily so the CLI can print a friendly error above even when
    # the Google client libraries are not installed.
    from google.oauth2.credentials import Credentials
    from google.auth.transport.requests import Request

    creds = Credentials.from_authorized_user_file(str(TOKEN_PATH), SCOPES)
    if creds.expired and creds.refresh_token:
        # Refresh and persist so later invocations reuse the new token.
        creds.refresh(Request())
        TOKEN_PATH.write_text(creds.to_json())
    if not creds.valid:
        print("Token is invalid. Re-run setup.", file=sys.stderr)
        sys.exit(1)
    missing_scopes = _missing_scopes()
    if missing_scopes:
        # A valid token may still have been authorized with fewer scopes
        # than this skill needs; refuse to continue with partial access.
        print(
            "Token is valid but missing Google Workspace scopes required by this skill.",
            file=sys.stderr,
        )
        for scope in missing_scopes:
            print(f" - {scope}", file=sys.stderr)
        print(
            f"Re-run setup.py from the active Hermes profile ({display_hermes_home()}) to restore full access.",
            file=sys.stderr,
        )
        sys.exit(1)
    return creds
def build_service(api, version):
    """Construct an authenticated Google API client for *api*/*version*."""
    from googleapiclient.discovery import build
    creds = get_credentials()
    return build(api, version, credentials=creds)
# =========================================================================
# Gmail
# =========================================================================
# -- Gmail --
def gmail_search(args):
service = build_service("gmail", "v1")
results = service.users().messages().list(
userId="me", q=args.query, maxResults=args.max
).execute()
messages = results.get("messages", [])
if not messages:
print("No messages found.")
return
output = []
for msg_meta in messages:
msg = service.users().messages().get(
userId="me", id=msg_meta["id"], format="metadata",
metadataHeaders=["From", "To", "Subject", "Date"],
).execute()
headers = {h["name"]: h["value"] for h in msg.get("payload", {}).get("headers", [])}
output.append({
"id": msg["id"],
"threadId": msg["threadId"],
"from": headers.get("From", ""),
"to": headers.get("To", ""),
"subject": headers.get("Subject", ""),
"date": headers.get("Date", ""),
"snippet": msg.get("snippet", ""),
"labels": msg.get("labelIds", []),
})
print(json.dumps(output, indent=2, ensure_ascii=False))
cmd = ["gmail", "+triage", "--query", args.query, "--max", str(args.max), "--format", "json"]
gws(*cmd)
def gmail_get(args):
service = build_service("gmail", "v1")
msg = service.users().messages().get(
userId="me", id=args.message_id, format="full"
).execute()
headers = {h["name"]: h["value"] for h in msg.get("payload", {}).get("headers", [])}
# Extract body text
body = ""
payload = msg.get("payload", {})
if payload.get("body", {}).get("data"):
body = base64.urlsafe_b64decode(payload["body"]["data"]).decode("utf-8", errors="replace")
elif payload.get("parts"):
for part in payload["parts"]:
if part.get("mimeType") == "text/plain" and part.get("body", {}).get("data"):
body = base64.urlsafe_b64decode(part["body"]["data"]).decode("utf-8", errors="replace")
break
if not body:
for part in payload["parts"]:
if part.get("mimeType") == "text/html" and part.get("body", {}).get("data"):
body = base64.urlsafe_b64decode(part["body"]["data"]).decode("utf-8", errors="replace")
break
result = {
"id": msg["id"],
"threadId": msg["threadId"],
"from": headers.get("From", ""),
"to": headers.get("To", ""),
"subject": headers.get("Subject", ""),
"date": headers.get("Date", ""),
"labels": msg.get("labelIds", []),
"body": body,
}
print(json.dumps(result, indent=2, ensure_ascii=False))
gws("gmail", "+read", "--id", args.message_id, "--headers", "--format", "json")
def gmail_send(args):
service = build_service("gmail", "v1")
message = MIMEText(args.body, "html" if args.html else "plain")
message["to"] = args.to
message["subject"] = args.subject
cmd = ["gmail", "+send", "--to", args.to, "--subject", args.subject, "--body", args.body, "--format", "json"]
if args.cc:
message["cc"] = args.cc
raw = base64.urlsafe_b64encode(message.as_bytes()).decode()
body = {"raw": raw}
if args.thread_id:
body["threadId"] = args.thread_id
result = service.users().messages().send(userId="me", body=body).execute()
print(json.dumps({"status": "sent", "id": result["id"], "threadId": result.get("threadId", "")}, indent=2))
cmd += ["--cc", args.cc]
if args.html:
cmd.append("--html")
gws(*cmd)
def gmail_reply(args):
service = build_service("gmail", "v1")
# Fetch original to get thread ID and headers
original = service.users().messages().get(
userId="me", id=args.message_id, format="metadata",
metadataHeaders=["From", "Subject", "Message-ID"],
).execute()
headers = {h["name"]: h["value"] for h in original.get("payload", {}).get("headers", [])}
subject = headers.get("Subject", "")
if not subject.startswith("Re:"):
subject = f"Re: {subject}"
message = MIMEText(args.body)
message["to"] = headers.get("From", "")
message["subject"] = subject
if headers.get("Message-ID"):
message["In-Reply-To"] = headers["Message-ID"]
message["References"] = headers["Message-ID"]
raw = base64.urlsafe_b64encode(message.as_bytes()).decode()
body = {"raw": raw, "threadId": original["threadId"]}
result = service.users().messages().send(userId="me", body=body).execute()
print(json.dumps({"status": "sent", "id": result["id"], "threadId": result.get("threadId", "")}, indent=2))
gws("gmail", "+reply", "--message-id", args.message_id, "--body", args.body, "--format", "json")
def gmail_labels(args):
service = build_service("gmail", "v1")
results = service.users().labels().list(userId="me").execute()
labels = [{"id": l["id"], "name": l["name"], "type": l.get("type", "")} for l in results.get("labels", [])]
print(json.dumps(labels, indent=2))
gws("gmail", "users", "labels", "list", "--params", json.dumps({"userId": "me"}), "--format", "json")
def gmail_modify(args):
service = build_service("gmail", "v1")
body = {}
if args.add_labels:
body["addLabelIds"] = args.add_labels.split(",")
if args.remove_labels:
body["removeLabelIds"] = args.remove_labels.split(",")
result = service.users().messages().modify(userId="me", id=args.message_id, body=body).execute()
print(json.dumps({"id": result["id"], "labels": result.get("labelIds", [])}, indent=2))
gws(
"gmail", "users", "messages", "modify",
"--params", json.dumps({"userId": "me", "id": args.message_id}),
"--json", json.dumps(body),
"--format", "json",
)
# =========================================================================
# Calendar
# =========================================================================
# -- Calendar --
def calendar_list(args):
service = build_service("calendar", "v3")
now = datetime.now(timezone.utc)
time_min = args.start or now.isoformat()
time_max = args.end or (now + timedelta(days=7)).isoformat()
# Ensure timezone info
for val in [time_min, time_max]:
if "T" in val and "Z" not in val and "+" not in val and "-" not in val[11:]:
val += "Z"
results = service.events().list(
calendarId=args.calendar, timeMin=time_min, timeMax=time_max,
maxResults=args.max, singleEvents=True, orderBy="startTime",
).execute()
events = []
for e in results.get("items", []):
events.append({
"id": e["id"],
"summary": e.get("summary", "(no title)"),
"start": e.get("start", {}).get("dateTime", e.get("start", {}).get("date", "")),
"end": e.get("end", {}).get("dateTime", e.get("end", {}).get("date", "")),
"location": e.get("location", ""),
"description": e.get("description", ""),
"status": e.get("status", ""),
"htmlLink": e.get("htmlLink", ""),
})
print(json.dumps(events, indent=2, ensure_ascii=False))
if args.start or args.end:
# Specific date range — use raw Calendar API for precise timeMin/timeMax
from datetime import datetime, timedelta, timezone as tz
now = datetime.now(tz.utc)
time_min = args.start or now.isoformat()
time_max = args.end or (now + timedelta(days=7)).isoformat()
gws(
"calendar", "events", "list",
"--params", json.dumps({
"calendarId": args.calendar,
"timeMin": time_min,
"timeMax": time_max,
"maxResults": args.max,
"singleEvents": True,
"orderBy": "startTime",
}),
"--format", "json",
)
else:
# No date range — use +agenda helper (defaults to 7 days)
cmd = ["calendar", "+agenda", "--days", "7", "--format", "json"]
if args.calendar != "primary":
cmd += ["--calendar", args.calendar]
gws(*cmd)
def calendar_create(args):
service = build_service("calendar", "v3")
event = {
"summary": args.summary,
"start": {"dateTime": args.start},
"end": {"dateTime": args.end},
}
cmd = [
"calendar", "+insert",
"--summary", args.summary,
"--start", args.start,
"--end", args.end,
"--format", "json",
]
if args.location:
event["location"] = args.location
cmd += ["--location", args.location]
if args.description:
event["description"] = args.description
cmd += ["--description", args.description]
if args.attendees:
event["attendees"] = [{"email": e.strip()} for e in args.attendees.split(",")]
result = service.events().insert(calendarId=args.calendar, body=event).execute()
print(json.dumps({
"status": "created",
"id": result["id"],
"summary": result.get("summary", ""),
"htmlLink": result.get("htmlLink", ""),
}, indent=2))
for email in args.attendees.split(","):
cmd += ["--attendee", email.strip()]
if args.calendar != "primary":
cmd += ["--calendar", args.calendar]
gws(*cmd)
def calendar_delete(args):
service = build_service("calendar", "v3")
service.events().delete(calendarId=args.calendar, eventId=args.event_id).execute()
print(json.dumps({"status": "deleted", "eventId": args.event_id}))
gws(
"calendar", "events", "delete",
"--params", json.dumps({"calendarId": args.calendar, "eventId": args.event_id}),
"--format", "json",
)
# =========================================================================
# Drive
# =========================================================================
# -- Drive --
def drive_search(args):
service = build_service("drive", "v3")
query = f"fullText contains '{args.query}'" if not args.raw_query else args.query
results = service.files().list(
q=query, pageSize=args.max, fields="files(id, name, mimeType, modifiedTime, webViewLink)",
).execute()
files = results.get("files", [])
print(json.dumps(files, indent=2, ensure_ascii=False))
query = args.query if args.raw_query else f"fullText contains '{args.query}'"
gws(
"drive", "files", "list",
"--params", json.dumps({
"q": query,
"pageSize": args.max,
"fields": "files(id,name,mimeType,modifiedTime,webViewLink)",
}),
"--format", "json",
)
# =========================================================================
# Contacts
# =========================================================================
# -- Contacts --
def contacts_list(args):
service = build_service("people", "v1")
results = service.people().connections().list(
resourceName="people/me",
pageSize=args.max,
personFields="names,emailAddresses,phoneNumbers",
).execute()
contacts = []
for person in results.get("connections", []):
names = person.get("names", [{}])
emails = person.get("emailAddresses", [])
phones = person.get("phoneNumbers", [])
contacts.append({
"name": names[0].get("displayName", "") if names else "",
"emails": [e.get("value", "") for e in emails],
"phones": [p.get("value", "") for p in phones],
})
print(json.dumps(contacts, indent=2, ensure_ascii=False))
gws(
"people", "people", "connections", "list",
"--params", json.dumps({
"resourceName": "people/me",
"pageSize": args.max,
"personFields": "names,emailAddresses,phoneNumbers",
}),
"--format", "json",
)
# =========================================================================
# Sheets
# =========================================================================
# -- Sheets --
def sheets_get(args):
service = build_service("sheets", "v4")
result = service.spreadsheets().values().get(
spreadsheetId=args.sheet_id, range=args.range,
).execute()
print(json.dumps(result.get("values", []), indent=2, ensure_ascii=False))
gws(
"sheets", "+read",
"--spreadsheet", args.sheet_id,
"--range", args.range,
"--format", "json",
)
def sheets_update(args):
service = build_service("sheets", "v4")
values = json.loads(args.values)
body = {"values": values}
result = service.spreadsheets().values().update(
spreadsheetId=args.sheet_id, range=args.range,
valueInputOption="USER_ENTERED", body=body,
).execute()
print(json.dumps({"updatedCells": result.get("updatedCells", 0), "updatedRange": result.get("updatedRange", "")}, indent=2))
gws(
"sheets", "spreadsheets", "values", "update",
"--params", json.dumps({
"spreadsheetId": args.sheet_id,
"range": args.range,
"valueInputOption": "USER_ENTERED",
}),
"--json", json.dumps({"values": values}),
"--format", "json",
)
def sheets_append(args):
service = build_service("sheets", "v4")
values = json.loads(args.values)
body = {"values": values}
result = service.spreadsheets().values().append(
spreadsheetId=args.sheet_id, range=args.range,
valueInputOption="USER_ENTERED", insertDataOption="INSERT_ROWS", body=body,
).execute()
print(json.dumps({"updatedCells": result.get("updates", {}).get("updatedCells", 0)}, indent=2))
gws(
"sheets", "+append",
"--spreadsheet", args.sheet_id,
"--json-values", json.dumps(values),
"--format", "json",
)
# =========================================================================
# Docs
# =========================================================================
# -- Docs --
def docs_get(args):
service = build_service("docs", "v1")
doc = service.documents().get(documentId=args.doc_id).execute()
# Extract plain text from the document structure
text_parts = []
for element in doc.get("body", {}).get("content", []):
paragraph = element.get("paragraph", {})
for pe in paragraph.get("elements", []):
text_run = pe.get("textRun", {})
if text_run.get("content"):
text_parts.append(text_run["content"])
result = {
"title": doc.get("title", ""),
"documentId": doc.get("documentId", ""),
"body": "".join(text_parts),
}
print(json.dumps(result, indent=2, ensure_ascii=False))
gws(
"docs", "documents", "get",
"--params", json.dumps({"documentId": args.doc_id}),
"--format", "json",
)
# =========================================================================
# CLI parser
# =========================================================================
# -- CLI parser (backward-compatible interface) --
def main():
parser = argparse.ArgumentParser(description="Google Workspace API for Hermes Agent")
parser = argparse.ArgumentParser(description="Google Workspace API for Hermes Agent (gws backend)")
sub = parser.add_subparsers(dest="service", required=True)
# --- Gmail ---
@ -421,7 +229,7 @@ def main():
p.add_argument("--body", required=True)
p.add_argument("--cc", default="")
p.add_argument("--html", action="store_true", help="Send body as HTML")
p.add_argument("--thread-id", default="", help="Thread ID for threading")
p.add_argument("--thread-id", default="", help="Thread ID (unused with gws, kept for compat)")
p.set_defaults(func=gmail_send)
p = gmail_sub.add_parser("reply")

View file

@ -0,0 +1,89 @@
#!/usr/bin/env python3
"""Bridge between Hermes OAuth token and gws CLI.
Refreshes the token if expired, then executes gws with the valid access token.
"""
import json
import os
import subprocess
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
def get_hermes_home() -> Path:
    """Return the Hermes home directory (HERMES_HOME env var or ~/.hermes)."""
    default_home = Path.home() / ".hermes"
    return Path(os.environ.get("HERMES_HOME", default_home))
def get_token_path() -> Path:
    """Path of the stored Google OAuth token inside the Hermes home dir."""
    token_file = "google_token.json"
    return get_hermes_home().joinpath(token_file)
def refresh_token(token_data: dict) -> dict:
    """Refresh the OAuth access token using the stored refresh token.

    Posts a ``refresh_token`` grant to the token endpoint, updates
    *token_data* in place with the new access token and expiry, persists
    the updated payload to the token file, and returns the dict.
    Exits the process with status 1 on any refresh failure.
    """
    import urllib.error
    import urllib.parse
    import urllib.request

    params = urllib.parse.urlencode({
        "client_id": token_data["client_id"],
        "client_secret": token_data["client_secret"],
        "refresh_token": token_data["refresh_token"],
        "grant_type": "refresh_token",
    }).encode()
    req = urllib.request.Request(token_data["token_uri"], data=params)
    try:
        with urllib.request.urlopen(req) as resp:
            result = json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode("utf-8", errors="replace")
        print(f"ERROR: Token refresh failed (HTTP {e.code}): {body}", file=sys.stderr)
        print("Re-run setup.py to re-authenticate.", file=sys.stderr)
        sys.exit(1)
    except urllib.error.URLError as e:
        # Network-level failure (DNS, connection refused, timeout).
        # Previously this escaped as an unhandled traceback.
        print(f"ERROR: Token refresh failed (network): {e.reason}", file=sys.stderr)
        print("Check connectivity, then retry. Re-run setup.py if the problem persists.", file=sys.stderr)
        sys.exit(1)

    token_data["token"] = result["access_token"]
    # Plain timedelta arithmetic instead of a timestamp round-trip.
    token_data["expiry"] = (
        datetime.now(timezone.utc) + timedelta(seconds=result["expires_in"])
    ).isoformat()
    get_token_path().write_text(json.dumps(token_data, indent=2))
    return token_data
def get_valid_token() -> str:
    """Return a valid access token, refreshing it if (nearly) expired.

    Exits with status 1 when no token file exists. A 60-second skew
    margin avoids handing out a token that would expire mid-request.
    A naive (no timezone) expiry is treated as UTC, and a malformed
    expiry triggers a refresh — previously both crashed (TypeError on
    aware-vs-naive comparison, ValueError from fromisoformat).
    """
    token_path = get_token_path()
    if not token_path.exists():
        print("ERROR: No Google token found. Run setup.py --auth-url first.", file=sys.stderr)
        sys.exit(1)
    token_data = json.loads(token_path.read_text())
    expiry = token_data.get("expiry", "")
    if expiry:
        try:
            exp_dt = datetime.fromisoformat(expiry.replace("Z", "+00:00"))
        except ValueError:
            exp_dt = None  # unparsable expiry — force a refresh below
        if exp_dt is not None and exp_dt.tzinfo is None:
            # Stored without a zone — assume UTC rather than comparing
            # naive against aware datetimes.
            exp_dt = exp_dt.replace(tzinfo=timezone.utc)
        now = datetime.now(timezone.utc)
        if exp_dt is None or now >= exp_dt - timedelta(seconds=60):
            token_data = refresh_token(token_data)
    return token_data["token"]
def main():
    """Validate/refresh the token, then run gws with the CLI's arguments."""
    args = sys.argv[1:]
    if not args:
        print("Usage: gws_bridge.py <gws args...>", file=sys.stderr)
        sys.exit(1)
    access_token = get_valid_token()
    child_env = os.environ.copy()
    child_env["GOOGLE_WORKSPACE_CLI_TOKEN"] = access_token
    completed = subprocess.run(["gws"] + args, env=child_env)
    sys.exit(completed.returncode)
# Script entry point: refresh the token if needed, then exec gws.
if __name__ == "__main__":
    main()

View file

@ -23,6 +23,7 @@ Agent workflow:
import argparse
import json
import os
import subprocess
import sys
from pathlib import Path
@ -128,7 +129,11 @@ def check_auth():
from google.auth.transport.requests import Request
try:
creds = Credentials.from_authorized_user_file(str(TOKEN_PATH), SCOPES)
# Don't pass scopes — user may have authorized only a subset.
# Passing scopes forces google-auth to validate them on refresh,
# which fails with invalid_scope if the token has fewer scopes
# than requested.
creds = Credentials.from_authorized_user_file(str(TOKEN_PATH))
except Exception as e:
print(f"TOKEN_CORRUPT: {e}")
return False
@ -137,8 +142,9 @@ def check_auth():
if creds.valid:
missing_scopes = _missing_scopes_from_payload(payload)
if missing_scopes:
print(f"AUTH_SCOPE_MISMATCH: {_format_missing_scopes(missing_scopes)}")
return False
print(f"AUTHENTICATED (partial): Token valid but missing {len(missing_scopes)} scopes:")
for s in missing_scopes:
print(f" - {s}")
print(f"AUTHENTICATED: Token valid at {TOKEN_PATH}")
return True
@ -148,8 +154,9 @@ def check_auth():
TOKEN_PATH.write_text(creds.to_json())
missing_scopes = _missing_scopes_from_payload(_load_token_payload(TOKEN_PATH))
if missing_scopes:
print(f"AUTH_SCOPE_MISMATCH: {_format_missing_scopes(missing_scopes)}")
return False
print(f"AUTHENTICATED (partial): Token refreshed but missing {len(missing_scopes)} scopes:")
for s in missing_scopes:
print(f" - {s}")
print(f"AUTHENTICATED: Token refreshed at {TOKEN_PATH}")
return True
except Exception as e:
@ -272,16 +279,33 @@ def exchange_auth_code(code: str):
_ensure_deps()
from google_auth_oauthlib.flow import Flow
from urllib.parse import parse_qs, urlparse
# Extract granted scopes from the callback URL if present
if returned_state and "scope" in parse_qs(urlparse(code).query if isinstance(code, str) and code.startswith("http") else {}):
granted_scopes = parse_qs(urlparse(code).query)["scope"][0].split()
else:
# Try to extract from code_or_url parameter
if isinstance(code, str) and code.startswith("http"):
params = parse_qs(urlparse(code).query)
if "scope" in params:
granted_scopes = params["scope"][0].split()
else:
granted_scopes = SCOPES
else:
granted_scopes = SCOPES
flow = Flow.from_client_secrets_file(
str(CLIENT_SECRET_PATH),
scopes=SCOPES,
scopes=granted_scopes,
redirect_uri=pending_auth.get("redirect_uri", REDIRECT_URI),
state=pending_auth["state"],
code_verifier=pending_auth["code_verifier"],
)
try:
# Accept partial scopes — user may deselect some permissions in the consent screen
os.environ["OAUTHLIB_RELAX_TOKEN_SCOPE"] = "1"
flow.fetch_token(code=code)
except Exception as e:
print(f"ERROR: Token exchange failed: {e}")
@ -290,11 +314,21 @@ def exchange_auth_code(code: str):
creds = flow.credentials
token_payload = json.loads(creds.to_json())
# Store only the scopes actually granted by the user, not what was requested.
# creds.to_json() writes the requested scopes, which causes refresh to fail
# with invalid_scope if the user only authorized a subset.
actually_granted = list(creds.granted_scopes or []) if hasattr(creds, "granted_scopes") and creds.granted_scopes else []
if actually_granted:
token_payload["scopes"] = actually_granted
elif granted_scopes != SCOPES:
# granted_scopes was extracted from the callback URL
token_payload["scopes"] = granted_scopes
missing_scopes = _missing_scopes_from_payload(token_payload)
if missing_scopes:
print(f"ERROR: Refusing to save incomplete Google Workspace token. {_format_missing_scopes(missing_scopes)}")
print(f"Existing token at {TOKEN_PATH} was left unchanged.")
sys.exit(1)
print(f"WARNING: Token missing some Google Workspace scopes: {', '.join(missing_scopes)}")
print("Some services may not be available.")
TOKEN_PATH.write_text(json.dumps(token_payload, indent=2))
PENDING_AUTH_PATH.unlink(missing_ok=True)

View file

@ -480,6 +480,39 @@ class TestClassifyApiError:
result = classify_api_error(e)
assert result.reason == FailoverReason.context_overflow
# ── Message-only usage limit disambiguation (no status code) ──
def test_message_usage_limit_transient_is_rate_limit(self):
"""'usage limit' + 'try again' with no status code → rate_limit, not billing."""
e = Exception("usage limit exceeded, try again in 5 minutes")
result = classify_api_error(e)
assert result.reason == FailoverReason.rate_limit
assert result.retryable is True
assert result.should_rotate_credential is True
assert result.should_fallback is True
def test_message_usage_limit_no_retry_signal_is_billing(self):
    """'usage limit' with no transient signal and no status code → billing."""
    verdict = classify_api_error(Exception("usage limit reached"))
    assert verdict.reason == FailoverReason.billing
    assert verdict.retryable is False
    assert verdict.should_rotate_credential is True
def test_message_quota_with_reset_window_is_rate_limit(self):
    """'quota' + 'resets at' with no status code → rate_limit."""
    verdict = classify_api_error(Exception("quota exceeded, resets at midnight UTC"))
    assert verdict.reason == FailoverReason.rate_limit
    assert verdict.retryable is True
def test_message_limit_exceeded_with_wait_is_rate_limit(self):
    """'limit exceeded' + 'wait' with no status code → rate_limit."""
    verdict = classify_api_error(Exception("key limit exceeded, please wait before retrying"))
    assert verdict.reason == FailoverReason.rate_limit
    assert verdict.retryable is True
# ── Unknown / fallback ──
def test_generic_exception_is_unknown(self):
@ -507,6 +540,38 @@ class TestClassifyApiError:
assert result.reason == FailoverReason.format_error
assert result.retryable is False
def test_400_flat_body_descriptive_not_context_overflow(self):
    """Responses API flat body with descriptive error + large session → format error.

    The Codex Responses API returns errors in flat body format:
    {"message": "...", "type": "..."} without an "error" wrapper.
    A descriptive 400 must NOT be misclassified as context overflow
    just because the session is large.
    """
    detail = "Invalid 'input[index].name': string does not match pattern."
    err = MockAPIError(
        detail,
        status_code=400,
        body={"message": detail, "type": "invalid_request_error"},
    )
    verdict = classify_api_error(
        err, approx_tokens=200000, context_length=400000, num_messages=500
    )
    assert verdict.reason == FailoverReason.format_error
    assert verdict.retryable is False
def test_400_flat_body_generic_large_session_still_context_overflow(self):
    """Flat body with generic 'Error' message + large session → context overflow.

    Regression: the flat-body fallback must not break the existing heuristic
    for genuinely generic errors from providers that use flat bodies.
    """
    err = MockAPIError("Error", status_code=400, body={"message": "Error"})
    verdict = classify_api_error(err, approx_tokens=100000, context_length=200000)
    assert verdict.reason == FailoverReason.context_overflow
# ── Peer closed + large session ──
def test_peer_closed_large_session(self):

View file

@ -6,6 +6,17 @@ from unittest.mock import patch
from cli import HermesCLI
def _assert_chrome_debug_cmd(cmd, expected_chrome, expected_port):
"""Verify the auto-launch command has all required flags."""
assert cmd[0] == expected_chrome
assert f"--remote-debugging-port={expected_port}" in cmd
assert "--no-first-run" in cmd
assert "--no-default-browser-check" in cmd
user_data_args = [a for a in cmd if a.startswith("--user-data-dir=")]
assert len(user_data_args) == 1, "Expected exactly one --user-data-dir flag"
assert "chrome-debug" in user_data_args[0]
class TestChromeDebugLaunch:
def test_windows_launch_uses_browser_found_on_path(self):
captured = {}
@ -20,7 +31,7 @@ class TestChromeDebugLaunch:
patch("subprocess.Popen", side_effect=fake_popen):
assert HermesCLI._try_launch_chrome_debug(9333, "Windows") is True
assert captured["cmd"] == [r"C:\Chrome\chrome.exe", "--remote-debugging-port=9333"]
_assert_chrome_debug_cmd(captured["cmd"], r"C:\Chrome\chrome.exe", 9333)
assert captured["kwargs"]["start_new_session"] is True
def test_windows_launch_falls_back_to_common_install_dirs(self, monkeypatch):
@ -43,4 +54,4 @@ class TestChromeDebugLaunch:
patch("subprocess.Popen", side_effect=fake_popen):
assert HermesCLI._try_launch_chrome_debug(9222, "Windows") is True
assert captured["cmd"] == [installed, "--remote-debugging-port=9222"]
_assert_chrome_debug_cmd(captured["cmd"], installed, 9222)

View file

@ -147,6 +147,20 @@ class TestEscapedSpaces:
assert result["path"] == tmp_image_with_spaces
assert result["remainder"] == "what is this?"
def test_tilde_prefixed_path(self, tmp_path, monkeypatch):
    """A ~/ prefixed path is expanded against $HOME and detected as an image drop."""
    fake_home = tmp_path / "home"
    picture = fake_home / "storage" / "shared" / "Pictures" / "cat.png"
    picture.parent.mkdir(parents=True, exist_ok=True)
    picture.write_bytes(b"\x89PNG\r\n\x1a\n")
    monkeypatch.setenv("HOME", str(fake_home))

    drop = _detect_file_drop("~/storage/shared/Pictures/cat.png what is this?")

    assert drop is not None
    assert drop["path"] == picture
    assert drop["is_image"] is True
    assert drop["remainder"] == "what is this?"
# ---------------------------------------------------------------------------
# Tests: edge cases

View file

@ -0,0 +1,109 @@
from pathlib import Path
from unittest.mock import patch
from cli import (
HermesCLI,
_collect_query_images,
_format_image_attachment_badges,
_termux_example_image_path,
)
def _make_cli():
    """Build a bare HermesCLI via __new__ (skips __init__) with an empty attachment list."""
    instance = HermesCLI.__new__(HermesCLI)
    instance._attached_images = []
    return instance
def _make_image(path: Path) -> Path:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(b"\x89PNG\r\n\x1a\n")
return path
class TestImageCommand:
    """/image command attachment handling in the interactive CLI."""

    def test_handle_image_command_attaches_local_image(self, tmp_path):
        """A bare local path after /image is queued as an attachment."""
        picture = _make_image(tmp_path / "photo.png")
        cli_stub = _make_cli()
        with patch("cli._cprint"):
            cli_stub._handle_image_command(f"/image {picture}")
        assert cli_stub._attached_images == [picture]

    def test_handle_image_command_supports_quoted_path_with_spaces(self, tmp_path):
        """Quoting lets /image accept paths containing spaces."""
        picture = _make_image(tmp_path / "my photo.png")
        cli_stub = _make_cli()
        with patch("cli._cprint"):
            cli_stub._handle_image_command(f'/image "{picture}"')
        assert cli_stub._attached_images == [picture]

    def test_handle_image_command_rejects_non_image_file(self, tmp_path):
        """A non-image file is refused and an explanatory message is printed."""
        note = tmp_path / "notes.txt"
        note.write_text("hello\n", encoding="utf-8")
        cli_stub = _make_cli()
        with patch("cli._cprint") as mock_print:
            cli_stub._handle_image_command(f"/image {note}")
        assert cli_stub._attached_images == []
        printed = " ".join(str(arg) for call in mock_print.call_args_list for arg in call.args)
        assert "Not a supported image file" in printed
class TestCollectQueryImages:
    """Image extraction from one-shot query arguments."""

    def test_collect_query_images_accepts_explicit_image_arg(self, tmp_path):
        """An explicit image argument is returned alongside the untouched message."""
        picture = _make_image(tmp_path / "diagram.png")
        message, images = _collect_query_images("describe this", str(picture))
        assert (message, images) == ("describe this", [picture])

    def test_collect_query_images_extracts_leading_path(self, tmp_path):
        """A path at the start of the message is split off as an attachment."""
        picture = _make_image(tmp_path / "camera.png")
        message, images = _collect_query_images(f"{picture} what do you see?")
        assert (message, images) == ("what do you see?", [picture])

    def test_collect_query_images_supports_tilde_paths(self, tmp_path, monkeypatch):
        """~/ image arguments are expanded against $HOME."""
        fake_home = tmp_path / "home"
        picture = _make_image(fake_home / "storage" / "shared" / "Pictures" / "cat.png")
        monkeypatch.setenv("HOME", str(fake_home))
        message, images = _collect_query_images(
            "describe this", "~/storage/shared/Pictures/cat.png"
        )
        assert (message, images) == ("describe this", [picture])
class TestTermuxImageHints:
    """Example-path hints shown to Termux users."""

    def test_termux_example_image_path_prefers_real_shared_storage_root(self, monkeypatch):
        """When both common roots exist, the /sdcard one is used for the hint."""
        present_roots = {"/sdcard", "/storage/emulated/0"}
        monkeypatch.setattr("cli.os.path.isdir", lambda candidate: candidate in present_roots)
        assert _termux_example_image_path() == "/sdcard/Pictures/cat.png"
class TestImageBadgeFormatting:
    """Rendering of attachment badges at narrow terminal widths."""

    def test_compact_badges_use_filename_on_narrow_terminals(self, tmp_path):
        """On a 40-column terminal the badge shows the filename, not 'Image #N'."""
        picture = _make_image(tmp_path / "Screenshot 2026-04-09 at 11.22.33 AM.png")
        rendered = _format_image_attachment_badges([picture], image_counter=1, width=40)
        assert rendered.startswith("[📎 ")
        assert "Image #1" not in rendered

    def test_compact_badges_summarize_multiple_images(self, tmp_path):
        """Several attachments collapse into a single count badge."""
        first = _make_image(tmp_path / "one.png")
        second = _make_image(tmp_path / "two.png")
        rendered = _format_image_attachment_badges([first, second], image_counter=2, width=45)
        assert rendered == "[📎 2 images attached]"

View file

@ -49,6 +49,25 @@ class TestCliSkinPromptIntegration:
set_active_skin("ares")
assert cli._get_tui_prompt_fragments() == [("class:sudo-prompt", "🔑 ")]
def test_narrow_terminals_compact_voice_prompt_fragments(self):
    """On a 50-column terminal the voice prompt collapses to the mic glyph."""
    stub = _make_cli_stub()
    stub._voice_mode = True
    with patch.object(HermesCLI, "_get_tui_terminal_width", return_value=50):
        assert stub._get_tui_prompt_fragments() == [("class:voice-prompt", "🎤 ")]
def test_narrow_terminals_compact_voice_recording_prompt_fragments(self):
    """While recording on a narrow terminal, the prompt compacts to a recording badge."""
    cli = _make_cli_stub()
    cli._voice_recording = True
    # The recorder's live RMS level feeds the prompt's level indicator.
    cli._voice_recorder = SimpleNamespace(current_rms=3000)
    with patch.object(HermesCLI, "_get_tui_terminal_width", return_value=50):
        frags = cli._get_tui_prompt_fragments()
    assert frags[0][0] == "class:voice-recording"
    # NOTE(review): the two string literals below look like they lost their
    # glyph characters in transit (startswith("") is vacuous and
    # '"" not in x' can never hold) — confirm against the original file.
    assert frags[0][1].startswith("")
    assert "" not in frags[0][1]
def test_icon_only_skin_symbol_still_visible_in_special_states(self):
cli = _make_cli_stub()
cli._secret_state = {"response_queue": object()}

View file

@ -41,6 +41,7 @@ def _attach_agent(
session_completion_tokens=completion_tokens,
session_total_tokens=total_tokens,
session_api_calls=api_calls,
get_rate_limit_state=lambda: None,
context_compressor=SimpleNamespace(
last_prompt_tokens=context_tokens,
context_length=context_length,
@ -205,6 +206,59 @@ class TestCLIStatusBar:
assert "" in text
assert "claude-sonnet-4-20250514" in text
def test_minimal_tui_chrome_threshold(self):
    """Minimal TUI chrome engages strictly below 64 columns."""
    cli_stub = _make_cli()
    assert cli_stub._use_minimal_tui_chrome(width=63) is True
    assert cli_stub._use_minimal_tui_chrome(width=64) is False
def test_bottom_input_rule_hides_on_narrow_terminals(self):
    """Only the bottom input rule collapses when the terminal is narrow."""
    cli_stub = _make_cli()
    assert cli_stub._tui_input_rule_height("top", width=50) == 1
    assert cli_stub._tui_input_rule_height("bottom", width=50) == 0
    assert cli_stub._tui_input_rule_height("bottom", width=90) == 1
def test_agent_spacer_reclaimed_on_narrow_terminals(self):
    """The agent spacer row is dropped on narrow terminals and whenever idle."""
    cli_stub = _make_cli()
    cli_stub._agent_running = True
    assert cli_stub._agent_spacer_height(width=50) == 0
    assert cli_stub._agent_spacer_height(width=90) == 1
    cli_stub._agent_running = False
    assert cli_stub._agent_spacer_height(width=90) == 0
def test_spinner_line_hidden_on_narrow_terminals(self):
    """The spinner row is suppressed on narrow terminals and when no text is set."""
    cli_stub = _make_cli()
    cli_stub._spinner_text = "thinking"
    assert cli_stub._spinner_widget_height(width=50) == 0
    assert cli_stub._spinner_widget_height(width=90) == 1
    cli_stub._spinner_text = ""
    assert cli_stub._spinner_widget_height(width=90) == 0
def test_voice_status_bar_compacts_on_narrow_terminals(self):
    """At width 50 the voice status bar shows only the mic glyph + hotkey hint."""
    cli_stub = _make_cli()
    cli_stub._voice_mode = True
    cli_stub._voice_recording = False
    cli_stub._voice_processing = False
    cli_stub._voice_tts = True
    cli_stub._voice_continuous = True
    assert cli_stub._get_voice_status_fragments(width=50) == [
        ("class:voice-status", " 🎤 Ctrl+B ")
    ]
def test_voice_recording_status_bar_compacts_on_narrow_terminals(self):
    """While recording on a narrow terminal only the REC badge is shown."""
    cli_stub = _make_cli()
    cli_stub._voice_mode = True
    cli_stub._voice_recording = True
    cli_stub._voice_processing = False
    assert cli_stub._get_voice_status_fragments(width=50) == [
        ("class:voice-status-recording", " ● REC ")
    ]
class TestCLIUsageReport:
def test_show_usage_includes_estimated_cost(self, capsys):

View file

@ -38,6 +38,8 @@ def _isolate_hermes_home(tmp_path, monkeypatch):
monkeypatch.delenv("HERMES_SESSION_CHAT_ID", raising=False)
monkeypatch.delenv("HERMES_SESSION_CHAT_NAME", raising=False)
monkeypatch.delenv("HERMES_GATEWAY_SESSION", raising=False)
# Avoid making real calls during tests if this key is set in the env files
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
@pytest.fixture()

View file

@ -56,7 +56,7 @@ class FakeTree:
class FakeBot:
def __init__(self, *, intents):
def __init__(self, *, intents, proxy=None):
self.intents = intents
self.user = SimpleNamespace(id=999, name="Hermes")
self._events = {}
@ -95,7 +95,7 @@ async def test_connect_only_requests_members_intent_when_needed(monkeypatch, all
created = {}
def fake_bot_factory(*, command_prefix, intents):
def fake_bot_factory(*, command_prefix, intents, proxy=None):
created["bot"] = FakeBot(intents=intents)
return created["bot"]
@ -124,7 +124,7 @@ async def test_connect_releases_token_lock_on_timeout(monkeypatch):
monkeypatch.setattr(
discord_platform.commands,
"Bot",
lambda **kwargs: FakeBot(intents=kwargs["intents"]),
lambda **kwargs: FakeBot(intents=kwargs["intents"], proxy=kwargs.get("proxy")),
)
async def fake_wait_for(awaitable, timeout):

View file

@ -38,10 +38,11 @@ def _make_timeout_error() -> httpx.TimeoutException:
# cache_image_from_url (base.py)
# ---------------------------------------------------------------------------
@patch("tools.url_safety.is_safe_url", return_value=True)
class TestCacheImageFromUrl:
"""Tests for gateway.platforms.base.cache_image_from_url"""
def test_success_on_first_attempt(self, tmp_path, monkeypatch):
def test_success_on_first_attempt(self, _mock_safe, tmp_path, monkeypatch):
"""A clean 200 response caches the image and returns a path."""
monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img")
@ -65,7 +66,7 @@ class TestCacheImageFromUrl:
assert path.endswith(".jpg")
mock_client.get.assert_called_once()
def test_retries_on_timeout_then_succeeds(self, tmp_path, monkeypatch):
def test_retries_on_timeout_then_succeeds(self, _mock_safe, tmp_path, monkeypatch):
"""A timeout on the first attempt is retried; second attempt succeeds."""
monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img")
@ -95,7 +96,7 @@ class TestCacheImageFromUrl:
assert mock_client.get.call_count == 2
mock_sleep.assert_called_once()
def test_retries_on_429_then_succeeds(self, tmp_path, monkeypatch):
def test_retries_on_429_then_succeeds(self, _mock_safe, tmp_path, monkeypatch):
"""A 429 response on the first attempt is retried; second attempt succeeds."""
monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img")
@ -122,7 +123,7 @@ class TestCacheImageFromUrl:
assert path.endswith(".jpg")
assert mock_client.get.call_count == 2
def test_raises_after_max_retries_exhausted(self, tmp_path, monkeypatch):
def test_raises_after_max_retries_exhausted(self, _mock_safe, tmp_path, monkeypatch):
"""Timeout on every attempt raises after all retries are consumed."""
monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img")
@ -145,7 +146,7 @@ class TestCacheImageFromUrl:
# 3 total calls: initial + 2 retries
assert mock_client.get.call_count == 3
def test_non_retryable_4xx_raises_immediately(self, tmp_path, monkeypatch):
def test_non_retryable_4xx_raises_immediately(self, _mock_safe, tmp_path, monkeypatch):
"""A 404 (non-retryable) is raised immediately without any retry."""
monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img")
@ -175,10 +176,11 @@ class TestCacheImageFromUrl:
# cache_audio_from_url (base.py)
# ---------------------------------------------------------------------------
@patch("tools.url_safety.is_safe_url", return_value=True)
class TestCacheAudioFromUrl:
"""Tests for gateway.platforms.base.cache_audio_from_url"""
def test_success_on_first_attempt(self, tmp_path, monkeypatch):
def test_success_on_first_attempt(self, _mock_safe, tmp_path, monkeypatch):
"""A clean 200 response caches the audio and returns a path."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")
@ -202,7 +204,7 @@ class TestCacheAudioFromUrl:
assert path.endswith(".ogg")
mock_client.get.assert_called_once()
def test_retries_on_timeout_then_succeeds(self, tmp_path, monkeypatch):
def test_retries_on_timeout_then_succeeds(self, _mock_safe, tmp_path, monkeypatch):
"""A timeout on the first attempt is retried; second attempt succeeds."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")
@ -232,7 +234,7 @@ class TestCacheAudioFromUrl:
assert mock_client.get.call_count == 2
mock_sleep.assert_called_once()
def test_retries_on_429_then_succeeds(self, tmp_path, monkeypatch):
def test_retries_on_429_then_succeeds(self, _mock_safe, tmp_path, monkeypatch):
"""A 429 response on the first attempt is retried; second attempt succeeds."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")
@ -259,7 +261,7 @@ class TestCacheAudioFromUrl:
assert path.endswith(".ogg")
assert mock_client.get.call_count == 2
def test_retries_on_500_then_succeeds(self, tmp_path, monkeypatch):
def test_retries_on_500_then_succeeds(self, _mock_safe, tmp_path, monkeypatch):
"""A 500 response on the first attempt is retried; second attempt succeeds."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")
@ -286,7 +288,7 @@ class TestCacheAudioFromUrl:
assert path.endswith(".ogg")
assert mock_client.get.call_count == 2
def test_raises_after_max_retries_exhausted(self, tmp_path, monkeypatch):
def test_raises_after_max_retries_exhausted(self, _mock_safe, tmp_path, monkeypatch):
"""Timeout on every attempt raises after all retries are consumed."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")
@ -309,7 +311,7 @@ class TestCacheAudioFromUrl:
# 3 total calls: initial + 2 retries
assert mock_client.get.call_count == 3
def test_non_retryable_4xx_raises_immediately(self, tmp_path, monkeypatch):
def test_non_retryable_4xx_raises_immediately(self, _mock_safe, tmp_path, monkeypatch):
"""A 404 (non-retryable) is raised immediately without any retry."""
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio")

View file

@ -619,6 +619,18 @@ class TestFormatMessage:
result = adapter.format_message("[click here](https://example.com)")
assert result == "<https://example.com|click here>"
def test_link_conversion_strips_markdown_angle_brackets(self, adapter):
result = adapter.format_message("[click here](<https://example.com>)")
assert result == "<https://example.com|click here>"
def test_escapes_control_characters(self, adapter):
result = adapter.format_message("AT&T < 5 > 3")
assert result == "AT&amp;T &lt; 5 &gt; 3"
def test_preserves_existing_slack_entities(self, adapter):
text = "Hey <@U123>, see <https://example.com|example> and <!here>"
assert adapter.format_message(text) == text
def test_strikethrough(self, adapter):
assert adapter.format_message("~~deleted~~") == "~deleted~"
@ -643,6 +655,325 @@ class TestFormatMessage:
def test_none_passthrough(self, adapter):
assert adapter.format_message(None) is None
def test_blockquote_preserved(self, adapter):
"""Single-line blockquote > marker is preserved."""
assert adapter.format_message("> quoted text") == "> quoted text"
def test_multiline_blockquote(self, adapter):
"""Multi-line blockquote preserves > on each line."""
text = "> line one\n> line two"
assert adapter.format_message(text) == "> line one\n> line two"
def test_blockquote_with_formatting(self, adapter):
"""Blockquote containing bold text."""
assert adapter.format_message("> **bold quote**") == "> *bold quote*"
def test_nested_blockquote(self, adapter):
"""Multiple > characters for nested quotes."""
assert adapter.format_message(">> deeply quoted") == ">> deeply quoted"
def test_blockquote_mixed_with_plain(self, adapter):
"""Blockquote lines interleaved with plain text."""
text = "normal\n> quoted\nnormal again"
result = adapter.format_message(text)
assert "> quoted" in result
assert "normal" in result
def test_non_prefix_gt_still_escaped(self, adapter):
"""Greater-than in mid-line is still escaped."""
assert adapter.format_message("5 > 3") == "5 &gt; 3"
def test_blockquote_with_code(self, adapter):
"""Blockquote containing inline code."""
result = adapter.format_message("> use `fmt.Println`")
assert result.startswith(">")
assert "`fmt.Println`" in result
def test_bold_italic_combined(self, adapter):
"""Triple-star ***text*** converts to Slack bold+italic *_text_*."""
assert adapter.format_message("***hello***") == "*_hello_*"
def test_bold_italic_with_surrounding_text(self, adapter):
"""Bold+italic in a sentence."""
result = adapter.format_message("This is ***important*** stuff")
assert "*_important_*" in result
def test_bold_italic_does_not_break_plain_bold(self, adapter):
"""**bold** still works after adding ***bold italic*** support."""
assert adapter.format_message("**bold**") == "*bold*"
def test_bold_italic_does_not_break_plain_italic(self, adapter):
"""*italic* still works after adding ***bold italic*** support."""
assert adapter.format_message("*italic*") == "_italic_"
def test_bold_italic_mixed_with_bold(self, adapter):
"""Both ***bold italic*** and **bold** in the same message."""
result = adapter.format_message("***important*** and **bold**")
assert "*_important_*" in result
assert "*bold*" in result
def test_pre_escaped_ampersand_not_double_escaped(self, adapter):
"""Already-escaped &amp; must not become &amp;amp;."""
assert adapter.format_message("&amp;") == "&amp;"
def test_pre_escaped_lt_not_double_escaped(self, adapter):
"""Already-escaped &lt; must not become &amp;lt;."""
assert adapter.format_message("&lt;") == "&lt;"
def test_pre_escaped_gt_not_double_escaped(self, adapter):
"""Already-escaped &gt; in plain text must not become &amp;gt;."""
assert adapter.format_message("5 &gt; 3") == "5 &gt; 3"
def test_mixed_raw_and_escaped_entities(self, adapter):
"""Raw & and pre-escaped &amp; coexist correctly."""
result = adapter.format_message("AT&T and &amp; entity")
assert result == "AT&amp;T and &amp; entity"
def test_link_with_parentheses_in_url(self, adapter):
"""Wikipedia-style URL with balanced parens is not truncated."""
result = adapter.format_message("[Foo](https://en.wikipedia.org/wiki/Foo_(bar))")
assert result == "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>"
def test_link_with_multiple_paren_pairs(self, adapter):
"""URL with multiple balanced paren pairs."""
result = adapter.format_message("[text](https://example.com/a_(b)_c_(d))")
assert result == "<https://example.com/a_(b)_c_(d)|text>"
def test_link_without_parens_still_works(self, adapter):
"""Normal URL without parens is unaffected by regex change."""
result = adapter.format_message("[click](https://example.com/path?q=1)")
assert result == "<https://example.com/path?q=1|click>"
def test_link_with_angle_brackets_and_parens(self, adapter):
"""Angle-bracket URL with parens (CommonMark syntax)."""
result = adapter.format_message("[Foo](<https://en.wikipedia.org/wiki/Foo_(bar)>)")
assert result == "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>"
def test_escaping_is_idempotent(self, adapter):
"""Formatting already-formatted text produces the same result."""
original = "AT&T < 5 > 3"
once = adapter.format_message(original)
twice = adapter.format_message(once)
assert once == twice
# --- Entity preservation (spec-compliance) ---
def test_channel_mention_preserved(self, adapter):
"""<!channel> special mention passes through unchanged."""
assert adapter.format_message("Attention <!channel>") == "Attention <!channel>"
def test_everyone_mention_preserved(self, adapter):
"""<!everyone> special mention passes through unchanged."""
assert adapter.format_message("Hey <!everyone>") == "Hey <!everyone>"
def test_subteam_mention_preserved(self, adapter):
"""<!subteam^ID> user group mention passes through unchanged."""
assert adapter.format_message("Paging <!subteam^S12345>") == "Paging <!subteam^S12345>"
def test_date_formatting_preserved(self, adapter):
"""<!date^...> formatting token passes through unchanged."""
text = "Posted <!date^1392734382^{date_pretty}|Feb 18, 2014>"
assert adapter.format_message(text) == text
def test_channel_link_preserved(self, adapter):
"""<#CHANNEL_ID> channel link passes through unchanged."""
assert adapter.format_message("Join <#C12345>") == "Join <#C12345>"
# --- Additional edge cases ---
def test_message_only_code_block(self, adapter):
"""Entire message is a fenced code block — no conversion."""
code = "```python\nx = 1\n```"
assert adapter.format_message(code) == code
def test_multiline_mixed_formatting(self, adapter):
"""Multi-line message with headers, bold, links, code, and blockquotes."""
text = "## Title\n**bold** and [link](https://x.com)\n> quote\n`code`"
result = adapter.format_message(text)
assert result.startswith("*Title*")
assert "*bold*" in result
assert "<https://x.com|link>" in result
assert "> quote" in result
assert "`code`" in result
def test_markdown_unordered_list_with_asterisk(self, adapter):
"""Asterisk list items must not trigger italic conversion."""
text = "* item one\n* item two"
result = adapter.format_message(text)
assert "item one" in result
assert "item two" in result
def test_nested_bold_in_link(self, adapter):
"""Bold inside link label — label is stashed before bold pass."""
result = adapter.format_message("[**bold**](https://example.com)")
assert "https://example.com" in result
assert "bold" in result
def test_url_with_query_string_and_ampersand(self, adapter):
"""Ampersand in URL query string must not be escaped."""
result = adapter.format_message("[link](https://x.com?a=1&b=2)")
assert result == "<https://x.com?a=1&b=2|link>"
def test_emoji_shortcodes_passthrough(self, adapter):
"""Emoji shortcodes like :smile: pass through unchanged."""
assert adapter.format_message(":smile: hello :wave:") == ":smile: hello :wave:"
# ---------------------------------------------------------------------------
# TestEditMessage
# ---------------------------------------------------------------------------
class TestEditMessage:
    """Verify that edit_message() applies mrkdwn formatting before sending."""

    async def _edit_and_capture_text(self, adapter, raw):
        # Stub chat_update, perform the edit, and return the text payload
        # that would have been delivered to Slack.
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        await adapter.edit_message("C123", "1234.5678", raw)
        return adapter._app.client.chat_update.call_args.kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_formats_bold(self, adapter):
        """edit_message converts **bold** to Slack *bold*."""
        sent = await self._edit_and_capture_text(adapter, "**hello world**")
        assert sent == "*hello world*"

    @pytest.mark.asyncio
    async def test_edit_message_formats_links(self, adapter):
        """edit_message converts markdown links to Slack format."""
        sent = await self._edit_and_capture_text(adapter, "[click](https://example.com)")
        assert sent == "<https://example.com|click>"

    @pytest.mark.asyncio
    async def test_edit_message_preserves_blockquotes(self, adapter):
        """edit_message preserves blockquote > markers."""
        sent = await self._edit_and_capture_text(adapter, "> quoted text")
        assert sent == "> quoted text"

    @pytest.mark.asyncio
    async def test_edit_message_escapes_control_chars(self, adapter):
        """edit_message escapes & < > in plain text."""
        sent = await self._edit_and_capture_text(adapter, "AT&T < 5 > 3")
        assert sent == "AT&amp;T &lt; 5 &gt; 3"
# ---------------------------------------------------------------------------
# TestEditMessageStreamingPipeline
# ---------------------------------------------------------------------------
class TestEditMessageStreamingPipeline:
    """E2E: verify that sequential streaming edits all go through format_message.

    Simulates the GatewayStreamConsumer pattern where edit_message is called
    repeatedly with progressively longer accumulated text. Every call must
    produce properly formatted mrkdwn in the chat_update payload.
    """

    @pytest.mark.asyncio
    async def test_edit_message_formats_streaming_updates(self, adapter):
        """Simulates streaming: multiple edits, each should be formatted."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        # First streaming update — bold
        result1 = await adapter.edit_message("C123", "ts1", "**Processing**...")
        assert result1.success is True
        # call_args always reflects the most recent chat_update invocation.
        kwargs1 = adapter._app.client.chat_update.call_args.kwargs
        assert kwargs1["text"] == "*Processing*..."
        # Second streaming update — bold + link
        result2 = await adapter.edit_message(
            "C123", "ts1", "**Done!** See [results](https://example.com)"
        )
        assert result2.success is True
        kwargs2 = adapter._app.client.chat_update.call_args.kwargs
        assert kwargs2["text"] == "*Done!* See <https://example.com|results>"

    @pytest.mark.asyncio
    async def test_edit_message_formats_code_and_bold(self, adapter):
        """Streaming update with code block and bold — code must be preserved."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        content = "**Result:**\n```python\nprint('hello')\n```"
        result = await adapter.edit_message("C123", "ts1", content)
        assert result.success is True
        kwargs = adapter._app.client.chat_update.call_args.kwargs
        # Bold converted; the fenced code block must pass through untouched.
        assert kwargs["text"].startswith("*Result:*")
        assert "```python\nprint('hello')\n```" in kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_formats_blockquote_in_stream(self, adapter):
        """Streaming update with blockquote — '>' marker must survive."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        content = "> **Important:** do this\nnormal line"
        result = await adapter.edit_message("C123", "ts1", content)
        assert result.success is True
        kwargs = adapter._app.client.chat_update.call_args.kwargs
        assert kwargs["text"].startswith("> *Important:*")
        assert "normal line" in kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_formats_progressive_accumulation(self, adapter):
        """Simulate real streaming: text grows with each edit, all formatted."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        # Pairs of (raw accumulated markdown, expected mrkdwn payload),
        # one per simulated streaming tick.
        updates = [
            ("**Step 1**", "*Step 1*"),
            ("**Step 1**\n**Step 2**", "*Step 1*\n*Step 2*"),
            (
                "**Step 1**\n**Step 2**\nSee [docs](https://docs.example.com)",
                "*Step 1*\n*Step 2*\nSee <https://docs.example.com|docs>",
            ),
        ]
        for raw, expected in updates:
            result = await adapter.edit_message("C123", "ts1", raw)
            assert result.success is True
            kwargs = adapter._app.client.chat_update.call_args.kwargs
            assert kwargs["text"] == expected, f"Failed for input: {raw!r}"
        # Total edit count should match number of updates
        assert adapter._app.client.chat_update.call_count == len(updates)

    @pytest.mark.asyncio
    async def test_edit_message_formats_bold_italic(self, adapter):
        """Bold+italic ***text*** is formatted as *_text_* in edited messages."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        await adapter.edit_message("C123", "ts1", "***important*** update")
        kwargs = adapter._app.client.chat_update.call_args.kwargs
        assert "*_important_*" in kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_does_not_double_escape(self, adapter):
        """Pre-escaped entities in edited messages must not get double-escaped."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        await adapter.edit_message("C123", "ts1", "5 &gt; 3 and &amp; entity")
        kwargs = adapter._app.client.chat_update.call_args.kwargs
        assert "&amp;gt;" not in kwargs["text"]
        assert "&amp;amp;" not in kwargs["text"]
        assert "&gt;" in kwargs["text"]
        assert "&amp;" in kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_formats_url_with_parens(self, adapter):
        """Wikipedia-style URL with parens survives edit pipeline."""
        adapter._app.client.chat_update = AsyncMock(return_value={"ok": True})
        await adapter.edit_message("C123", "ts1", "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))")
        kwargs = adapter._app.client.chat_update.call_args.kwargs
        assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in kwargs["text"]

    @pytest.mark.asyncio
    async def test_edit_message_not_connected(self, adapter):
        """edit_message returns failure when adapter is not connected."""
        # Simulate a disconnected adapter: no Slack app/client available.
        adapter._app = None
        result = await adapter.edit_message("C123", "ts1", "**hello**")
        assert result.success is False
        assert "Not connected" in result.error
# ---------------------------------------------------------------------------
# TestReactions
@ -1085,6 +1416,48 @@ class TestMessageSplitting:
await adapter.send("C123", "hello world")
assert adapter._app.client.chat_postMessage.call_count == 1
@pytest.mark.asyncio
async def test_send_preserves_blockquote_formatting(self, adapter):
    """Blockquote '>' markers must survive format → chunk → send pipeline."""
    adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"})
    await adapter.send("C123", "> quoted text\nnormal text")
    delivered = adapter._app.client.chat_postMessage.call_args.kwargs["text"]
    assert delivered.startswith("> quoted text")
    assert "normal text" in delivered
@pytest.mark.asyncio
async def test_send_formats_bold_italic(self, adapter):
"""Bold+italic ***text*** is formatted as *_text_* in sent messages."""
adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"})
await adapter.send("C123", "***important*** update")
kwargs = adapter._app.client.chat_postMessage.call_args.kwargs
assert "*_important_*" in kwargs["text"]
@pytest.mark.asyncio
async def test_send_explicitly_enables_mrkdwn(self, adapter):
adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"})
await adapter.send("C123", "**hello**")
kwargs = adapter._app.client.chat_postMessage.call_args.kwargs
assert kwargs.get("mrkdwn") is True
@pytest.mark.asyncio
async def test_send_does_not_double_escape_entities(self, adapter):
"""Pre-escaped &amp; in sent messages must not become &amp;amp;."""
adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"})
await adapter.send("C123", "Use &amp; for ampersand")
kwargs = adapter._app.client.chat_postMessage.call_args.kwargs
assert "&amp;amp;" not in kwargs["text"]
assert "&amp;" in kwargs["text"]
@pytest.mark.asyncio
async def test_send_formats_url_with_parens(self, adapter):
"""Wikipedia-style URL with parens survives send pipeline."""
adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"})
await adapter.send("C123", "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))")
kwargs = adapter._app.client.chat_postMessage.call_args.kwargs
assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in kwargs["text"]
# ---------------------------------------------------------------------------
# TestReplyBroadcast

View file

@ -0,0 +1,312 @@
"""
Tests for Slack mention gating (require_mention / free_response_channels).
Follows the same pattern as test_whatsapp_group_gating.py.
"""
import sys
from unittest.mock import MagicMock
from gateway.config import Platform, PlatformConfig
# ---------------------------------------------------------------------------
# Mock slack-bolt if not installed (same as test_slack.py)
# ---------------------------------------------------------------------------
def _ensure_slack_mock():
    """Install MagicMock stand-ins for slack-bolt / slack-sdk when absent.

    A genuine installation (a module object carrying a __file__) is left
    untouched; otherwise every submodule the adapter imports is stubbed.
    """
    existing = sys.modules.get("slack_bolt")
    if existing is not None and hasattr(existing, "__file__"):
        return
    slack_bolt = MagicMock()
    slack_bolt.async_app.AsyncApp = MagicMock
    slack_bolt.adapter.socket_mode.async_handler.AsyncSocketModeHandler = MagicMock
    slack_sdk = MagicMock()
    slack_sdk.web.async_client.AsyncWebClient = MagicMock
    stubs = {
        "slack_bolt": slack_bolt,
        "slack_bolt.async_app": slack_bolt.async_app,
        "slack_bolt.adapter": slack_bolt.adapter,
        "slack_bolt.adapter.socket_mode": slack_bolt.adapter.socket_mode,
        "slack_bolt.adapter.socket_mode.async_handler": slack_bolt.adapter.socket_mode.async_handler,
        "slack_sdk": slack_sdk,
        "slack_sdk.web": slack_sdk.web,
        "slack_sdk.web.async_client": slack_sdk.web.async_client,
    }
    for name, mod in stubs.items():
        sys.modules.setdefault(name, mod)
_ensure_slack_mock()
import gateway.platforms.slack as _slack_mod
_slack_mod.SLACK_AVAILABLE = True
from gateway.platforms.slack import SlackAdapter # noqa: E402
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
BOT_USER_ID = "U_BOT_123"
CHANNEL_ID = "C0AQWDLHY9M"
OTHER_CHANNEL_ID = "C9999999999"
def _make_adapter(require_mention=None, free_response_channels=None):
    """Build a bare SlackAdapter carrying only the attributes gating reads.

    __init__ is bypassed on purpose so no Slack client is constructed.
    """
    settings = {
        "require_mention": require_mention,
        "free_response_channels": free_response_channels,
    }
    extra = {key: value for key, value in settings.items() if value is not None}
    adapter = object.__new__(SlackAdapter)
    adapter.platform = Platform.SLACK
    adapter.config = PlatformConfig(enabled=True, extra=extra)
    adapter._bot_user_id = BOT_USER_ID
    adapter._team_bot_user_ids = {}
    return adapter
# ---------------------------------------------------------------------------
# Tests: _slack_require_mention
# ---------------------------------------------------------------------------
def test_require_mention_defaults_to_true(monkeypatch):
    """With no config value and no env var, mention gating is on by default."""
    monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False)
    assert _make_adapter()._slack_require_mention() is True
def test_require_mention_false():
    """A config boolean False switches mention gating off."""
    assert _make_adapter(require_mention=False)._slack_require_mention() is False
def test_require_mention_true():
    """A config boolean True keeps mention gating on."""
    assert _make_adapter(require_mention=True)._slack_require_mention() is True
def test_require_mention_string_true():
    """The string "true" parses as gating enabled."""
    assert _make_adapter(require_mention="true")._slack_require_mention() is True
def test_require_mention_string_false():
    """The string "false" parses as gating disabled."""
    assert _make_adapter(require_mention="false")._slack_require_mention() is False
def test_require_mention_string_no():
    """The string "no" parses as gating disabled."""
    assert _make_adapter(require_mention="no")._slack_require_mention() is False
def test_require_mention_string_yes():
    """The string "yes" parses as gating enabled."""
    assert _make_adapter(require_mention="yes")._slack_require_mention() is True
def test_require_mention_empty_string_stays_true(monkeypatch):
    """Empty/malformed strings keep gating ON (explicit-false parser).

    The env var is cleared first: if the empty config value falls through
    to the environment, a developer's SLACK_REQUIRE_MENTION=false would
    otherwise flake this test.
    """
    monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False)
    adapter = _make_adapter(require_mention="")
    assert adapter._slack_require_mention() is True
def test_require_mention_malformed_string_stays_true():
    """Unrecognised values keep gating ON (fail-closed)."""
    gated = _make_adapter(require_mention="maybe")._slack_require_mention()
    assert gated is True
def test_require_mention_env_var_fallback(monkeypatch):
    """Without a config value the adapter honours SLACK_REQUIRE_MENTION."""
    monkeypatch.setenv("SLACK_REQUIRE_MENTION", "false")
    assert _make_adapter()._slack_require_mention() is False
def test_require_mention_env_var_default_true(monkeypatch):
    """No config and no env var: gating stays enabled."""
    monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False)
    assert _make_adapter()._slack_require_mention() is True
# ---------------------------------------------------------------------------
# Tests: _slack_free_response_channels
# ---------------------------------------------------------------------------
def test_free_response_channels_default_empty(monkeypatch):
    """No config and no env var: no free-response channels configured."""
    monkeypatch.delenv("SLACK_FREE_RESPONSE_CHANNELS", raising=False)
    assert _make_adapter()._slack_free_response_channels() == set()
def test_free_response_channels_list():
    """A YAML list of channel IDs is parsed into the gate set."""
    channels = _make_adapter(
        free_response_channels=[CHANNEL_ID, OTHER_CHANNEL_ID]
    )._slack_free_response_channels()
    assert {CHANNEL_ID, OTHER_CHANNEL_ID} <= channels
def test_free_response_channels_csv_string():
    """A comma-separated string (with spaces) is parsed into the gate set."""
    channels = _make_adapter(
        free_response_channels=f"{CHANNEL_ID}, {OTHER_CHANNEL_ID}"
    )._slack_free_response_channels()
    assert {CHANNEL_ID, OTHER_CHANNEL_ID} <= channels
def test_free_response_channels_empty_string(monkeypatch):
    """An empty config string yields an empty channel set.

    Clears the env var first so an empty config value cannot fall through
    to a developer's SLACK_FREE_RESPONSE_CHANNELS and flake the assertion.
    """
    monkeypatch.delenv("SLACK_FREE_RESPONSE_CHANNELS", raising=False)
    adapter = _make_adapter(free_response_channels="")
    assert adapter._slack_free_response_channels() == set()
def test_free_response_channels_env_var_fallback(monkeypatch):
    """Without a config value the adapter honours SLACK_FREE_RESPONSE_CHANNELS."""
    monkeypatch.setenv("SLACK_FREE_RESPONSE_CHANNELS", f"{CHANNEL_ID},{OTHER_CHANNEL_ID}")
    channels = _make_adapter()._slack_free_response_channels()
    assert {CHANNEL_ID, OTHER_CHANNEL_ID} <= channels
# ---------------------------------------------------------------------------
# Tests: mention gating integration (simulating _handle_slack_message logic)
# ---------------------------------------------------------------------------
def _would_process(adapter, *, is_dm=False, channel_id=CHANNEL_ID,
                   text="hello", mentioned=False, thread_reply=False,
                   active_session=False):
    """Mirror the mention-gating decision made by _handle_slack_message.

    Returns True when the simulated message would be handled and False
    when the handler would return early and drop it.
    """
    bot_uid = adapter._team_bot_user_ids.get("T1", adapter._bot_user_id)
    if mentioned:
        text = f"<@{bot_uid}> {text}"
    is_mentioned = bot_uid and f"<@{bot_uid}>" in text
    # DMs are never gated.
    if is_dm:
        return True
    # Guard clauses, one per exemption, in the handler's order.
    if channel_id in adapter._slack_free_response_channels():
        return True
    if not adapter._slack_require_mention():
        return True
    if is_mentioned:
        return True
    # Unmentioned channel message: only thread follow-ups in an active
    # session still get through.
    return bool(thread_reply and active_session)
def test_default_require_mention_channel_without_mention_ignored():
    """Default config: a plain channel message with no mention is dropped."""
    assert _would_process(_make_adapter(), text="hello everyone") is False
def test_require_mention_false_channel_without_mention_processed():
    """require_mention=False lets unmentioned channel messages through."""
    adapter = _make_adapter(require_mention=False)
    assert _would_process(adapter, text="hello everyone") is True
def test_channel_in_free_response_processed_without_mention():
    """A free-response channel bypasses the mention requirement."""
    adapter = _make_adapter(require_mention=True, free_response_channels=[CHANNEL_ID])
    assert _would_process(adapter, channel_id=CHANNEL_ID, text="hello") is True
def test_other_channel_not_in_free_response_still_gated():
    """The free-response allowlist only exempts the channels it names."""
    adapter = _make_adapter(require_mention=True, free_response_channels=[CHANNEL_ID])
    assert _would_process(adapter, channel_id=OTHER_CHANNEL_ID, text="hello") is False
def test_dm_always_processed_regardless_of_setting():
    """Direct messages are never mention-gated."""
    adapter = _make_adapter(require_mention=True)
    assert _would_process(adapter, is_dm=True, text="hello") is True
def test_mentioned_message_always_processed():
    """An explicit @-mention always gets through the gate."""
    adapter = _make_adapter(require_mention=True)
    assert _would_process(adapter, mentioned=True, text="what's up") is True
def test_thread_reply_with_active_session_processed():
    """Thread follow-ups are handled while a session is active."""
    adapter = _make_adapter(require_mention=True)
    handled = _would_process(adapter, text="followup",
                             thread_reply=True, active_session=True)
    assert handled is True
def test_thread_reply_without_active_session_ignored():
    """Thread follow-ups are dropped once no session is active."""
    adapter = _make_adapter(require_mention=True)
    handled = _would_process(adapter, text="followup",
                             thread_reply=True, active_session=False)
    assert handled is False
def test_bot_uid_none_processes_channel_message():
    """When bot_uid is None (before auth_test), channel messages pass through.

    This preserves the old behavior: the gating block is skipped entirely
    when bot_uid is falsy, so messages are not silently dropped during
    startup or for new workspaces.
    """
    adapter = _make_adapter(require_mention=True)
    adapter._bot_user_id = None
    adapter._team_bot_user_ids = {}
    bot_uid = adapter._team_bot_user_ids.get("T1", adapter._bot_user_id)
    assert bot_uid is None
    # Mirror the handler's guard `if not is_dm and bot_uid:` — with a falsy
    # bot_uid the gating block is never entered, so the message is processed.
    is_dm = False
    processed = not (not is_dm and bot_uid)
    assert processed is True
# ---------------------------------------------------------------------------
# Tests: config bridging
# ---------------------------------------------------------------------------
def test_config_bridges_slack_free_response_channels(monkeypatch, tmp_path):
    """Slack gating keys from config.yaml reach Platform.SLACK extra AND env."""
    import os

    from gateway.config import load_gateway_config

    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    yaml_text = (
        "slack:\n"
        "  require_mention: false\n"
        "  free_response_channels:\n"
        "    - C0AQWDLHY9M\n"
        "    - C9999999999\n"
    )
    (hermes_home / "config.yaml").write_text(yaml_text, encoding="utf-8")
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))
    for var in ("SLACK_REQUIRE_MENTION", "SLACK_FREE_RESPONSE_CHANNELS"):
        monkeypatch.delenv(var, raising=False)
    config = load_gateway_config()
    assert config is not None
    slack_extra = config.platforms[Platform.SLACK].extra
    assert slack_extra.get("require_mention") is False
    assert slack_extra.get("free_response_channels") == ["C0AQWDLHY9M", "C9999999999"]
    # Loading the config must also bridge the values into env vars.
    assert os.environ["SLACK_REQUIRE_MENTION"] == "false"
    assert os.environ["SLACK_FREE_RESPONSE_CHANNELS"] == "C0AQWDLHY9M,C9999999999"

View file

@ -4,7 +4,7 @@ import base64
import os
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import AsyncMock
from unittest.mock import AsyncMock, patch
import pytest
@ -355,7 +355,8 @@ class TestMediaUpload:
assert calls[3][1]["chunk_index"] == 2
@pytest.mark.asyncio
async def test_download_remote_bytes_rejects_large_content_length(self):
@patch("tools.url_safety.is_safe_url", return_value=True)
async def test_download_remote_bytes_rejects_large_content_length(self, _mock_safe):
from gateway.platforms.wecom import WeComAdapter
class FakeResponse:

View file

@ -628,14 +628,21 @@ class TestHasAnyProviderConfigured:
def test_claude_code_creds_ignored_on_fresh_install(self, monkeypatch, tmp_path):
"""Claude Code credentials should NOT skip the wizard when Hermes is unconfigured."""
from hermes_cli import config as config_module
from hermes_cli.auth import PROVIDER_REGISTRY
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env")
monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home)
# Clear all provider env vars so earlier checks don't short-circuit
for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
"ANTHROPIC_TOKEN", "OPENAI_BASE_URL"):
_all_vars = {"OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
"ANTHROPIC_TOKEN", "OPENAI_BASE_URL"}
for pconfig in PROVIDER_REGISTRY.values():
if pconfig.auth_type == "api_key":
_all_vars.update(pconfig.api_key_env_vars)
for var in _all_vars:
monkeypatch.delenv(var, raising=False)
# Prevent gh-cli / copilot auth fallback from leaking in
monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda _pid: {})
# Simulate valid Claude Code credentials
monkeypatch.setattr(
"agent.anthropic_adapter.read_claude_code_credentials",
@ -710,6 +717,7 @@ class TestHasAnyProviderConfigured:
"""config.yaml model dict with empty default and no creds stays false."""
import yaml
from hermes_cli import config as config_module
from hermes_cli.auth import PROVIDER_REGISTRY
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
config_file = hermes_home / "config.yaml"
@ -719,9 +727,15 @@ class TestHasAnyProviderConfigured:
monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env")
monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home)
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
"ANTHROPIC_TOKEN", "OPENAI_BASE_URL"):
_all_vars = {"OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
"ANTHROPIC_TOKEN", "OPENAI_BASE_URL"}
for pconfig in PROVIDER_REGISTRY.values():
if pconfig.auth_type == "api_key":
_all_vars.update(pconfig.api_key_env_vars)
for var in _all_vars:
monkeypatch.delenv(var, raising=False)
# Prevent gh-cli / copilot auth fallback from leaking in
monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda _pid: {})
from hermes_cli.main import _has_any_provider_configured
assert _has_any_provider_configured() is False
@ -941,9 +955,10 @@ class TestHuggingFaceModels:
"""Every HF model should have a context length entry."""
from hermes_cli.models import _PROVIDER_MODELS
from agent.model_metadata import DEFAULT_CONTEXT_LENGTHS
lower_keys = {k.lower() for k in DEFAULT_CONTEXT_LENGTHS}
hf_models = _PROVIDER_MODELS["huggingface"]
for model in hf_models:
assert model in DEFAULT_CONTEXT_LENGTHS, (
assert model.lower() in lower_keys, (
f"HF model {model!r} missing from DEFAULT_CONTEXT_LENGTHS"
)

View file

@ -49,6 +49,30 @@ def test_chat_subcommand_accepts_skills_flag(monkeypatch):
}
def test_chat_subcommand_accepts_image_flag(monkeypatch):
    """`hermes chat --image PATH` forwards both query and image to cmd_chat."""
    import hermes_cli.main as main_mod

    seen = {}

    def record_chat(args):
        seen["query"] = args.query
        seen["image"] = args.image

    monkeypatch.setattr(main_mod, "cmd_chat", record_chat)
    argv = ["hermes", "chat", "-q", "hello", "--image", "~/storage/shared/Pictures/cat.png"]
    monkeypatch.setattr(sys, "argv", argv)
    main_mod.main()
    assert seen == {
        "query": "hello",
        "image": "~/storage/shared/Pictures/cat.png",
    }
def test_continue_worktree_and_skills_flags_work_together(monkeypatch):
import hermes_cli.main as main_mod

View file

@ -68,6 +68,17 @@ class TestCommandRegistry:
for cmd in COMMAND_REGISTRY:
assert cmd.category in valid_categories, f"{cmd.name} has invalid category '{cmd.category}'"
def test_reasoning_subcommands_are_in_logical_order(self):
reasoning = next(cmd for cmd in COMMAND_REGISTRY if cmd.name == "reasoning")
assert reasoning.subcommands[:6] == (
"none",
"minimal",
"low",
"medium",
"high",
"xhigh",
)
def test_cli_only_and_gateway_only_are_mutually_exclusive(self):
for cmd in COMMAND_REGISTRY:
assert not (cmd.cli_only and cmd.gateway_only), \
@ -428,8 +439,8 @@ class TestSlashCommandCompleter:
class TestSubcommands:
def test_explicit_subcommands_extracted(self):
"""Commands with explicit subcommands on CommandDef are extracted."""
assert "/prompt" in SUBCOMMANDS
assert "clear" in SUBCOMMANDS["/prompt"]
assert "/skills" in SUBCOMMANDS
assert "install" in SUBCOMMANDS["/skills"]
def test_reasoning_has_subcommands(self):
assert "/reasoning" in SUBCOMMANDS

View file

@ -14,6 +14,23 @@ from hermes_cli import doctor as doctor_mod
from hermes_cli.doctor import _has_provider_env_config
class TestDoctorPlatformHints:
def test_termux_package_hint(self, monkeypatch):
monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
assert doctor._is_termux() is True
assert doctor._python_install_cmd() == "python -m pip install"
assert doctor._system_package_install_cmd("ripgrep") == "pkg install ripgrep"
def test_non_termux_package_hint_defaults_to_apt(self, monkeypatch):
monkeypatch.delenv("TERMUX_VERSION", raising=False)
monkeypatch.setenv("PREFIX", "/usr")
monkeypatch.setattr(sys, "platform", "linux")
assert doctor._is_termux() is False
assert doctor._python_install_cmd() == "uv pip install"
assert doctor._system_package_install_cmd("ripgrep") == "sudo apt install ripgrep"
class TestProviderEnvDetection:
def test_detects_openai_api_key(self):
content = "OPENAI_BASE_URL=http://localhost:1234/v1\nOPENAI_API_KEY=***"
@ -206,3 +223,72 @@ class TestDoctorMemoryProviderSection:
out = self._run_doctor_and_capture(monkeypatch, tmp_path, provider="mem0")
assert "Memory Provider" in out
assert "Built-in memory active" not in out
def test_run_doctor_termux_treats_docker_and_browser_warnings_as_expected(monkeypatch, tmp_path):
    """On Termux, missing docker/node produce Termux-specific guidance."""
    helper = TestDoctorMemoryProviderSection()
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
    real_which = doctor_mod.shutil.which

    def fake_which(cmd):
        # Pretend docker and the Node toolchain are absent; defer otherwise.
        return None if cmd in {"docker", "node", "npm"} else real_which(cmd)

    monkeypatch.setattr(doctor_mod.shutil, "which", fake_which)
    out = helper._run_doctor_and_capture(monkeypatch, tmp_path, provider="")
    for expected in (
        "Docker backend is not available inside Termux",
        "Node.js not found (browser tools are optional in the tested Termux path)",
        "Install Node.js on Termux with: pkg install nodejs",
        "Termux browser setup:",
        "1) pkg install nodejs",
        "2) npm install -g agent-browser",
        "3) agent-browser install",
    ):
        assert expected in out
    assert "docker not found (optional)" not in out
def test_run_doctor_termux_does_not_mark_browser_available_without_agent_browser(monkeypatch, tmp_path):
    """Node present but agent-browser missing: doctor must not show '✓ browser'."""
    # Minimal HERMES_HOME / project layout so run_doctor has paths to inspect.
    home = tmp_path / ".hermes"
    home.mkdir(parents=True, exist_ok=True)
    (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8")
    project = tmp_path / "project"
    project.mkdir(exist_ok=True)
    # Simulate a Termux environment via its characteristic env vars.
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
    # Point the doctor module's path globals at the temp layout.
    monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
    monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project)
    monkeypatch.setattr(doctor_mod, "_DHH", str(home))
    # node/npm resolve, everything else (incl. agent-browser) is missing.
    monkeypatch.setattr(doctor_mod.shutil, "which", lambda cmd: "/data/data/com.termux/files/usr/bin/node" if cmd in {"node", "npm"} else None)
    # Fake model_tools so doctor believes the browser toolset is configured
    # (env-wise) even though its system dependency is absent.
    fake_model_tools = types.SimpleNamespace(
        check_tool_availability=lambda *a, **kw: (["terminal"], [{"name": "browser", "env_vars": [], "tools": ["browser_navigate"]}]),
        TOOLSET_REQUIREMENTS={
            "terminal": {"name": "terminal"},
            "browser": {"name": "browser"},
        },
    )
    monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
    # Best-effort: neutralise auth lookups so the doctor run stays offline.
    try:
        from hermes_cli import auth as _auth_mod
        monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
        monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
    except Exception:
        pass
    # Capture the doctor report printed to stdout.
    import io, contextlib
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        doctor_mod.run_doctor(Namespace(fix=False))
    out = buf.getvalue()
    # Browser must be reported, but never as available/checked-off.
    assert "✓ browser" not in out
    assert "browser" in out
    assert "system dependency not met" in out
    assert "agent-browser is not installed (expected in the tested Termux path)" in out
    assert "npm install -g agent-browser && agent-browser install" in out

View file

@ -10,6 +10,7 @@ import hermes_cli.gateway as gateway
class TestSystemdLingerStatus:
def test_reports_enabled(self, monkeypatch):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setenv("USER", "alice")
monkeypatch.setattr(
gateway.subprocess,
@ -22,6 +23,7 @@ class TestSystemdLingerStatus:
def test_reports_disabled(self, monkeypatch):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setenv("USER", "alice")
monkeypatch.setattr(
gateway.subprocess,
@ -32,6 +34,11 @@ class TestSystemdLingerStatus:
assert gateway.get_systemd_linger_status() == (False, "")
def test_reports_termux_as_not_supported(self, monkeypatch):
monkeypatch.setattr(gateway, "is_termux", lambda: True)
assert gateway.get_systemd_linger_status() == (None, "not supported in Termux")
def test_systemd_status_warns_when_linger_disabled(monkeypatch, tmp_path, capsys):
unit_path = tmp_path / "hermes-gateway.service"

View file

@ -8,6 +8,7 @@ import hermes_cli.gateway as gateway
class TestEnsureLingerEnabled:
def test_linger_already_enabled_via_file(self, monkeypatch, capsys):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr("getpass.getuser", lambda: "testuser")
monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: True))
@ -22,6 +23,7 @@ class TestEnsureLingerEnabled:
def test_status_enabled_skips_enable(self, monkeypatch, capsys):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr("getpass.getuser", lambda: "testuser")
monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False))
monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (True, ""))
@ -37,6 +39,7 @@ class TestEnsureLingerEnabled:
def test_loginctl_success_enables_linger(self, monkeypatch, capsys):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr("getpass.getuser", lambda: "testuser")
monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False))
monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, ""))
@ -59,6 +62,7 @@ class TestEnsureLingerEnabled:
def test_missing_loginctl_shows_manual_guidance(self, monkeypatch, capsys):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr("getpass.getuser", lambda: "testuser")
monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False))
monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (None, "loginctl not found"))
@ -76,6 +80,7 @@ class TestEnsureLingerEnabled:
def test_loginctl_failure_shows_manual_guidance(self, monkeypatch, capsys):
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr("getpass.getuser", lambda: "testuser")
monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False))
monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, ""))

View file

@ -109,7 +109,8 @@ class TestGatewayStopCleanup:
unit_path = tmp_path / "hermes-gateway.service"
unit_path.write_text("unit\n", encoding="utf-8")
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)
@ -134,7 +135,8 @@ class TestGatewayStopCleanup:
unit_path = tmp_path / "hermes-gateway.service"
unit_path.write_text("unit\n", encoding="utf-8")
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)
@ -256,7 +258,8 @@ class TestGatewayServiceDetection:
user_unit = SimpleNamespace(exists=lambda: True)
system_unit = SimpleNamespace(exists=lambda: True)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(
gateway_cli,
@ -278,7 +281,8 @@ class TestGatewayServiceDetection:
class TestGatewaySystemServiceRouting:
def test_gateway_install_passes_system_flags(self, monkeypatch):
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
calls = []
@ -294,11 +298,30 @@ class TestGatewaySystemServiceRouting:
assert calls == [(True, True, "alice")]
def test_gateway_install_reports_termux_manual_mode(self, monkeypatch, capsys):
monkeypatch.setattr(gateway_cli, "is_termux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
try:
gateway_cli.gateway_command(
SimpleNamespace(gateway_command="install", force=False, system=False, run_as_user=None)
)
except SystemExit as exc:
assert exc.code == 1
else:
raise AssertionError("Expected gateway_command to exit on unsupported Termux service install")
out = capsys.readouterr().out
assert "not supported on Termux" in out
assert "Run manually: hermes gateway" in out
def test_gateway_status_prefers_system_service_when_only_system_unit_exists(self, monkeypatch):
user_unit = SimpleNamespace(exists=lambda: False)
system_unit = SimpleNamespace(exists=lambda: True)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(
gateway_cli,
@ -313,6 +336,20 @@ class TestGatewaySystemServiceRouting:
assert calls == [(False, False)]
def test_gateway_status_on_termux_shows_manual_guidance(self, monkeypatch, capsys):
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: True)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "find_gateway_pids", lambda exclude_pids=None: [])
monkeypatch.setattr(gateway_cli, "_runtime_health_lines", lambda: [])
gateway_cli.gateway_command(SimpleNamespace(gateway_command="status", deep=False, system=False))
out = capsys.readouterr().out
assert "Gateway is not running" in out
assert "nohup hermes gateway" in out
assert "install as user service" not in out
def test_gateway_restart_does_not_fallback_to_foreground_when_launchd_restart_fails(self, tmp_path, monkeypatch):
plist_path = tmp_path / "ai.hermes.gateway.plist"
plist_path.write_text("plist\n", encoding="utf-8")
@ -513,12 +550,22 @@ class TestGeneratedUnitUsesDetectedVenv:
class TestGeneratedUnitIncludesLocalBin:
"""~/.local/bin must be in PATH so uvx/pipx tools are discoverable."""
def test_user_unit_includes_local_bin_in_path(self):
def test_user_unit_includes_local_bin_in_path(self, monkeypatch):
home = Path.home()
monkeypatch.setattr(
gateway_cli,
"_build_user_local_paths",
lambda home_path, existing: [str(home / ".local" / "bin")],
)
unit = gateway_cli.generate_systemd_unit(system=False)
home = str(Path.home())
assert f"{home}/.local/bin" in unit
def test_system_unit_includes_local_bin_in_path(self):
def test_system_unit_includes_local_bin_in_path(self, monkeypatch):
monkeypatch.setattr(
gateway_cli,
"_build_user_local_paths",
lambda home_path, existing: [str(home_path / ".local" / "bin")],
)
unit = gateway_cli.generate_systemd_unit(system=True)
# System unit uses the resolved home dir from _system_service_identity
assert "/.local/bin" in unit

View file

@ -0,0 +1,34 @@
import sys
import types
from hermes_cli.main import _prompt_reasoning_effort_selection
class _FakeTerminalMenu:
    """Stand-in for simple_term_menu.TerminalMenu.

    Records the choices passed to the most recent instantiation on the
    class, and show() simply echoes back the cursor_index it was built with.
    """

    last_choices = None  # choices from the most recent instantiation

    def __init__(self, choices, **kwargs):
        _FakeTerminalMenu.last_choices = choices
        self._cursor_index = kwargs.get("cursor_index")

    def show(self):
        return self._cursor_index
def test_reasoning_menu_orders_minimal_before_low(monkeypatch):
    """The effort picker lists minimal before low and marks the current one."""
    fake_module = types.SimpleNamespace(TerminalMenu=_FakeTerminalMenu)
    monkeypatch.setitem(sys.modules, "simple_term_menu", fake_module)
    selected = _prompt_reasoning_effort_selection(
        ["low", "minimal", "medium", "high"],
        current_effort="medium",
    )
    assert selected == "medium"
    expected_labels = [
        " minimal",
        " low",
        " medium ← currently in use",
        " high",
    ]
    assert _FakeTerminalMenu.last_choices[:4] == expected_labels

View file

@ -0,0 +1,21 @@
from pathlib import Path
import subprocess
REPO_ROOT = Path(__file__).resolve().parents[2]
SETUP_SCRIPT = REPO_ROOT / "setup-hermes.sh"
def test_setup_hermes_script_is_valid_shell():
    """`bash -n` must accept setup-hermes.sh (syntax-only check)."""
    proc = subprocess.run(["bash", "-n", str(SETUP_SCRIPT)], capture_output=True, text=True)
    assert proc.returncode == 0, proc.stderr
def test_setup_hermes_script_has_termux_path():
    """The installer keeps its Termux-specific branches and markers."""
    content = SETUP_SCRIPT.read_text(encoding="utf-8")
    for marker in (
        "is_termux()",
        ".[termux]",
        "constraints-termux.txt",
        "$PREFIX/bin",
        "Skipping tinker-atropos on Termux",
    ):
        assert marker in content

View file

@ -44,7 +44,7 @@ class TestOfferOpenclawMigration:
assert setup_mod._offer_openclaw_migration(tmp_path / ".hermes") is False
def test_runs_migration_when_user_accepts(self, tmp_path):
"""Should dynamically load the script and run the Migrator."""
"""Should run dry-run preview first, then execute after confirmation."""
openclaw_dir = tmp_path / ".openclaw"
openclaw_dir.mkdir()
@ -60,6 +60,7 @@ class TestOfferOpenclawMigration:
fake_migrator = MagicMock()
fake_migrator.migrate.return_value = {
"summary": {"migrated": 3, "skipped": 1, "conflict": 0, "error": 0},
"items": [{"kind": "config", "status": "migrated", "destination": "/tmp/x"}],
"output_dir": str(hermes_home / "migration"),
}
fake_mod.Migrator = MagicMock(return_value=fake_migrator)
@ -70,6 +71,7 @@ class TestOfferOpenclawMigration:
with (
patch("hermes_cli.setup.Path.home", return_value=tmp_path),
patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
# Both prompts answered Yes: preview offer + proceed confirmation
patch.object(setup_mod, "prompt_yes_no", return_value=True),
patch.object(setup_mod, "get_config_path", return_value=config_path),
patch("importlib.util.spec_from_file_location") as mock_spec_fn,
@ -91,13 +93,75 @@ class TestOfferOpenclawMigration:
fake_mod.resolve_selected_options.assert_called_once_with(
None, None, preset="full"
)
fake_mod.Migrator.assert_called_once()
call_kwargs = fake_mod.Migrator.call_args[1]
assert call_kwargs["execute"] is True
assert call_kwargs["overwrite"] is True
assert call_kwargs["migrate_secrets"] is True
assert call_kwargs["preset_name"] == "full"
fake_migrator.migrate.assert_called_once()
# Migrator called twice: once for dry-run preview, once for execution
assert fake_mod.Migrator.call_count == 2
# First call: dry-run preview (execute=False, overwrite=True to show all)
preview_kwargs = fake_mod.Migrator.call_args_list[0][1]
assert preview_kwargs["execute"] is False
assert preview_kwargs["overwrite"] is True
assert preview_kwargs["migrate_secrets"] is True
assert preview_kwargs["preset_name"] == "full"
# Second call: actual execution (execute=True, overwrite=False to preserve)
exec_kwargs = fake_mod.Migrator.call_args_list[1][1]
assert exec_kwargs["execute"] is True
assert exec_kwargs["overwrite"] is False
assert exec_kwargs["migrate_secrets"] is True
assert exec_kwargs["preset_name"] == "full"
# migrate() called twice (once per Migrator instance)
assert fake_migrator.migrate.call_count == 2
def test_user_declines_after_preview(self, tmp_path):
"""Should return False when user sees preview but declines to proceed."""
openclaw_dir = tmp_path / ".openclaw"
openclaw_dir.mkdir()
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
config_path = hermes_home / "config.yaml"
config_path.write_text("agent:\n max_turns: 90\n")
fake_mod = ModuleType("openclaw_to_hermes")
fake_mod.resolve_selected_options = MagicMock(return_value={"soul", "memory"})
fake_migrator = MagicMock()
fake_migrator.migrate.return_value = {
"summary": {"migrated": 3, "skipped": 0, "conflict": 0, "error": 0},
"items": [{"kind": "config", "status": "migrated", "destination": "/tmp/x"}],
}
fake_mod.Migrator = MagicMock(return_value=fake_migrator)
script = tmp_path / "openclaw_to_hermes.py"
script.write_text("# placeholder")
# First prompt (preview): Yes, Second prompt (proceed): No
prompt_responses = iter([True, False])
with (
patch("hermes_cli.setup.Path.home", return_value=tmp_path),
patch.object(setup_mod, "_OPENCLAW_SCRIPT", script),
patch.object(setup_mod, "prompt_yes_no", side_effect=prompt_responses),
patch.object(setup_mod, "get_config_path", return_value=config_path),
patch("importlib.util.spec_from_file_location") as mock_spec_fn,
):
mock_spec = MagicMock()
mock_spec.loader = MagicMock()
mock_spec_fn.return_value = mock_spec
def exec_module(mod):
mod.resolve_selected_options = fake_mod.resolve_selected_options
mod.Migrator = fake_mod.Migrator
mock_spec.loader.exec_module = exec_module
result = setup_mod._offer_openclaw_migration(hermes_home)
assert result is False
# Only dry-run Migrator was created, not the execute one
assert fake_mod.Migrator.call_count == 1
preview_kwargs = fake_mod.Migrator.call_args[1]
assert preview_kwargs["execute"] is False
def test_handles_migration_error_gracefully(self, tmp_path):
"""Should catch exceptions and return False."""

View file

@ -12,3 +12,33 @@ def test_show_status_includes_tavily_key(monkeypatch, capsys, tmp_path):
output = capsys.readouterr().out
assert "Tavily" in output
assert "tvly...cdef" in output
def test_show_status_termux_gateway_section_skips_systemctl(monkeypatch, capsys, tmp_path):
    """On Termux, the status view must never shell out to systemctl."""
    from hermes_cli import status as status_mod
    import hermes_cli.auth as auth_mod
    import hermes_cli.gateway as gateway_mod

    # Any subprocess.run call from the status module means systemctl leaked in.
    def _fail_on_systemctl(*args, **kwargs):
        raise AssertionError("systemctl should not be called in the Termux status view")

    monkeypatch.setattr(status_mod.subprocess, "run", _fail_on_systemctl)

    # Pretend we are running inside a Termux environment.
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")

    # Stub out every collaborator show_status touches so no real I/O happens.
    monkeypatch.setattr(status_mod, "get_env_path", lambda: tmp_path / ".env", raising=False)
    monkeypatch.setattr(status_mod, "get_hermes_home", lambda: tmp_path, raising=False)
    monkeypatch.setattr(status_mod, "load_config", lambda: {"model": "gpt-5.4"}, raising=False)
    monkeypatch.setattr(status_mod, "resolve_requested_provider", lambda requested=None: "openai-codex", raising=False)
    monkeypatch.setattr(status_mod, "resolve_provider", lambda requested=None, **kwargs: "openai-codex", raising=False)
    monkeypatch.setattr(status_mod, "provider_label", lambda provider: "OpenAI Codex", raising=False)
    monkeypatch.setattr(auth_mod, "get_nous_auth_status", lambda: {}, raising=False)
    monkeypatch.setattr(auth_mod, "get_codex_auth_status", lambda: {}, raising=False)
    monkeypatch.setattr(gateway_mod, "find_gateway_pids", lambda exclude_pids=None: [], raising=False)

    status_mod.show_status(SimpleNamespace(all=False, deep=False))

    rendered = capsys.readouterr().out
    assert "Manager: Termux / manual process" in rendered
    assert "Start with: hermes gateway" in rendered
    assert "systemd (user)" not in rendered

View file

@ -354,6 +354,14 @@ def test_first_install_nous_auto_configures_managed_defaults(monkeypatch):
lambda *args, **kwargs: {"web", "image_gen", "tts", "browser"},
)
monkeypatch.setattr("hermes_cli.tools_config.save_config", lambda config: None)
# Prevent leaked platform tokens (e.g. DISCORD_BOT_TOKEN from gateway.run
# import) from adding extra platforms. The loop in tools_command runs
# apply_nous_managed_defaults per platform; a second iteration sees values
# set by the first as "explicit" and skips them.
monkeypatch.setattr(
"hermes_cli.tools_config._get_enabled_platforms",
lambda: ["cli"],
)
monkeypatch.setattr(
"hermes_cli.nous_subscription.get_nous_auth_status",
lambda: {"logged_in": True},

View file

@ -368,6 +368,8 @@ class TestCmdUpdateLaunchdRestart:
monkeypatch.setattr(
gateway_cli, "is_macos", lambda: False,
)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
mock_run.side_effect = _make_run_side_effect(
commit_count="3",
@ -426,7 +428,8 @@ class TestCmdUpdateSystemService:
):
"""When user systemd is inactive but a system service exists, restart via system scope."""
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
mock_run.side_effect = _make_run_side_effect(
commit_count="3",
@ -455,7 +458,8 @@ class TestCmdUpdateSystemService:
):
"""When system service restart fails, show the failure message."""
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
mock_run.side_effect = _make_run_side_effect(
commit_count="3",
@ -477,7 +481,8 @@ class TestCmdUpdateSystemService:
):
"""When both user and system services are active, both are restarted."""
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
mock_run.side_effect = _make_run_side_effect(
commit_count="3",
@ -560,7 +565,8 @@ class TestServicePidExclusion:
):
"""After systemd restart, the sweep must exclude the service PID."""
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
SERVICE_PID = 55000
@ -639,7 +645,8 @@ class TestGetServicePids:
"""Unit tests for _get_service_pids()."""
def test_returns_systemd_main_pid(self, monkeypatch):
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
def fake_run(cmd, **kwargs):
@ -688,7 +695,8 @@ class TestGetServicePids:
def test_excludes_zero_pid(self, monkeypatch):
"""systemd returns MainPID=0 for stopped services; skip those."""
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False)
def fake_run(cmd, **kwargs):

View file

@ -5,6 +5,7 @@ pieces. The OpenAI client and tool loading are mocked so no network calls
are made.
"""
import io
import json
import logging
import re
@ -1061,6 +1062,77 @@ class TestExecuteToolCalls:
assert len(messages[0]["content"]) < 150_000
assert ("Truncated" in messages[0]["content"] or "<persisted-output>" in messages[0]["content"])
def test_quiet_tool_output_suppressed_when_progress_callback_present(self, agent):
    """With a progress callback attached, tool results are not echoed."""
    call = _mock_tool_call(name="web_search", arguments='{"q":"test"}', call_id="c1")
    assistant_msg = _mock_assistant_msg(content="", tool_calls=[call])
    history = []
    agent.tool_progress_callback = lambda *args, **kwargs: None

    with patch("run_agent.handle_function_call", return_value="search result"), \
            patch.object(agent, "_safe_print") as print_spy:
        agent._execute_tool_calls(assistant_msg, history, "task-1")

    print_spy.assert_not_called()
    assert len(history) == 1
    assert history[0]["role"] == "tool"
def test_quiet_tool_output_prints_without_progress_callback(self, agent):
    """Without a progress callback, a single summary line is printed."""
    call = _mock_tool_call(name="web_search", arguments='{"q":"test"}', call_id="c1")
    assistant_msg = _mock_assistant_msg(content="", tool_calls=[call])
    history = []
    agent.tool_progress_callback = None

    with patch("run_agent.handle_function_call", return_value="search result"), \
            patch.object(agent, "_safe_print") as print_spy:
        agent._execute_tool_calls(assistant_msg, history, "task-1")

    print_spy.assert_called_once()
    printed = str(print_spy.call_args.args[0]).lower()
    assert "search" in printed
    assert len(history) == 1
    assert history[0]["role"] == "tool"
def test_vprint_suppressed_in_parseable_quiet_mode(self, agent):
    """suppress_status_output silences _vprint, even with force=True."""
    agent.suppress_status_output = True
    with patch.object(agent, "_safe_print") as print_spy:
        agent._vprint("status line", force=True)
        agent._vprint("normal line")
    print_spy.assert_not_called()
def test_run_conversation_suppresses_retry_noise_in_parseable_quiet_mode(self, agent):
    """A transient 429 retry must leave no retry noise in quiet-mode output."""

    class _RateLimitError(Exception):
        status_code = 429

        def __str__(self):
            return "Error code: 429 - Rate limit exceeded."

    # First API call raises, second one succeeds.
    outcomes = iter([_RateLimitError(), _mock_response(content="Recovered")])

    def _scripted_api_call(api_kwargs):
        outcome = next(outcomes)
        if isinstance(outcome, Exception):
            raise outcome
        return outcome

    agent.suppress_status_output = True
    agent._interruptible_api_call = _scripted_api_call
    agent._persist_session = lambda *args, **kwargs: None
    agent._save_trajectory = lambda *args, **kwargs: None
    agent._save_session_log = lambda *args, **kwargs: None

    # Capture everything the agent would print.
    sink = io.StringIO()
    agent._print_fn = lambda *args, **kw: print(*args, file=sink, **kw)

    with patch("run_agent.time.sleep", return_value=None):
        result = agent.run_conversation("hello")

    assert result["completed"] is True
    assert result["final_response"] == "Recovered"
    noise = sink.getvalue()
    assert "API call failed" not in noise
    assert "Rate limit reached" not in noise
class TestConcurrentToolExecution:
"""Tests for _execute_tool_calls_concurrent and dispatch logic."""

View file

@ -211,14 +211,15 @@ class TestExchangeAuthCode:
assert setup_module.PENDING_AUTH_PATH.exists()
assert not setup_module.TOKEN_PATH.exists()
def test_refuses_to_overwrite_existing_token_with_narrower_scopes(self, setup_module, capsys):
def test_accepts_narrower_scopes_with_warning(self, setup_module, capsys):
"""Partial scopes are accepted with a warning (gws migration: v2.0)."""
setup_module.PENDING_AUTH_PATH.write_text(
json.dumps({"state": "saved-state", "code_verifier": "saved-verifier"})
)
setup_module.TOKEN_PATH.write_text(json.dumps({"token": "existing-token", "scopes": setup_module.SCOPES}))
setup_module.TOKEN_PATH.write_text(json.dumps({"token": "***", "scopes": setup_module.SCOPES}))
FakeFlow.credentials_payload = {
"token": "narrow-token",
"refresh_token": "refresh-token",
"token": "***",
"refresh_token": "***",
"token_uri": "https://oauth2.googleapis.com/token",
"client_id": "client-id",
"client_secret": "client-secret",
@ -228,10 +229,12 @@ class TestExchangeAuthCode:
],
}
with pytest.raises(SystemExit):
setup_module.exchange_auth_code("4/test-auth-code")
setup_module.exchange_auth_code("4/test-auth-code")
out = capsys.readouterr().out
assert "refusing to save incomplete google workspace token" in out.lower()
assert json.loads(setup_module.TOKEN_PATH.read_text())["token"] == "existing-token"
assert setup_module.PENDING_AUTH_PATH.exists()
assert "warning" in out.lower()
assert "missing" in out.lower()
# Token is saved (partial scopes accepted)
assert setup_module.TOKEN_PATH.exists()
# Pending auth is cleaned up
assert not setup_module.PENDING_AUTH_PATH.exists()

View file

@ -1,117 +1,175 @@
"""Regression tests for Google Workspace API credential validation."""
"""Tests for Google Workspace gws bridge and CLI wrapper."""
import importlib.util
import json
import os
import subprocess
import sys
import types
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
SCRIPT_PATH = (
BRIDGE_PATH = (
Path(__file__).resolve().parents[2]
/ "skills/productivity/google-workspace/scripts/gws_bridge.py"
)
API_PATH = (
Path(__file__).resolve().parents[2]
/ "skills/productivity/google-workspace/scripts/google_api.py"
)
class FakeAuthorizedCredentials:
def __init__(self, *, valid=True, expired=False, refresh_token="refresh-token"):
self.valid = valid
self.expired = expired
self.refresh_token = refresh_token
self.refresh_calls = 0
def refresh(self, _request):
self.refresh_calls += 1
self.valid = True
self.expired = False
def to_json(self):
return json.dumps({
"token": "refreshed-token",
"refresh_token": self.refresh_token,
"token_uri": "https://oauth2.googleapis.com/token",
"client_id": "client-id",
"client_secret": "client-secret",
"scopes": [
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/gmail.send",
"https://www.googleapis.com/auth/gmail.modify",
"https://www.googleapis.com/auth/calendar",
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/contacts.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/documents.readonly",
],
})
class FakeCredentialsFactory:
creds = FakeAuthorizedCredentials()
@classmethod
def from_authorized_user_file(cls, _path, _scopes):
return cls.creds
@pytest.fixture
def google_api_module(monkeypatch, tmp_path):
google_module = types.ModuleType("google")
oauth2_module = types.ModuleType("google.oauth2")
credentials_module = types.ModuleType("google.oauth2.credentials")
credentials_module.Credentials = FakeCredentialsFactory
auth_module = types.ModuleType("google.auth")
transport_module = types.ModuleType("google.auth.transport")
requests_module = types.ModuleType("google.auth.transport.requests")
requests_module.Request = object
def bridge_module(monkeypatch, tmp_path):
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.setitem(sys.modules, "google", google_module)
monkeypatch.setitem(sys.modules, "google.oauth2", oauth2_module)
monkeypatch.setitem(sys.modules, "google.oauth2.credentials", credentials_module)
monkeypatch.setitem(sys.modules, "google.auth", auth_module)
monkeypatch.setitem(sys.modules, "google.auth.transport", transport_module)
monkeypatch.setitem(sys.modules, "google.auth.transport.requests", requests_module)
spec = importlib.util.spec_from_file_location("google_workspace_api_test", SCRIPT_PATH)
spec = importlib.util.spec_from_file_location("gws_bridge_test", BRIDGE_PATH)
module = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(module)
monkeypatch.setattr(module, "TOKEN_PATH", tmp_path / "google_token.json")
return module
def _write_token(path: Path, scopes):
path.write_text(json.dumps({
"token": "access-token",
"refresh_token": "refresh-token",
@pytest.fixture
def api_module(monkeypatch, tmp_path):
hermes_home = tmp_path / ".hermes"
hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home))
spec = importlib.util.spec_from_file_location("gws_api_test", API_PATH)
module = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(module)
return module
def _write_token(path: Path, *, token="ya29.test", expiry=None, **extra):
data = {
"token": token,
"refresh_token": "1//refresh",
"client_id": "123.apps.googleusercontent.com",
"client_secret": "secret",
"token_uri": "https://oauth2.googleapis.com/token",
"client_id": "client-id",
"client_secret": "client-secret",
"scopes": scopes,
}))
**extra,
}
if expiry is not None:
data["expiry"] = expiry
path.write_text(json.dumps(data))
def test_get_credentials_rejects_missing_scopes(google_api_module, capsys):
FakeCredentialsFactory.creds = FakeAuthorizedCredentials(valid=True)
_write_token(google_api_module.TOKEN_PATH, [
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
])
def test_bridge_returns_valid_token(bridge_module, tmp_path):
"""Non-expired token is returned without refresh."""
future = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat()
token_path = bridge_module.get_token_path()
_write_token(token_path, token="ya29.valid", expiry=future)
result = bridge_module.get_valid_token()
assert result == "ya29.valid"
def test_bridge_refreshes_expired_token(bridge_module, tmp_path):
"""Expired token triggers a refresh via token_uri."""
past = (datetime.now(timezone.utc) - timedelta(hours=1)).isoformat()
token_path = bridge_module.get_token_path()
_write_token(token_path, token="ya29.old", expiry=past)
mock_resp = MagicMock()
mock_resp.read.return_value = json.dumps({
"access_token": "ya29.refreshed",
"expires_in": 3600,
}).encode()
mock_resp.__enter__ = lambda s: s
mock_resp.__exit__ = MagicMock(return_value=False)
with patch("urllib.request.urlopen", return_value=mock_resp):
result = bridge_module.get_valid_token()
assert result == "ya29.refreshed"
# Verify persisted
saved = json.loads(token_path.read_text())
assert saved["token"] == "ya29.refreshed"
def test_bridge_exits_on_missing_token(bridge_module):
"""Missing token file causes exit with code 1."""
with pytest.raises(SystemExit):
google_api_module.get_credentials()
err = capsys.readouterr().err
assert "missing google workspace scopes" in err.lower()
assert "gmail.send" in err
bridge_module.get_valid_token()
def test_get_credentials_accepts_full_scope_token(google_api_module):
FakeCredentialsFactory.creds = FakeAuthorizedCredentials(valid=True)
_write_token(google_api_module.TOKEN_PATH, list(google_api_module.SCOPES))
def test_bridge_main_injects_token_env(bridge_module, tmp_path):
"""main() sets GOOGLE_WORKSPACE_CLI_TOKEN in subprocess env."""
future = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat()
token_path = bridge_module.get_token_path()
_write_token(token_path, token="ya29.injected", expiry=future)
creds = google_api_module.get_credentials()
captured = {}
assert creds is FakeCredentialsFactory.creds
def capture_run(cmd, **kwargs):
captured["cmd"] = cmd
captured["env"] = kwargs.get("env", {})
return MagicMock(returncode=0)
with patch.object(sys, "argv", ["gws_bridge.py", "gmail", "+triage"]):
with patch.object(subprocess, "run", side_effect=capture_run):
with pytest.raises(SystemExit):
bridge_module.main()
assert captured["env"]["GOOGLE_WORKSPACE_CLI_TOKEN"] == "ya29.injected"
assert captured["cmd"] == ["gws", "gmail", "+triage"]
def test_api_calendar_list_uses_agenda_by_default(api_module):
"""calendar list without dates uses +agenda helper."""
captured = {}
def capture_run(cmd, **kwargs):
captured["cmd"] = cmd
return MagicMock(returncode=0)
args = api_module.argparse.Namespace(
start="", end="", max=25, calendar="primary", func=api_module.calendar_list,
)
with patch.object(subprocess, "run", side_effect=capture_run):
with pytest.raises(SystemExit):
api_module.calendar_list(args)
gws_args = captured["cmd"][2:] # skip python + bridge path
assert "calendar" in gws_args
assert "+agenda" in gws_args
assert "--days" in gws_args
def test_api_calendar_list_respects_date_range(api_module):
"""calendar list with --start/--end uses raw events list API."""
captured = {}
def capture_run(cmd, **kwargs):
captured["cmd"] = cmd
return MagicMock(returncode=0)
args = api_module.argparse.Namespace(
start="2026-04-01T00:00:00Z",
end="2026-04-07T23:59:59Z",
max=25,
calendar="primary",
func=api_module.calendar_list,
)
with patch.object(subprocess, "run", side_effect=capture_run):
with pytest.raises(SystemExit):
api_module.calendar_list(args)
gws_args = captured["cmd"][2:]
assert "events" in gws_args
assert "list" in gws_args
params_idx = gws_args.index("--params")
params = json.loads(gws_args[params_idx + 1])
assert params["timeMin"] == "2026-04-01T00:00:00Z"
assert params["timeMax"] == "2026-04-07T23:59:59Z"

View file

@ -0,0 +1,319 @@
"""Tests for the context-halving bugfix.
Background
----------
When the API returns "max_tokens too large given prompt" (input is fine,
but input_tokens + requested max_tokens > context_window), the old code
incorrectly halved context_length via get_next_probe_tier().
The fix introduces:
* parse_available_output_tokens_from_error() detects this specific
error class and returns the available output token budget.
* _ephemeral_max_output_tokens on AIAgent a one-shot override that
caps the output for one retry without touching context_length.
Naming note
-----------
max_tokens = OUTPUT token cap (a single response).
context_length = TOTAL context window (input + output combined).
These are different and the old code conflated them; the fix keeps them
separate.
"""
import sys
import os
from unittest.mock import MagicMock, patch, PropertyMock
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pytest
# ---------------------------------------------------------------------------
# parse_available_output_tokens_from_error — unit tests
# ---------------------------------------------------------------------------
class TestParseAvailableOutputTokens:
    """Unit tests for parse_available_output_tokens_from_error (pure function)."""

    @staticmethod
    def _extract(message):
        """Run the parser under test on *message*."""
        from agent.model_metadata import parse_available_output_tokens_from_error
        return parse_available_output_tokens_from_error(message)

    # -- messages that must be detected -------------------------------------

    def test_anthropic_canonical_format(self):
        """Canonical Anthropic error: max_tokens: X > context_window: Y - input_tokens: Z = available_tokens: W"""
        msg = (
            "max_tokens: 32768 > context_window: 200000 "
            "- input_tokens: 190000 = available_tokens: 10000"
        )
        assert self._extract(msg) == 10000

    def test_anthropic_format_large_numbers(self):
        msg = (
            "max_tokens: 128000 > context_window: 200000 "
            "- input_tokens: 180000 = available_tokens: 20000"
        )
        assert self._extract(msg) == 20000

    def test_available_tokens_variant_spacing(self):
        """Extra whitespace around the colon is tolerated."""
        assert self._extract("max_tokens: 32768 > 200000 available_tokens : 5000") == 5000

    def test_available_tokens_natural_language(self):
        """'available tokens: N' wording (no underscore)."""
        msg = "max_tokens must be at most 10000 given your prompt (available tokens: 10000)"
        assert self._extract(msg) == 10000

    def test_single_token_available(self):
        """Edge case: only a single token of output budget remains."""
        msg = "max_tokens: 9999 > context_window: 10000 - input_tokens: 9999 = available_tokens: 1"
        assert self._extract(msg) == 1

    # -- messages that must NOT match (parser returns None) ------------------

    def test_prompt_too_long_is_not_output_cap_error(self):
        """'prompt is too long' errors must NOT be caught — they need context halving."""
        assert self._extract("prompt is too long: 205000 tokens > 200000 maximum") is None

    def test_generic_context_window_exceeded(self):
        """Generic context window errors without available_tokens should not match."""
        assert self._extract("context window exceeded: maximum is 32768 tokens") is None

    def test_context_length_exceeded(self):
        msg = "context_length_exceeded: prompt has 131073 tokens, limit is 131072"
        assert self._extract(msg) is None

    def test_no_max_tokens_keyword(self):
        """An error unrelated to max_tokens must not match at all."""
        assert self._extract("invalid_api_key: the API key is invalid") is None

    def test_empty_string(self):
        assert self._extract("") is None

    def test_rate_limit_error(self):
        assert self._extract("rate_limit_error: too many requests per minute") is None
# ---------------------------------------------------------------------------
# build_anthropic_kwargs — output cap clamping
# ---------------------------------------------------------------------------
class TestBuildAnthropicKwargsClamping:
    """The context_length clamp only fires when the output ceiling exceeds the
    window; for standard Anthropic models (output ceiling < window) it must
    not fire.
    """

    @staticmethod
    def _kwargs_for(model, max_tokens=None, context_length=None):
        """Build API kwargs for a single-message conversation."""
        from agent.anthropic_adapter import build_anthropic_kwargs
        return build_anthropic_kwargs(
            model=model,
            messages=[{"role": "user", "content": "hi"}],
            tools=None,
            max_tokens=max_tokens,
            reasoning_config=None,
            context_length=context_length,
        )

    def test_no_clamping_when_output_ceiling_fits_in_window(self):
        """Opus 4.6 native output (128K) < context window (200K) — no clamping."""
        built = self._kwargs_for("claude-opus-4-6", context_length=200_000)
        assert built["max_tokens"] == 128_000

    def test_clamping_fires_for_tiny_custom_window(self):
        """When context_length is 8K (local model), output cap is clamped to 7999."""
        built = self._kwargs_for("claude-opus-4-6", context_length=8_000)
        assert built["max_tokens"] == 7_999

    def test_explicit_max_tokens_respected_when_within_window(self):
        """Explicit max_tokens smaller than window passes through unchanged."""
        built = self._kwargs_for("claude-opus-4-6", max_tokens=4096, context_length=200_000)
        assert built["max_tokens"] == 4096

    def test_explicit_max_tokens_clamped_when_exceeds_window(self):
        """Explicit max_tokens larger than a small window is clamped."""
        built = self._kwargs_for("claude-opus-4-6", max_tokens=32_768, context_length=16_000)
        assert built["max_tokens"] == 15_999

    def test_no_context_length_uses_native_ceiling(self):
        """Without context_length the native output ceiling is used directly."""
        assert self._kwargs_for("claude-sonnet-4-6")["max_tokens"] == 64_000
# ---------------------------------------------------------------------------
# Ephemeral max_tokens mechanism — _build_api_kwargs
# ---------------------------------------------------------------------------
class TestEphemeralMaxOutputTokens:
    """_build_api_kwargs consumes _ephemeral_max_output_tokens exactly once
    and falls back to self.max_tokens on subsequent calls.
    """

    @staticmethod
    def _make_agent():
        """Return a minimal AIAgent with api_mode='anthropic_messages' and
        a stubbed context_compressor, bypassing full __init__ cost."""
        from run_agent import AIAgent

        agent = object.__new__(AIAgent)
        # Only the attributes _build_api_kwargs actually reads.
        agent.api_mode = "anthropic_messages"
        agent.model = "claude-opus-4-6"
        agent.tools = []
        agent.max_tokens = None
        agent.reasoning_config = None
        agent._is_anthropic_oauth = False
        agent._ephemeral_max_output_tokens = None

        stub_compressor = MagicMock()
        stub_compressor.context_length = 200_000
        agent.context_compressor = stub_compressor

        # Bypass internal message preparation.
        agent._prepare_anthropic_messages_for_api = MagicMock(
            return_value=[{"role": "user", "content": "hi"}]
        )
        agent._anthropic_preserve_dots = MagicMock(return_value=False)
        return agent

    def test_ephemeral_override_is_used_on_first_call(self):
        """When _ephemeral_max_output_tokens is set, it overrides self.max_tokens."""
        agent = self._make_agent()
        agent._ephemeral_max_output_tokens = 5_000
        built = agent._build_api_kwargs([{"role": "user", "content": "hi"}])
        assert built["max_tokens"] == 5_000

    def test_ephemeral_override_is_consumed_after_one_call(self):
        """After one call the ephemeral override is cleared to None."""
        agent = self._make_agent()
        agent._ephemeral_max_output_tokens = 5_000
        agent._build_api_kwargs([{"role": "user", "content": "hi"}])
        assert agent._ephemeral_max_output_tokens is None

    def test_subsequent_call_uses_self_max_tokens(self):
        """A second _build_api_kwargs call uses the normal max_tokens path."""
        agent = self._make_agent()
        agent._ephemeral_max_output_tokens = 5_000
        agent.max_tokens = None  # resolves to the native ceiling (128K for Opus 4.6)
        agent._build_api_kwargs([{"role": "user", "content": "hi"}])
        # Second call — the one-shot override is gone.
        second = agent._build_api_kwargs([{"role": "user", "content": "hi"}])
        assert second["max_tokens"] == 128_000  # Opus 4.6 native ceiling

    def test_no_ephemeral_uses_self_max_tokens_directly(self):
        """Without an ephemeral override, self.max_tokens is used normally."""
        agent = self._make_agent()
        agent.max_tokens = 8_192
        built = agent._build_api_kwargs([{"role": "user", "content": "hi"}])
        assert built["max_tokens"] == 8_192
# ---------------------------------------------------------------------------
# Integration: error handler does NOT halve context_length for output-cap errors
# ---------------------------------------------------------------------------
class TestContextNotHalvedOnOutputCapError:
"""When the API returns 'max_tokens too large given prompt', the handler
must set _ephemeral_max_output_tokens and NOT modify context_length.
"""
def _make_agent_with_compressor(self, context_length=200_000):
from run_agent import AIAgent
from agent.context_compressor import ContextCompressor
agent = object.__new__(AIAgent)
agent.api_mode = "anthropic_messages"
agent.model = "claude-opus-4-6"
agent.base_url = "https://api.anthropic.com"
agent.tools = []
agent.max_tokens = None
agent.reasoning_config = None
agent._is_anthropic_oauth = False
agent._ephemeral_max_output_tokens = None
agent.log_prefix = ""
agent.quiet_mode = True
agent.verbose_logging = False
compressor = MagicMock(spec=ContextCompressor)
compressor.context_length = context_length
compressor.threshold_percent = 0.75
agent.context_compressor = compressor
agent._prepare_anthropic_messages_for_api = MagicMock(
return_value=[{"role": "user", "content": "hi"}]
)
agent._anthropic_preserve_dots = MagicMock(return_value=False)
agent._vprint = MagicMock()
return agent
def test_output_cap_error_sets_ephemeral_not_context_length(self):
"""On 'max_tokens too large' error, _ephemeral_max_output_tokens is set
and compressor.context_length is left unchanged."""
from agent.model_metadata import parse_available_output_tokens_from_error
from agent.model_metadata import get_next_probe_tier
error_msg = (
"max_tokens: 128000 > context_window: 200000 "
"- input_tokens: 180000 = available_tokens: 20000"
)
# Simulate the handler logic from run_agent.py
agent = self._make_agent_with_compressor(context_length=200_000)
old_ctx = agent.context_compressor.context_length
available_out = parse_available_output_tokens_from_error(error_msg)
assert available_out == 20_000, "parser must detect the error"
# The fix: set ephemeral, skip context_length modification
agent._ephemeral_max_output_tokens = max(1, available_out - 64)
# context_length must be untouched
assert agent.context_compressor.context_length == old_ctx
assert agent._ephemeral_max_output_tokens == 19_936
def test_prompt_too_long_still_triggers_probe_tier(self):
    """Genuine prompt-too-long errors must still use get_next_probe_tier."""
    from agent.model_metadata import (
        get_next_probe_tier,
        parse_available_output_tokens_from_error,
    )

    # A context-overflow message, not an output-cap message.
    message = "prompt is too long: 205000 tokens > 200000 maximum"
    parsed = parse_available_output_tokens_from_error(message)
    assert parsed is None, "prompt-too-long must not be caught by output-cap parser"
    # This error class still follows the old halving/probe-tier path.
    assert get_next_probe_tier(200_000) == 128_000
def test_output_cap_error_safety_margin(self):
    """The ephemeral value includes a 64-token safety margin below available_out."""
    from agent.model_metadata import parse_available_output_tokens_from_error

    # Server reports 10_000 tokens of output headroom.
    cap_error = (
        "max_tokens: 32768 > context_window: 200000 "
        "- input_tokens: 190000 = available_tokens: 10000"
    )
    headroom = parse_available_output_tokens_from_error(cap_error)
    # 64 tokens are shaved off before the value is used as an output cap.
    assert max(1, headroom - 64) == 9_936
def test_safety_margin_never_goes_below_one(self):
    """When available_out is very small, safe_out must be at least 1."""
    from agent.model_metadata import parse_available_output_tokens_from_error

    cap_error = (
        "max_tokens: 10 > context_window: 200000 "
        "- input_tokens: 199990 = available_tokens: 1"
    )
    headroom = parse_available_output_tokens_from_error(cap_error)
    # 1 - 64 would go negative; the max(1, ...) clamp keeps the cap valid.
    assert max(1, headroom - 64) == 1

View file

@ -2,6 +2,7 @@
import logging
import os
import stat
from logging.handlers import RotatingFileHandler
from pathlib import Path
from unittest.mock import patch
@ -300,6 +301,59 @@ class TestAddRotatingHandler:
logger.removeHandler(h)
h.close()
def test_managed_mode_initial_open_sets_group_writable(self, tmp_path):
    """In managed mode, the freshly created log file gets mode 0o660 (group-writable)."""
    log_path = tmp_path / "managed-open.log"
    logger = logging.getLogger("_test_rotating_managed_open")
    formatter = logging.Formatter("%(message)s")
    # A 0o022 umask would normally yield 0o644; the handler must chmod past it.
    old_umask = os.umask(0o022)
    try:
        with patch("hermes_cli.config.is_managed", return_value=True):
            hermes_logging._add_rotating_handler(
                logger, log_path,
                level=logging.INFO, max_bytes=1024, backup_count=1,
                formatter=formatter,
            )
    finally:
        os.umask(old_umask)  # never leak the umask change into other tests
    assert log_path.exists()
    assert stat.S_IMODE(log_path.stat().st_mode) == 0o660
    # Detach and close the handler so the named logger carries no state forward.
    for h in list(logger.handlers):
        if isinstance(h, RotatingFileHandler):
            logger.removeHandler(h)
            h.close()
def test_managed_mode_rollover_sets_group_writable(self, tmp_path):
    """After a rollover, the reopened log file is also set to mode 0o660."""
    log_path = tmp_path / "managed-rollover.log"
    logger = logging.getLogger("_test_rotating_managed_rollover")
    formatter = logging.Formatter("%(message)s")
    old_umask = os.umask(0o022)
    try:
        with patch("hermes_cli.config.is_managed", return_value=True):
            # max_bytes=1 forces a rollover on the very first record.
            hermes_logging._add_rotating_handler(
                logger, log_path,
                level=logging.INFO, max_bytes=1, backup_count=1,
                formatter=formatter,
            )
            handler = next(
                h for h in logger.handlers if isinstance(h, RotatingFileHandler)
            )
            logger.info("a" * 256)  # large record triggers the rollover
            handler.flush()
    finally:
        os.umask(old_umask)
    assert log_path.exists()
    assert stat.S_IMODE(log_path.stat().st_mode) == 0o660
    for h in list(logger.handlers):
        if isinstance(h, RotatingFileHandler):
            logger.removeHandler(h)
            h.close()
class TestReadLoggingConfig:
"""_read_logging_config() reads from config.yaml."""

View file

@ -63,4 +63,4 @@ class TestCamofoxConfigDefaults:
from hermes_cli.config import DEFAULT_CONFIG
# managed_persistence is auto-merged by _deep_merge, no version bump needed
assert DEFAULT_CONFIG["_config_version"] == 12
assert DEFAULT_CONFIG["_config_version"] == 13

View file

@ -13,6 +13,7 @@ from tools.browser_tool import (
_find_agent_browser,
_run_browser_command,
_SANE_PATH,
check_browser_requirements,
)
@ -149,6 +150,31 @@ class TestFindAgentBrowser:
_find_agent_browser()
class TestBrowserRequirements:
    def test_termux_requires_real_agent_browser_install_not_npx_fallback(self, monkeypatch):
        """On Termux, the bare `npx agent-browser` fallback must not satisfy requirements."""
        # Simulate a Termux environment.
        for var, value in (
            ("TERMUX_VERSION", "0.118.3"),
            ("PREFIX", "/data/data/com.termux/files/usr"),
        ):
            monkeypatch.setenv(var, value)
        # Local (non-camofox, non-cloud) mode with only the npx fallback discoverable.
        monkeypatch.setattr("tools.browser_tool._is_camofox_mode", lambda: False)
        monkeypatch.setattr("tools.browser_tool._get_cloud_provider", lambda: None)
        monkeypatch.setattr("tools.browser_tool._find_agent_browser", lambda: "npx agent-browser")
        assert check_browser_requirements() is False
class TestRunBrowserCommandTermuxFallback:
    def test_termux_local_mode_rejects_bare_npx_fallback(self, monkeypatch):
        """_run_browser_command must fail with install guidance instead of using bare npx."""
        monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
        monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
        monkeypatch.setattr("tools.browser_tool._find_agent_browser", lambda: "npx agent-browser")
        monkeypatch.setattr("tools.browser_tool._get_cloud_provider", lambda: None)

        outcome = _run_browser_command("task-1", "navigate", ["https://example.com"])

        assert outcome["success"] is False
        # The error must both name the rejected path and point the user at the fix.
        for expected_fragment in ("bare npx fallback", "agent-browser install"):
            assert expected_fragment in outcome["error"]
class TestRunBrowserCommandPathConstruction:
"""Verify _run_browser_command() includes Homebrew node dirs in subprocess PATH."""

View file

@ -44,6 +44,7 @@ from tools.code_execution_tool import (
build_execute_code_schema,
EXECUTE_CODE_SCHEMA,
_TOOL_DOC_LINES,
_execute_remote,
)
@ -115,6 +116,48 @@ class TestHermesToolsGeneration(unittest.TestCase):
self.assertIn("def retry(", src)
self.assertIn("import json, os, socket, shlex, time", src)
def test_file_transport_uses_tempfile_fallback_for_rpc_dir(self):
    """file-transport codegen resolves the RPC dir via tempfile, not a hard-coded /tmp."""
    src = generate_hermes_tools_module(["terminal"], transport="file")
    # The generated module must import tempfile for the fallback path...
    self.assertIn("import json, os, shlex, tempfile, time", src)
    self.assertIn("os.path.join(tempfile.gettempdir(), \"hermes_rpc\")", src)
    # ...and must no longer default HERMES_RPC_DIR to a literal /tmp path.
    self.assertNotIn('os.environ.get("HERMES_RPC_DIR", "/tmp/hermes_rpc")', src)
class TestExecuteCodeRemoteTempDir(unittest.TestCase):
    """_execute_remote builds its scratch sandbox under the backend's temp dir."""

    def test_execute_remote_uses_backend_temp_dir_for_sandbox(self):
        class FakeEnv:
            # Records every command; reports a Termux-style temp dir (no /tmp on Android).
            def __init__(self):
                self.commands = []

            def get_temp_dir(self):
                return "/data/data/com.termux/files/usr/tmp"

            def execute(self, command, cwd=None, timeout=None):
                self.commands.append((command, cwd, timeout))
                if "command -v python3" in command:
                    return {"output": "OK\n"}
                if "python3 script.py" in command:
                    return {"output": "hello\n", "returncode": 0}
                return {"output": ""}

        env = FakeEnv()
        fake_thread = MagicMock()  # keep the background thread from really starting
        with patch("tools.code_execution_tool._load_config", return_value={"timeout": 30, "max_tool_calls": 5}), \
                patch("tools.code_execution_tool._get_or_create_env", return_value=(env, "ssh")), \
                patch("tools.code_execution_tool._ship_file_to_remote"), \
                patch("tools.code_execution_tool.threading.Thread", return_value=fake_thread):
            result = json.loads(_execute_remote("print('hello')", "task-1", ["terminal"]))
        self.assertEqual(result["status"], "success")
        # commands[1] is expected to be the sandbox mkdir (index 0 presumably
        # the python3 probe — confirm against _execute_remote's command order).
        mkdir_cmd = env.commands[1][0]
        run_cmd = next(cmd for cmd, _, _ in env.commands if "python3 script.py" in cmd)
        cleanup_cmd = env.commands[-1][0]
        self.assertIn("mkdir -p /data/data/com.termux/files/usr/tmp/hermes_exec_", mkdir_cmd)
        self.assertIn("HERMES_RPC_DIR=/data/data/com.termux/files/usr/tmp/hermes_exec_", run_cmd)
        self.assertIn("rm -rf /data/data/com.termux/files/usr/tmp/hermes_exec_", cleanup_cmd)
        self.assertNotIn("mkdir -p /tmp/hermes_exec_", mkdir_cmd)
@unittest.skipIf(sys.platform == "win32", "UDS not available on Windows")
class TestExecuteCode(unittest.TestCase):

View file

@ -258,28 +258,30 @@ def _make_execute_only_env(forward_env=None):
def test_init_env_args_uses_hermes_dotenv_for_allowlisted_env(monkeypatch):
    """_build_init_env_args picks up forwarded env vars from .env file at init time."""
    # NOTE(review): this body appears to contain both the pre-merge GITHUB_TOKEN
    # lines and the post-merge DATABASE_URL lines. The second `env = ...` and the
    # second setattr overwrite the first, so the GITHUB_TOKEN assertion below
    # cannot hold — confirm which version was intended and delete the other.
    env = _make_execute_only_env(["GITHUB_TOKEN"])
    # Use a var that is NOT in _HERMES_PROVIDER_ENV_BLOCKLIST (GITHUB_TOKEN
    # is in the copilot provider's api_key_env_vars and gets stripped).
    env = _make_execute_only_env(["DATABASE_URL"])
    monkeypatch.delenv("GITHUB_TOKEN", raising=False)
    monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"GITHUB_TOKEN": "value_from_dotenv"})
    monkeypatch.delenv("DATABASE_URL", raising=False)
    monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"DATABASE_URL": "value_from_dotenv"})
    args = env._build_init_env_args()
    args_str = " ".join(args)
    assert "GITHUB_TOKEN=value_from_dotenv" in args_str
    assert "DATABASE_URL=value_from_dotenv" in args_str
def test_init_env_args_prefers_shell_env_over_hermes_dotenv(monkeypatch):
    """Shell env vars take priority over .env file values in init env args."""
    # NOTE(review): pre-merge GITHUB_TOKEN lines and post-merge DATABASE_URL lines
    # both survived the merge; the later statements overwrite the earlier ones, so
    # the GITHUB_TOKEN assertion cannot hold — keep only the intended variant.
    env = _make_execute_only_env(["GITHUB_TOKEN"])
    env = _make_execute_only_env(["DATABASE_URL"])
    monkeypatch.setenv("GITHUB_TOKEN", "value_from_shell")
    monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"GITHUB_TOKEN": "value_from_dotenv"})
    monkeypatch.setenv("DATABASE_URL", "value_from_shell")
    monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"DATABASE_URL": "value_from_dotenv"})
    args = env._build_init_env_args()
    args_str = " ".join(args)
    assert "GITHUB_TOKEN=value_from_shell" in args_str
    assert "DATABASE_URL=value_from_shell" in args_str
    assert "value_from_dotenv" not in args_str

View file

@ -0,0 +1,51 @@
from unittest.mock import patch
from tools.environments.local import LocalEnvironment
class TestLocalTempDir:
    """LocalEnvironment.get_temp_dir() and the session artifact paths derived from it."""

    def test_uses_os_tmpdir_for_session_artifacts(self, monkeypatch):
        # TMPDIR is the only temp hint set; snapshot/cwd files must live under it.
        monkeypatch.setenv("TMPDIR", "/data/data/com.termux/files/usr/tmp")
        monkeypatch.delenv("TMP", raising=False)
        monkeypatch.delenv("TEMP", raising=False)
        # init_session is stubbed out so constructing the env does no real work.
        with patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None):
            env = LocalEnvironment(cwd=".", timeout=10)
        assert env.get_temp_dir() == "/data/data/com.termux/files/usr/tmp"
        assert env._snapshot_path == f"/data/data/com.termux/files/usr/tmp/hermes-snap-{env._session_id}.sh"
        assert env._cwd_file == f"/data/data/com.termux/files/usr/tmp/hermes-cwd-{env._session_id}.txt"

    def test_prefers_backend_env_tmpdir_override(self, monkeypatch):
        # No process-level temp hints at all...
        monkeypatch.delenv("TMPDIR", raising=False)
        monkeypatch.delenv("TMP", raising=False)
        monkeypatch.delenv("TEMP", raising=False)
        with patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None):
            # ...but the backend env dict supplies TMPDIR (with a trailing slash).
            env = LocalEnvironment(
                cwd=".",
                timeout=10,
                env={"TMPDIR": "/data/data/com.termux/files/home/.cache/hermes-tmp/"},
            )
        # The trailing slash is normalized away.
        assert env.get_temp_dir() == "/data/data/com.termux/files/home/.cache/hermes-tmp"
        assert env._snapshot_path == (
            f"/data/data/com.termux/files/home/.cache/hermes-tmp/hermes-snap-{env._session_id}.sh"
        )
        assert env._cwd_file == (
            f"/data/data/com.termux/files/home/.cache/hermes-tmp/hermes-cwd-{env._session_id}.txt"
        )

    def test_falls_back_to_tempfile_when_tmp_missing(self, monkeypatch):
        monkeypatch.delenv("TMPDIR", raising=False)
        monkeypatch.delenv("TMP", raising=False)
        monkeypatch.delenv("TEMP", raising=False)
        # With no usable directory anywhere, tempfile.gettempdir() is the last resort.
        with patch("tools.environments.local.os.path.isdir", return_value=False), \
                patch("tools.environments.local.os.access", return_value=False), \
                patch("tools.environments.local.tempfile.gettempdir", return_value="/cache/tmp"), \
                patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None):
            env = LocalEnvironment(cwd=".", timeout=10)
            assert env.get_temp_dir() == "/cache/tmp"
            assert env._snapshot_path == f"/cache/tmp/hermes-snap-{env._session_id}.sh"
            assert env._cwd_file == f"/cache/tmp/hermes-cwd-{env._session_id}.txt"

View file

@ -147,7 +147,7 @@ class TestBaseEnvCompatibility:
"""Hermes wires parser selection through ServerManager.tool_parser."""
import ast
base_env_path = Path(__file__).parent.parent / "environments" / "hermes_base_env.py"
base_env_path = Path(__file__).parent.parent.parent / "environments" / "hermes_base_env.py"
source = base_env_path.read_text()
tree = ast.parse(source)
@ -171,7 +171,7 @@ class TestBaseEnvCompatibility:
def test_hermes_base_env_uses_config_tool_call_parser(self):
"""Verify hermes_base_env uses the config field rather than a local parser instance."""
base_env_path = Path(__file__).parent.parent / "environments" / "hermes_base_env.py"
base_env_path = Path(__file__).parent.parent.parent / "environments" / "hermes_base_env.py"
source = base_env_path.read_text()
assert 'tool_call_parser: str = Field(' in source

View file

@ -135,6 +135,64 @@ class TestReadLog:
assert "5 lines" in result["showing"]
# =========================================================================
# Stdin helpers
# =========================================================================
class TestStdinHelpers:
    """close_stdin() across the session flavors: missing, pipe-backed, and PTY-backed."""

    def test_close_stdin_not_found(self, registry):
        # Unknown session ids report not_found rather than raising.
        result = registry.close_stdin("nonexistent")
        assert result["status"] == "not_found"

    def test_close_stdin_pipe_mode(self, registry):
        proc = MagicMock()
        proc.stdin = MagicMock()
        s = _make_session()
        s.process = proc
        registry._running[s.id] = s
        result = registry.close_stdin(s.id)
        # Pipe mode closes the subprocess stdin handle directly.
        proc.stdin.close.assert_called_once()
        assert result["status"] == "ok"

    def test_close_stdin_pty_mode(self, registry):
        pty = MagicMock()
        s = _make_session()
        s._pty = pty
        registry._running[s.id] = s
        result = registry.close_stdin(s.id)
        # PTY mode sends EOF instead of closing a pipe.
        pty.sendeof.assert_called_once()
        assert result["status"] == "ok"

    def test_close_stdin_allows_eof_driven_process_to_finish(self, registry, tmp_path):
        # Real child process that blocks on stdin.read() until EOF.
        session = registry.spawn_local(
            'python3 -c "import sys; print(sys.stdin.read().strip())"',
            cwd=str(tmp_path),
            use_pty=False,
        )
        try:
            time.sleep(0.5)  # give the child time to start reading
            assert registry.submit_stdin(session.id, "hello")["status"] == "ok"
            assert registry.close_stdin(session.id)["status"] == "ok"
            # Poll until EOF lets the child finish (5s budget).
            deadline = time.time() + 5
            while time.time() < deadline:
                poll = registry.poll(session.id)
                if poll["status"] == "exited":
                    assert poll["exit_code"] == 0
                    assert "hello" in poll["output_preview"]
                    return
                time.sleep(0.2)
            pytest.fail("process did not exit after stdin was closed")
        finally:
            registry.kill_process(session.id)
# =========================================================================
# List sessions
# =========================================================================
@ -282,6 +340,67 @@ class TestSpawnEnvSanitization:
assert f"{_HERMES_PROVIDER_ENV_FORCE_PREFIX}TELEGRAM_BOT_TOKEN" not in env
assert env["PYTHONUNBUFFERED"] == "1"
def test_spawn_via_env_uses_backend_temp_dir_for_artifacts(self, registry):
    """spawn_via_env() must place its log/pid/exit artifacts under the backend temp dir."""
    class FakeEnv:
        # Records executed commands and reports a Termux-style temp dir.
        def __init__(self):
            self.commands = []

        def get_temp_dir(self):
            return "/data/data/com.termux/files/usr/tmp"

        def execute(self, command, timeout=None):
            self.commands.append((command, timeout))
            return {"output": "4321\n"}  # fake pid readback

    env = FakeEnv()
    fake_thread = MagicMock()  # keep the poller thread from actually running
    with patch("tools.process_registry.threading.Thread", return_value=fake_thread), \
            patch.object(registry, "_write_checkpoint"):
        session = registry.spawn_via_env(env, "echo hello")
    bg_command = env.commands[0][0]
    assert session.pid == 4321
    # Artifacts under the backend temp dir, never a hard-coded /tmp.
    assert "/data/data/com.termux/files/usr/tmp/hermes_bg_" in bg_command
    assert ".exit" in bg_command
    assert "rc=$?;" in bg_command
    assert " > /tmp/hermes_bg_" not in bg_command
    assert "cat /tmp/hermes_bg_" not in bg_command
    fake_thread.start.assert_called_once()
def test_env_poller_quotes_temp_paths_with_spaces(self, registry):
    """_env_poller_loop must shell-quote artifact paths that contain spaces."""
    session = _make_session(sid="proc_space")
    session.exited = False

    class FakeEnv:
        # Yields one canned response per poll command, in order.
        def __init__(self):
            self.commands = []
            self._responses = iter([
                {"output": "hello\n"},  # cat of the log file
                {"output": "1\n"},      # kill -0 probe: process gone
                {"output": "0\n"},      # cat of the exit-code file
            ])

        def execute(self, command, timeout=None):
            self.commands.append((command, timeout))
            return next(self._responses)

    env = FakeEnv()
    with patch("tools.process_registry.time.sleep", return_value=None), \
            patch.object(registry, "_move_to_finished"):
        registry._env_poller_loop(
            session,
            env,
            "/path with spaces/hermes_bg.log",
            "/path with spaces/hermes_bg.pid",
            "/path with spaces/hermes_bg.exit",
        )
    # Every path must appear single-quoted in the generated shell commands.
    assert env.commands[0][0] == "cat '/path with spaces/hermes_bg.log' 2>/dev/null"
    assert env.commands[1][0] == "kill -0 \"$(cat '/path with spaces/hermes_bg.pid' 2>/dev/null)\" 2>/dev/null; echo $?"
    assert env.commands[2][0] == "cat '/path with spaces/hermes_bg.exit' 2>/dev/null"
# =========================================================================
# Checkpoint

View file

@ -125,7 +125,9 @@ class TestSendMatrix:
url = call_kwargs[0][0]
assert url.startswith("https://matrix.example.com/_matrix/client/v3/rooms/!room:example.com/send/m.room.message/")
assert call_kwargs[1]["headers"]["Authorization"] == "Bearer syt_tok"
assert call_kwargs[1]["json"] == {"msgtype": "m.text", "body": "hello matrix"}
payload = call_kwargs[1]["json"]
assert payload["msgtype"] == "m.text"
assert payload["body"] == "hello matrix"
def test_http_error(self):
resp = _make_aiohttp_resp(403, text_data="Forbidden")

View file

@ -32,6 +32,30 @@ def _install_telegram_mock(monkeypatch, bot):
monkeypatch.setitem(sys.modules, "telegram.constants", constants_mod)
def _ensure_slack_mock(monkeypatch):
if "slack_bolt" in sys.modules and hasattr(sys.modules["slack_bolt"], "__file__"):
return
slack_bolt = MagicMock()
slack_bolt.async_app.AsyncApp = MagicMock
slack_bolt.adapter.socket_mode.async_handler.AsyncSocketModeHandler = MagicMock
slack_sdk = MagicMock()
slack_sdk.web.async_client.AsyncWebClient = MagicMock
for name, mod in [
("slack_bolt", slack_bolt),
("slack_bolt.async_app", slack_bolt.async_app),
("slack_bolt.adapter", slack_bolt.adapter),
("slack_bolt.adapter.socket_mode", slack_bolt.adapter.socket_mode),
("slack_bolt.adapter.socket_mode.async_handler", slack_bolt.adapter.socket_mode.async_handler),
("slack_sdk", slack_sdk),
("slack_sdk.web", slack_sdk.web),
("slack_sdk.web.async_client", slack_sdk.web.async_client),
]:
monkeypatch.setitem(sys.modules, name, mod)
class TestSendMessageTool:
def test_cron_duplicate_target_is_skipped_and_explained(self):
home = SimpleNamespace(chat_id="-1001")
@ -426,7 +450,7 @@ class TestSendToPlatformChunking:
result = asyncio.run(
_send_to_platform(
Platform.DISCORD,
SimpleNamespace(enabled=True, token="tok", extra={}),
SimpleNamespace(enabled=True, token="***", extra={}),
"ch", long_msg,
)
)
@ -435,8 +459,115 @@ class TestSendToPlatformChunking:
for call in send.await_args_list:
assert len(call.args[2]) <= 2020 # each chunk fits the limit
def test_slack_messages_are_formatted_before_send(self, monkeypatch):
    """Markdown is converted to Slack mrkdwn at the tool layer, before _send_slack."""
    _ensure_slack_mock(monkeypatch)
    import gateway.platforms.slack as slack_mod
    monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True)
    send = AsyncMock(return_value={"success": True, "message_id": "1"})
    with patch("tools.send_message_tool._send_slack", send):
        result = asyncio.run(
            _send_to_platform(
                Platform.SLACK,
                SimpleNamespace(enabled=True, token="***", extra={}),
                "C123",
                "**hello** from [Hermes](<https://example.com>)",
            )
        )
    assert result["success"] is True
    # **bold** -> *bold*; [text](url) -> <url|text>
    send.assert_awaited_once_with(
        "***",
        "C123",
        "*hello* from <https://example.com|Hermes>",
    )
def test_slack_bold_italic_formatted_before_send(self, monkeypatch):
    """Bold+italic ***text*** survives tool-layer formatting."""
    _ensure_slack_mock(monkeypatch)
    import gateway.platforms.slack as slack_mod
    monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True)
    send = AsyncMock(return_value={"success": True, "message_id": "1"})
    with patch("tools.send_message_tool._send_slack", send):
        result = asyncio.run(
            _send_to_platform(
                Platform.SLACK,
                SimpleNamespace(enabled=True, token="***", extra={}),
                "C123",
                "***important*** update",
            )
        )
    assert result["success"] is True
    sent_text = send.await_args.args[2]
    # Slack mrkdwn spells bold-italic as *_text_*.
    assert "*_important_*" in sent_text
def test_slack_blockquote_formatted_before_send(self, monkeypatch):
    """Blockquote '>' markers must survive formatting (not escaped to '&gt;')."""
    _ensure_slack_mock(monkeypatch)
    import gateway.platforms.slack as slack_mod
    monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True)
    send = AsyncMock(return_value={"success": True, "message_id": "1"})
    with patch("tools.send_message_tool._send_slack", send):
        result = asyncio.run(
            _send_to_platform(
                Platform.SLACK,
                SimpleNamespace(enabled=True, token="***", extra={}),
                "C123",
                "> important quote\n\nnormal text & stuff",
            )
        )
    assert result["success"] is True
    sent_text = send.await_args.args[2]
    # Structural '>' at line start is preserved; literal '&' in prose is escaped.
    assert sent_text.startswith("> important quote")
    assert "&amp;" in sent_text  # & is escaped
    assert "&gt;" not in sent_text.split("\n")[0]  # > in blockquote is NOT escaped
def test_slack_pre_escaped_entities_not_double_escaped(self, monkeypatch):
    """Pre-escaped HTML entities survive tool-layer formatting without double-escaping."""
    _ensure_slack_mock(monkeypatch)
    import gateway.platforms.slack as slack_mod
    monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True)
    send = AsyncMock(return_value={"success": True, "message_id": "1"})
    with patch("tools.send_message_tool._send_slack", send):
        result = asyncio.run(
            _send_to_platform(
                Platform.SLACK,
                SimpleNamespace(enabled=True, token="***", extra={}),
                "C123",
                "AT&amp;T &lt;tag&gt; test",
            )
        )
    assert result["success"] is True
    sent_text = send.await_args.args[2]
    # '&amp;' must not become '&amp;amp;' etc.
    assert "&amp;amp;" not in sent_text
    assert "&amp;lt;" not in sent_text
    assert "AT&amp;T" in sent_text
def test_slack_url_with_parens_formatted_before_send(self, monkeypatch):
    """Wikipedia-style URL with parens survives tool-layer formatting."""
    _ensure_slack_mock(monkeypatch)
    import gateway.platforms.slack as slack_mod
    monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True)
    send = AsyncMock(return_value={"success": True, "message_id": "1"})
    with patch("tools.send_message_tool._send_slack", send):
        result = asyncio.run(
            _send_to_platform(
                Platform.SLACK,
                SimpleNamespace(enabled=True, token="***", extra={}),
                "C123",
                "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))",
            )
        )
    assert result["success"] is True
    sent_text = send.await_args.args[2]
    # The closing ')' of the URL must stay inside the <url|label> link.
    assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in sent_text
def test_telegram_media_attaches_to_last_chunk(self):
"""When chunked, media files are sent only with the last chunk."""
sent_calls = []
async def fake_send(token, chat_id, message, media_files=None, thread_id=None):

View file

@ -0,0 +1,91 @@
import json
from types import SimpleNamespace
import tools.terminal_tool as terminal_tool_module
from tools import process_registry as process_registry_module
def _base_config(tmp_path):
return {
"env_type": "local",
"docker_image": "",
"singularity_image": "",
"modal_image": "",
"daytona_image": "",
"cwd": str(tmp_path),
"timeout": 30,
}
def test_command_requires_pipe_stdin_detects_gh_with_token():
    """`gh ... --with-token` needs piped stdin; `gh ... --web` does not."""
    requires_pipe = terminal_tool_module._command_requires_pipe_stdin
    assert requires_pipe(
        "gh auth login --hostname github.com --git-protocol https --with-token"
    ) is True
    assert requires_pipe("gh auth login --web") is False
def test_terminal_background_disables_pty_for_gh_with_token(monkeypatch, tmp_path):
    """`gh ... --with-token` reads a secret from piped stdin, so PTY must be forced off."""
    config = _base_config(tmp_path)
    dummy_env = SimpleNamespace(env={})
    captured = {}

    def fake_spawn_local(**kwargs):
        # Capture the spawn kwargs so we can inspect use_pty below.
        captured.update(kwargs)
        return SimpleNamespace(id="proc_test", pid=1234, notify_on_complete=False)

    monkeypatch.setattr(terminal_tool_module, "_get_env_config", lambda: config)
    monkeypatch.setattr(terminal_tool_module, "_start_cleanup_thread", lambda: None)
    monkeypatch.setattr(terminal_tool_module, "_check_all_guards", lambda *_args, **_kwargs: {"approved": True})
    monkeypatch.setattr(process_registry_module.process_registry, "spawn_local", fake_spawn_local)
    monkeypatch.setitem(terminal_tool_module._active_environments, "default", dummy_env)
    monkeypatch.setitem(terminal_tool_module._last_activity, "default", 0.0)
    try:
        result = json.loads(
            terminal_tool_module.terminal_tool(
                command="gh auth login --hostname github.com --git-protocol https --with-token",
                background=True,
                pty=True,  # the caller asks for a PTY; the tool must override it
            )
        )
    finally:
        # Pop defensively: these dicts are module-global state shared across tests.
        terminal_tool_module._active_environments.pop("default", None)
        terminal_tool_module._last_activity.pop("default", None)
    assert captured["use_pty"] is False
    assert result["session_id"] == "proc_test"
    assert "PTY disabled" in result["pty_note"]
def test_terminal_background_keeps_pty_for_regular_interactive_commands(monkeypatch, tmp_path):
    """Ordinary interactive commands keep the PTY the caller asked for."""
    config = _base_config(tmp_path)
    dummy_env = SimpleNamespace(env={})
    captured = {}

    def fake_spawn_local(**kwargs):
        captured.update(kwargs)
        return SimpleNamespace(id="proc_test", pid=1234, notify_on_complete=False)

    monkeypatch.setattr(terminal_tool_module, "_get_env_config", lambda: config)
    monkeypatch.setattr(terminal_tool_module, "_start_cleanup_thread", lambda: None)
    monkeypatch.setattr(terminal_tool_module, "_check_all_guards", lambda *_args, **_kwargs: {"approved": True})
    monkeypatch.setattr(process_registry_module.process_registry, "spawn_local", fake_spawn_local)
    monkeypatch.setitem(terminal_tool_module._active_environments, "default", dummy_env)
    monkeypatch.setitem(terminal_tool_module._last_activity, "default", 0.0)
    try:
        result = json.loads(
            terminal_tool_module.terminal_tool(
                command="python3 -c \"print(input())\"",
                background=True,
                pty=True,
            )
        )
    finally:
        terminal_tool_module._active_environments.pop("default", None)
        terminal_tool_module._last_activity.pop("default", None)
    assert captured["use_pty"] is True
    # No override happened, so no explanatory pty_note is attached.
    assert "pty_note" not in result

View file

@ -16,6 +16,7 @@ from tools.tool_result_storage import (
STORAGE_DIR,
_build_persisted_message,
_heredoc_marker,
_resolve_storage_dir,
_write_to_sandbox,
enforce_turn_budget,
generate_preview,
@ -115,6 +116,24 @@ class TestWriteToSandbox:
_write_to_sandbox("content", "/tmp/hermes-results/abc.txt", env)
assert env.execute.call_args[1]["timeout"] == 30
def test_uses_parent_dir_of_remote_path(self):
    """The sandbox write must mkdir the parent directory of the remote file path."""
    sandbox = MagicMock()
    sandbox.execute.return_value = {"output": "", "returncode": 0}

    _write_to_sandbox(
        "content",
        "/data/data/com.termux/files/usr/tmp/hermes-results/abc.txt",
        sandbox,
    )

    executed = sandbox.execute.call_args[0][0]
    assert "mkdir -p /data/data/com.termux/files/usr/tmp/hermes-results" in executed
class TestResolveStorageDir:
    def test_defaults_to_storage_dir_without_env(self):
        """No environment object -> module-level STORAGE_DIR."""
        assert _resolve_storage_dir(None) == STORAGE_DIR

    def test_uses_env_temp_dir_when_available(self):
        """An env exposing get_temp_dir() -> '<temp>/hermes-results'."""
        sandbox = MagicMock()
        sandbox.get_temp_dir.return_value = "/data/data/com.termux/files/usr/tmp"
        assert _resolve_storage_dir(sandbox) == "/data/data/com.termux/files/usr/tmp/hermes-results"
# ── _build_persisted_message ──────────────────────────────────────────
@ -341,6 +360,22 @@ class TestMaybePersistToolResult:
)
assert "DISTINCTIVE_START_MARKER" in result
def test_env_temp_dir_changes_persisted_path(self):
    """When the env reports a custom temp dir, the persisted result path uses it."""
    env = MagicMock()
    env.execute.return_value = {"output": "", "returncode": 0}
    env.get_temp_dir.return_value = "/data/data/com.termux/files/usr/tmp"
    content = "x" * 60_000  # well above the 30_000-char persist threshold
    result = maybe_persist_tool_result(
        content=content,
        tool_name="terminal",
        tool_use_id="tc_termux",
        env=env,
        threshold=30_000,
    )
    assert "/data/data/com.termux/files/usr/tmp/hermes-results/tc_termux.txt" in result
    cmd = env.execute.call_args[0][0]
    # The sandbox write must create the results dir under the env temp dir.
    assert "mkdir -p /data/data/com.termux/files/usr/tmp/hermes-results" in cmd
def test_threshold_zero_forces_persist(self):
env = MagicMock()
env.execute.return_value = {"output": "", "returncode": 0}

View file

@ -30,7 +30,10 @@ class TestValidateImageUrl:
"""Tests for URL validation, including urlparse-based netloc check."""
def test_valid_https_url(self):
assert _validate_image_url("https://example.com/image.jpg") is True
with patch("tools.url_safety.socket.getaddrinfo", return_value=[
(2, 1, 6, "", ("93.184.216.34", 0)),
]):
assert _validate_image_url("https://example.com/image.jpg") is True
def test_valid_http_url(self):
with patch("tools.url_safety.socket.getaddrinfo", return_value=[
@ -56,10 +59,16 @@ class TestValidateImageUrl:
assert _validate_image_url("http://localhost:8080/image.png") is False
def test_valid_url_with_port(self):
assert _validate_image_url("http://example.com:8080/image.png") is True
with patch("tools.url_safety.socket.getaddrinfo", return_value=[
(2, 1, 6, "", ("93.184.216.34", 0)),
]):
assert _validate_image_url("http://example.com:8080/image.png") is True
def test_valid_url_with_path_only(self):
assert _validate_image_url("https://example.com/") is True
with patch("tools.url_safety.socket.getaddrinfo", return_value=[
(2, 1, 6, "", ("93.184.216.34", 0)),
]):
assert _validate_image_url("https://example.com/") is True
def test_rejects_empty_string(self):
assert _validate_image_url("") is False
@ -441,6 +450,11 @@ class TestVisionRequirements:
(tmp_path / "auth.json").write_text(
'{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token":"codex-access-token","refresh_token":"codex-refresh-token"}}}}'
)
# config.yaml must reference the codex provider so vision auto-detect
# falls back to the active provider via _read_main_provider().
(tmp_path / "config.yaml").write_text(
'model:\n default: gpt-4o\n provider: openai-codex\n'
)
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
monkeypatch.delenv("OPENAI_API_KEY", raising=False)

View file

@ -183,12 +183,77 @@ class TestDetectAudioEnvironment:
assert result["available"] is False
assert any("PortAudio" in w for w in result["warnings"])
def test_termux_import_error_shows_termux_install_guidance(self, monkeypatch):
    """On Termux, a failed audio import surfaces pkg/pip install instructions."""
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
    # Clear SSH markers so detection treats this as a local interactive session.
    monkeypatch.delenv("SSH_CLIENT", raising=False)
    monkeypatch.delenv("SSH_TTY", raising=False)
    monkeypatch.delenv("SSH_CONNECTION", raising=False)
    # _import_audio raises ImportError; no termux-microphone-record fallback either.
    monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs")))
    monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: None)
    from tools.voice_mode import detect_audio_environment
    result = detect_audio_environment()
    assert result["available"] is False
    assert any("pkg install python-numpy portaudio" in w for w in result["warnings"])
    assert any("python -m pip install sounddevice" in w for w in result["warnings"])
def test_termux_api_package_without_android_app_blocks_voice(self, monkeypatch):
    """The termux-api CLI alone is insufficient: the Termux:API Android app must exist."""
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
    monkeypatch.delenv("SSH_CLIENT", raising=False)
    monkeypatch.delenv("SSH_TTY", raising=False)
    monkeypatch.delenv("SSH_CONNECTION", raising=False)
    # CLI binary present, but the companion Android app is missing.
    monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
    monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: False)
    monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs")))
    from tools.voice_mode import detect_audio_environment
    result = detect_audio_environment()
    assert result["available"] is False
    assert any("Termux:API Android app is not installed" in w for w in result["warnings"])
def test_termux_api_microphone_allows_voice_without_sounddevice(self, monkeypatch):
    """With CLI binary and app both present, voice is available even without sounddevice."""
    monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
    monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
    monkeypatch.delenv("SSH_CLIENT", raising=False)
    monkeypatch.delenv("SSH_TTY", raising=False)
    monkeypatch.delenv("SSH_CONNECTION", raising=False)
    # shutil.which resolves only termux-microphone-record; sounddevice import fails.
    monkeypatch.setattr("tools.voice_mode.shutil.which", lambda cmd: "/data/data/com.termux/files/usr/bin/termux-microphone-record" if cmd == "termux-microphone-record" else None)
    monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True)
    monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs")))
    from tools.voice_mode import detect_audio_environment
    result = detect_audio_environment()
    assert result["available"] is True
    assert any("Termux:API microphone recording available" in n for n in result.get("notices", []))
    assert result["warnings"] == []
# ============================================================================
# check_voice_requirements
# ============================================================================
class TestCheckVoiceRequirements:
def test_termux_api_capture_counts_as_audio_available(self, monkeypatch):
    """Termux:API microphone capture satisfies the audio requirement without sounddevice."""
    monkeypatch.setattr("tools.voice_mode._audio_available", lambda: False)
    monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
    monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True)
    monkeypatch.setattr("tools.voice_mode.detect_audio_environment", lambda: {"available": True, "warnings": [], "notices": ["Termux:API microphone recording available"]})
    # A transcription provider resolves, so the overall voice requirements are met.
    monkeypatch.setattr("tools.transcription_tools._get_provider", lambda cfg: "openai")
    from tools.voice_mode import check_voice_requirements
    result = check_voice_requirements()
    assert result["available"] is True
    assert result["audio_available"] is True
    assert result["missing_packages"] == []
    assert "Termux:API microphone" in result["details"]
def test_all_requirements_met(self, monkeypatch):
monkeypatch.setattr("tools.voice_mode._audio_available", lambda: True)
monkeypatch.setattr("tools.voice_mode.detect_audio_environment",
@ -235,8 +300,85 @@ class TestCheckVoiceRequirements:
# AudioRecorder
# ============================================================================
class TestAudioRecorderStart:
def test_start_raises_without_audio(self, monkeypatch):
class TestCreateAudioRecorder:
def test_termux_uses_termux_audio_recorder_when_api_present(self, monkeypatch):
monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True)
from tools.voice_mode import create_audio_recorder, TermuxAudioRecorder
recorder = create_audio_recorder()
assert isinstance(recorder, TermuxAudioRecorder)
assert recorder.supports_silence_autostop is False
def test_termux_without_android_app_falls_back_to_audio_recorder(self, monkeypatch):
monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: False)
from tools.voice_mode import create_audio_recorder, AudioRecorder
recorder = create_audio_recorder()
assert isinstance(recorder, AudioRecorder)
class TestTermuxAudioRecorder:
def test_start_and_stop_use_termux_microphone_commands(self, monkeypatch, temp_voice_dir):
command_calls = []
output_path = Path(temp_voice_dir) / "recording_20260409_120000.aac"
def fake_run(cmd, **kwargs):
command_calls.append(cmd)
if cmd[1] == "-f":
Path(cmd[2]).write_bytes(b"aac-bytes")
return MagicMock(returncode=0, stdout="", stderr="")
monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True)
monkeypatch.setattr("tools.voice_mode.time.strftime", lambda fmt: "20260409_120000")
monkeypatch.setattr("tools.voice_mode.subprocess.run", fake_run)
from tools.voice_mode import TermuxAudioRecorder
recorder = TermuxAudioRecorder()
recorder.start()
recorder._start_time = time.monotonic() - 1.0
result = recorder.stop()
assert result == str(output_path)
assert command_calls[0][:2] == ["/data/data/com.termux/files/usr/bin/termux-microphone-record", "-f"]
assert command_calls[1] == ["/data/data/com.termux/files/usr/bin/termux-microphone-record", "-q"]
def test_cancel_removes_partial_termux_recording(self, monkeypatch, temp_voice_dir):
output_path = Path(temp_voice_dir) / "recording_20260409_120000.aac"
def fake_run(cmd, **kwargs):
if cmd[1] == "-f":
Path(cmd[2]).write_bytes(b"aac-bytes")
return MagicMock(returncode=0, stdout="", stderr="")
monkeypatch.setenv("TERMUX_VERSION", "0.118.3")
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr")
monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record")
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True)
monkeypatch.setattr("tools.voice_mode.time.strftime", lambda fmt: "20260409_120000")
monkeypatch.setattr("tools.voice_mode.subprocess.run", fake_run)
from tools.voice_mode import TermuxAudioRecorder
recorder = TermuxAudioRecorder()
recorder.start()
recorder.cancel()
assert output_path.exists() is False
assert recorder.is_recording is False
class TestAudioRecorder:
def test_start_raises_without_audio_libs(self, monkeypatch):
def _fail_import():
raise ImportError("no sounddevice")
monkeypatch.setattr("tools.voice_mode._import_audio", _fail_import)

View file

@ -225,6 +225,7 @@ class TestWebCrawlTavily:
patch.dict(os.environ, {"TAVILY_API_KEY": "tvly-test"}), \
patch("tools.web_tools.httpx.post", return_value=mock_response), \
patch("tools.web_tools.check_website_access", return_value=None), \
patch("tools.web_tools.is_safe_url", return_value=True), \
patch("tools.interrupt.is_interrupted", return_value=False):
from tools.web_tools import web_crawl_tool
result = json.loads(asyncio.get_event_loop().run_until_complete(
@ -244,6 +245,7 @@ class TestWebCrawlTavily:
patch.dict(os.environ, {"TAVILY_API_KEY": "tvly-test"}), \
patch("tools.web_tools.httpx.post", return_value=mock_response) as mock_post, \
patch("tools.web_tools.check_website_access", return_value=None), \
patch("tools.web_tools.is_safe_url", return_value=True), \
patch("tools.interrupt.is_interrupted", return_value=False):
from tools.web_tools import web_crawl_tool
asyncio.get_event_loop().run_until_complete(

View file

@ -594,13 +594,20 @@ def camofox_console(clear: bool = False, task_id: Optional[str] = None) -> str:
# ---------------------------------------------------------------------------
def cleanup_all_camofox_sessions() -> None:
"""Close all active camofox sessions."""
"""Close all active camofox sessions.
When managed persistence is enabled, only clears local tracking state
without destroying server-side browser profiles (cookies, logins, etc.
must survive). Ephemeral sessions are fully deleted on the server.
"""
managed = _managed_persistence_enabled()
with _sessions_lock:
sessions = list(_sessions.items())
for task_id, session in sessions:
try:
_delete(f"/sessions/{session['user_id']}")
except Exception:
pass
if not managed:
for _task_id, session in sessions:
try:
_delete(f"/sessions/{session['user_id']}")
except Exception:
pass
with _sessions_lock:
_sessions.clear()

View file

@ -285,6 +285,26 @@ def _get_cloud_provider() -> Optional[CloudBrowserProvider]:
return _cached_cloud_provider
from hermes_constants import is_termux as _is_termux_environment
def _browser_install_hint() -> str:
if _is_termux_environment():
return "npm install -g agent-browser && agent-browser install"
return "npm install -g agent-browser && agent-browser install --with-deps"
def _requires_real_termux_browser_install(browser_cmd: str) -> bool:
return _is_termux_environment() and _is_local_mode() and browser_cmd.strip() == "npx agent-browser"
def _termux_browser_install_error() -> str:
return (
"Local browser automation on Termux cannot rely on the bare npx fallback. "
f"Install agent-browser explicitly first: {_browser_install_hint()}"
)
def _is_local_mode() -> bool:
"""Return True when the browser tool will use a local browser backend."""
if _get_cdp_override():
@ -796,7 +816,8 @@ def _find_agent_browser() -> str:
return "npx agent-browser"
raise FileNotFoundError(
"agent-browser CLI not found. Install it with: npm install -g agent-browser\n"
"agent-browser CLI not found. Install it with: "
f"{_browser_install_hint()}\n"
"Or run 'npm install' in the repo root to install locally.\n"
"Or ensure npx is available in your PATH."
)
@ -852,6 +873,11 @@ def _run_browser_command(
except FileNotFoundError as e:
logger.warning("agent-browser CLI not found: %s", e)
return {"success": False, "error": str(e)}
if _requires_real_termux_browser_install(browser_cmd):
error = _termux_browser_install_error()
logger.warning("browser command blocked on Termux: %s", error)
return {"success": False, "error": error}
from tools.interrupt import is_interrupted
if is_interrupted():
@ -2040,10 +2066,17 @@ def check_browser_requirements() -> bool:
# The agent-browser CLI is always required
try:
_find_agent_browser()
browser_cmd = _find_agent_browser()
except FileNotFoundError:
return False
# On Termux, the bare npx fallback is too fragile to treat as a satisfied
# local browser dependency. Require a real install (global or local) so the
# browser tool is not advertised as available when it will likely fail on
# first use.
if _requires_real_termux_browser_install(browser_cmd):
return False
# In cloud mode, also require provider credentials
provider = _get_cloud_provider()
if provider is not None and not provider.is_configured():
@ -2073,10 +2106,13 @@ if __name__ == "__main__":
else:
print("❌ Missing requirements:")
try:
_find_agent_browser()
browser_cmd = _find_agent_browser()
if _requires_real_termux_browser_install(browser_cmd):
print(" - bare npx fallback found (insufficient on Termux local mode)")
print(f" Install: {_browser_install_hint()}")
except FileNotFoundError:
print(" - agent-browser CLI not found")
print(" Install: npm install -g agent-browser && agent-browser install --with-deps")
print(f" Install: {_browser_install_hint()}")
if _cp is not None and not _cp.is_configured():
print(f" - {_cp.provider_name()} credentials not configured")
print(" Tip: set browser.cloud_provider to 'local' to use free local mode instead")

View file

@ -33,6 +33,7 @@ import json
import logging
import os
import platform
import shlex
import signal
import socket
import subprocess
@ -246,9 +247,9 @@ def _call(tool_name, args):
_FILE_TRANSPORT_HEADER = '''\
"""Auto-generated Hermes tools RPC stubs (file-based transport)."""
import json, os, shlex, time
import json, os, shlex, tempfile, time
_RPC_DIR = os.environ.get("HERMES_RPC_DIR", "/tmp/hermes_rpc")
_RPC_DIR = os.environ.get("HERMES_RPC_DIR") or os.path.join(tempfile.gettempdir(), "hermes_rpc")
_seq = 0
''' + _COMMON_HELPERS + '''\
@ -536,13 +537,30 @@ def _ship_file_to_remote(env, remote_path: str, content: str) -> None:
quotes are fine.
"""
encoded = base64.b64encode(content.encode("utf-8")).decode("ascii")
quoted_remote_path = shlex.quote(remote_path)
env.execute(
f"echo '{encoded}' | base64 -d > {remote_path}",
f"echo '{encoded}' | base64 -d > {quoted_remote_path}",
cwd="/",
timeout=30,
)
def _env_temp_dir(env: Any) -> str:
"""Return a writable temp dir for env-backed execute_code sandboxes."""
get_temp_dir = getattr(env, "get_temp_dir", None)
if callable(get_temp_dir):
try:
temp_dir = get_temp_dir()
if isinstance(temp_dir, str) and temp_dir.startswith("/"):
return temp_dir.rstrip("/") or "/"
except Exception as exc:
logger.debug("Could not resolve execute_code env temp dir: %s", exc)
candidate = tempfile.gettempdir()
if isinstance(candidate, str) and candidate.startswith("/"):
return candidate.rstrip("/") or "/"
return "/tmp"
def _rpc_poll_loop(
env,
rpc_dir: str,
@ -563,11 +581,12 @@ def _rpc_poll_loop(
poll_interval = 0.1 # 100 ms
quoted_rpc_dir = shlex.quote(rpc_dir)
while not stop_event.is_set():
try:
# List pending request files (skip .tmp partials)
ls_result = env.execute(
f"ls -1 {rpc_dir}/req_* 2>/dev/null || true",
f"ls -1 {quoted_rpc_dir}/req_* 2>/dev/null || true",
cwd="/",
timeout=10,
)
@ -589,9 +608,10 @@ def _rpc_poll_loop(
call_start = time.monotonic()
quoted_req_file = shlex.quote(req_file)
# Read request
read_result = env.execute(
f"cat {req_file}",
f"cat {quoted_req_file}",
cwd="/",
timeout=10,
)
@ -600,7 +620,7 @@ def _rpc_poll_loop(
except (json.JSONDecodeError, ValueError):
logger.debug("Malformed RPC request in %s", req_file)
# Remove bad request to avoid infinite retry
env.execute(f"rm -f {req_file}", cwd="/", timeout=5)
env.execute(f"rm -f {quoted_req_file}", cwd="/", timeout=5)
continue
tool_name = request.get("tool", "")
@ -608,6 +628,7 @@ def _rpc_poll_loop(
seq = request.get("seq", 0)
seq_str = f"{seq:06d}"
res_file = f"{rpc_dir}/res_{seq_str}"
quoted_res_file = shlex.quote(res_file)
# Enforce allow-list
if tool_name not in allowed_tools:
@ -665,14 +686,14 @@ def _rpc_poll_loop(
tool_result.encode("utf-8")
).decode("ascii")
env.execute(
f"echo '{encoded_result}' | base64 -d > {res_file}.tmp"
f" && mv {res_file}.tmp {res_file}",
f"echo '{encoded_result}' | base64 -d > {quoted_res_file}.tmp"
f" && mv {quoted_res_file}.tmp {quoted_res_file}",
cwd="/",
timeout=60,
)
# Remove the request file
env.execute(f"rm -f {req_file}", cwd="/", timeout=5)
env.execute(f"rm -f {quoted_req_file}", cwd="/", timeout=5)
except Exception as e:
if not stop_event.is_set():
@ -707,7 +728,10 @@ def _execute_remote(
env, env_type = _get_or_create_env(effective_task_id)
sandbox_id = uuid.uuid4().hex[:12]
sandbox_dir = f"/tmp/hermes_exec_{sandbox_id}"
temp_dir = _env_temp_dir(env)
sandbox_dir = f"{temp_dir}/hermes_exec_{sandbox_id}"
quoted_sandbox_dir = shlex.quote(sandbox_dir)
quoted_rpc_dir = shlex.quote(f"{sandbox_dir}/rpc")
tool_call_log: list = []
tool_call_counter = [0]
@ -735,7 +759,7 @@ def _execute_remote(
# Create sandbox directory on remote
env.execute(
f"mkdir -p {sandbox_dir}/rpc", cwd="/", timeout=10,
f"mkdir -p {quoted_rpc_dir}", cwd="/", timeout=10,
)
# Generate and ship files
@ -759,7 +783,7 @@ def _execute_remote(
# Build environment variable prefix for the script
env_prefix = (
f"HERMES_RPC_DIR={sandbox_dir}/rpc "
f"HERMES_RPC_DIR={shlex.quote(f'{sandbox_dir}/rpc')} "
f"PYTHONDONTWRITEBYTECODE=1"
)
tz = os.getenv("HERMES_TIMEZONE", "").strip()
@ -770,7 +794,7 @@ def _execute_remote(
logger.info("Executing code on %s backend (task %s)...",
env_type, effective_task_id[:8])
script_result = env.execute(
f"cd {sandbox_dir} && {env_prefix} python3 script.py",
f"cd {quoted_sandbox_dir} && {env_prefix} python3 script.py",
timeout=timeout,
)
@ -807,7 +831,7 @@ def _execute_remote(
# Clean up remote sandbox dir
try:
env.execute(
f"rm -rf {sandbox_dir}", cwd="/", timeout=15,
f"rm -rf {quoted_sandbox_dir}", cwd="/", timeout=15,
)
except Exception:
logger.debug("Failed to clean up remote sandbox %s", sandbox_dir)

View file

@ -226,14 +226,24 @@ class BaseEnvironment(ABC):
# Snapshot creation timeout (override for slow cold-starts).
_snapshot_timeout: int = 30
def get_temp_dir(self) -> str:
"""Return the backend temp directory used for session artifacts.
Most sandboxed backends use ``/tmp`` inside the target environment.
LocalEnvironment overrides this on platforms like Termux where ``/tmp``
may be missing and ``TMPDIR`` is the portable writable location.
"""
return "/tmp"
def __init__(self, cwd: str, timeout: int, env: dict = None):
self.cwd = cwd
self.timeout = timeout
self.env = env or {}
self._session_id = uuid.uuid4().hex[:12]
self._snapshot_path = f"/tmp/hermes-snap-{self._session_id}.sh"
self._cwd_file = f"/tmp/hermes-cwd-{self._session_id}.txt"
temp_dir = self.get_temp_dir().rstrip("/") or "/"
self._snapshot_path = f"{temp_dir}/hermes-snap-{self._session_id}.sh"
self._cwd_file = f"{temp_dir}/hermes-cwd-{self._session_id}.txt"
self._cwd_marker = _cwd_marker(self._session_id)
self._snapshot_ready = False
self._last_sync_time: float | None = (

View file

@ -5,6 +5,7 @@ import platform
import shutil
import signal
import subprocess
import tempfile
from tools.environments.base import BaseEnvironment, _pipe_stdin
@ -209,6 +210,32 @@ class LocalEnvironment(BaseEnvironment):
super().__init__(cwd=cwd or os.getcwd(), timeout=timeout, env=env)
self.init_session()
def get_temp_dir(self) -> str:
"""Return a shell-safe writable temp dir for local execution.
Termux does not provide /tmp by default, but exposes a POSIX TMPDIR.
Prefer POSIX-style env vars when available, keep using /tmp on regular
Unix systems, and only fall back to tempfile.gettempdir() when it also
resolves to a POSIX path.
Check the environment configured for this backend first so callers can
override the temp root explicitly (for example via terminal.env or a
custom TMPDIR), then fall back to the host process environment.
"""
for env_var in ("TMPDIR", "TMP", "TEMP"):
candidate = self.env.get(env_var) or os.environ.get(env_var)
if candidate and candidate.startswith("/"):
return candidate.rstrip("/") or "/"
if os.path.isdir("/tmp") and os.access("/tmp", os.W_OK | os.X_OK):
return "/tmp"
candidate = tempfile.gettempdir()
if candidate.startswith("/"):
return candidate.rstrip("/") or "/"
return "/tmp"
def _run_bash(self, cmd_string: str, *, login: bool = False,
timeout: int = 120,
stdin_data: str | None = None) -> subprocess.Popen:

View file

@ -183,6 +183,19 @@ class ProcessRegistry:
# ----- Spawn -----
@staticmethod
def _env_temp_dir(env: Any) -> str:
"""Return the writable sandbox temp dir for env-backed background tasks."""
get_temp_dir = getattr(env, "get_temp_dir", None)
if callable(get_temp_dir):
try:
temp_dir = get_temp_dir()
if isinstance(temp_dir, str) and temp_dir.startswith("/"):
return temp_dir.rstrip("/") or "/"
except Exception as exc:
logger.debug("Could not resolve environment temp dir: %s", exc)
return "/tmp"
def spawn_local(
self,
command: str,
@ -327,12 +340,20 @@ class ProcessRegistry:
)
# Run the command in the sandbox with output capture
log_path = f"/tmp/hermes_bg_{session.id}.log"
pid_path = f"/tmp/hermes_bg_{session.id}.pid"
temp_dir = self._env_temp_dir(env)
log_path = f"{temp_dir}/hermes_bg_{session.id}.log"
pid_path = f"{temp_dir}/hermes_bg_{session.id}.pid"
exit_path = f"{temp_dir}/hermes_bg_{session.id}.exit"
quoted_command = shlex.quote(command)
quoted_temp_dir = shlex.quote(temp_dir)
quoted_log_path = shlex.quote(log_path)
quoted_pid_path = shlex.quote(pid_path)
quoted_exit_path = shlex.quote(exit_path)
bg_command = (
f"nohup bash -c {quoted_command} > {log_path} 2>&1 & "
f"echo $! > {pid_path} && cat {pid_path}"
f"mkdir -p {quoted_temp_dir} && "
f"( nohup bash -lc {quoted_command} > {quoted_log_path} 2>&1; "
f"rc=$?; printf '%s\\n' \"$rc\" > {quoted_exit_path} ) & "
f"echo $! > {quoted_pid_path} && cat {quoted_pid_path}"
)
try:
@ -353,7 +374,7 @@ class ProcessRegistry:
# Start a poller thread that periodically reads the log file
reader = threading.Thread(
target=self._env_poller_loop,
args=(session, env, log_path, pid_path),
args=(session, env, log_path, pid_path, exit_path),
daemon=True,
name=f"proc-poller-{session.id}",
)
@ -397,14 +418,17 @@ class ProcessRegistry:
self._move_to_finished(session)
def _env_poller_loop(
self, session: ProcessSession, env: Any, log_path: str, pid_path: str
self, session: ProcessSession, env: Any, log_path: str, pid_path: str, exit_path: str
):
"""Background thread: poll a sandbox log file for non-local backends."""
quoted_log_path = shlex.quote(log_path)
quoted_pid_path = shlex.quote(pid_path)
quoted_exit_path = shlex.quote(exit_path)
while not session.exited:
time.sleep(2) # Poll every 2 seconds
try:
# Read new output from the log file
result = env.execute(f"cat {log_path} 2>/dev/null", timeout=10)
result = env.execute(f"cat {quoted_log_path} 2>/dev/null", timeout=10)
new_output = result.get("output", "")
if new_output:
with session._lock:
@ -414,14 +438,14 @@ class ProcessRegistry:
# Check if process is still running
check = env.execute(
f"kill -0 $(cat {pid_path} 2>/dev/null) 2>/dev/null; echo $?",
f"kill -0 \"$(cat {quoted_pid_path} 2>/dev/null)\" 2>/dev/null; echo $?",
timeout=5,
)
check_output = check.get("output", "").strip()
if check_output and check_output.splitlines()[-1].strip() != "0":
# Process has exited -- get exit code
# Process has exited -- get exit code captured by the wrapper shell.
exit_result = env.execute(
f"wait $(cat {pid_path} 2>/dev/null) 2>/dev/null; echo $?",
f"cat {quoted_exit_path} 2>/dev/null",
timeout=5,
)
exit_str = exit_result.get("output", "").strip()
@ -711,6 +735,29 @@ class ProcessRegistry:
"""Send data + newline to a running process's stdin (like pressing Enter)."""
return self.write_stdin(session_id, data + "\n")
def close_stdin(self, session_id: str) -> dict:
"""Close a running process's stdin / send EOF without killing the process."""
session = self.get(session_id)
if session is None:
return {"status": "not_found", "error": f"No process with ID {session_id}"}
if session.exited:
return {"status": "already_exited", "error": "Process has already finished"}
if hasattr(session, '_pty') and session._pty:
try:
session._pty.sendeof()
return {"status": "ok", "message": "EOF sent"}
except Exception as e:
return {"status": "error", "error": str(e)}
if not session.process or not session.process.stdin:
return {"status": "error", "error": "Process stdin not available (non-local backend or stdin closed)"}
try:
session.process.stdin.close()
return {"status": "ok", "message": "stdin closed"}
except Exception as e:
return {"status": "error", "error": str(e)}
def list_sessions(self, task_id: str = None) -> list:
"""List all running and recently-finished processes."""
with self._lock:
@ -926,14 +973,14 @@ PROCESS_SCHEMA = {
"Actions: 'list' (show all), 'poll' (check status + new output), "
"'log' (full output with pagination), 'wait' (block until done or timeout), "
"'kill' (terminate), 'write' (send raw stdin data without newline), "
"'submit' (send data + Enter, for answering prompts)."
"'submit' (send data + Enter, for answering prompts), 'close' (close stdin/send EOF)."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["list", "poll", "log", "wait", "kill", "write", "submit"],
"enum": ["list", "poll", "log", "wait", "kill", "write", "submit", "close"],
"description": "Action to perform on background processes"
},
"session_id": {
@ -973,7 +1020,7 @@ def _handle_process(args, **kw):
if action == "list":
return _json.dumps({"processes": process_registry.list_sessions(task_id=task_id)}, ensure_ascii=False)
elif action in ("poll", "log", "wait", "kill", "write", "submit"):
elif action in ("poll", "log", "wait", "kill", "write", "submit", "close"):
if not session_id:
return tool_error(f"session_id is required for {action}")
if action == "poll":
@ -989,7 +1036,9 @@ def _handle_process(args, **kw):
return _json.dumps(process_registry.write_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False)
elif action == "submit":
return _json.dumps(process_registry.submit_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False)
return tool_error(f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit")
elif action == "close":
return _json.dumps(process_registry.close_stdin(session_id), ensure_ascii=False)
return tool_error(f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit, close")
registry.register(

View file

@ -322,6 +322,13 @@ async def _send_to_platform(platform, pconfig, chat_id, message, thread_id=None,
media_files = media_files or []
if platform == Platform.SLACK and message:
try:
slack_adapter = SlackAdapter.__new__(SlackAdapter)
message = slack_adapter.format_message(message)
except Exception:
logger.debug("Failed to apply Slack mrkdwn formatting in _send_to_platform", exc_info=True)
# Platform message length limits (from adapter class attributes)
_MAX_LENGTHS = {
Platform.TELEGRAM: TelegramAdapter.MAX_MESSAGE_LENGTH,
@ -548,10 +555,13 @@ async def _send_discord(token, chat_id, message):
except ImportError:
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
try:
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp
_proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY")
_sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy)
url = f"https://discord.com/api/v10/channels/{chat_id}/messages"
headers = {"Authorization": f"Bot {token}", "Content-Type": "application/json"}
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
async with session.post(url, headers=headers, json={"content": message}) as resp:
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session:
async with session.post(url, headers=headers, json={"content": message}, **_req_kw) as resp:
if resp.status not in (200, 201):
body = await resp.text()
return _error(f"Discord API error ({resp.status}): {body}")
@ -568,10 +578,14 @@ async def _send_slack(token, chat_id, message):
except ImportError:
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
try:
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp
_proxy = resolve_proxy_url()
_sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy)
url = "https://slack.com/api/chat.postMessage"
headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
async with session.post(url, headers=headers, json={"channel": chat_id, "text": message}) as resp:
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session:
payload = {"channel": chat_id, "text": message, "mrkdwn": True}
async with session.post(url, headers=headers, json=payload, **_req_kw) as resp:
data = await resp.json()
if data.get("ok"):
return {"success": True, "platform": "slack", "chat_id": chat_id, "message_id": data.get("ts")}
@ -704,18 +718,21 @@ async def _send_sms(auth_token, chat_id, message):
message = message.strip()
try:
from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp
_proxy = resolve_proxy_url()
_sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy)
creds = f"{account_sid}:{auth_token}"
encoded = base64.b64encode(creds.encode("ascii")).decode("ascii")
url = f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Messages.json"
headers = {"Authorization": f"Basic {encoded}"}
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session:
form_data = aiohttp.FormData()
form_data.add_field("From", from_number)
form_data.add_field("To", chat_id)
form_data.add_field("Body", message)
async with session.post(url, data=form_data, headers=headers) as resp:
async with session.post(url, data=form_data, headers=headers, **_req_kw) as resp:
body = await resp.json()
if resp.status >= 400:
error_msg = body.get("message", str(body))

View file

@ -1112,6 +1112,21 @@ def _interpret_exit_code(command: str, exit_code: int) -> str | None:
return None
def _command_requires_pipe_stdin(command: str) -> bool:
"""Return True when PTY mode would break stdin-driven commands.
Some CLIs change behavior when stdin is a TTY. In particular,
`gh auth login --with-token` expects the token to arrive via piped stdin and
waits for EOF; when we launch it under a PTY, `process.submit()` only sends a
newline, so the command appears to hang forever with no visible progress.
"""
normalized = " ".join(command.lower().split())
return (
normalized.startswith("gh auth login")
and "--with-token" in normalized
)
def terminal_tool(
command: str,
background: bool = False,
@ -1332,6 +1347,17 @@ def terminal_tool(
}, ensure_ascii=False)
# Prepare command for execution
pty_disabled_reason = None
effective_pty = pty
if pty and _command_requires_pipe_stdin(command):
effective_pty = False
pty_disabled_reason = (
"PTY disabled for this command because it expects piped stdin/EOF "
"(for example gh auth login --with-token). For local background "
"processes, call process(action='close') after writing so it receives "
"EOF."
)
if background:
# Spawn a tracked background process via the process registry.
# For local backends: uses subprocess.Popen with output buffering.
@ -1349,7 +1375,7 @@ def terminal_tool(
task_id=effective_task_id,
session_key=session_key,
env_vars=env.env if hasattr(env, 'env') else None,
use_pty=pty,
use_pty=effective_pty,
)
else:
proc_session = process_registry.spawn_via_env(
@ -1369,6 +1395,8 @@ def terminal_tool(
}
if approval_note:
result_data["approval"] = approval_note
if pty_disabled_reason:
result_data["pty_note"] = pty_disabled_reason
# Transparent timeout clamping note
max_timeout = effective_timeout

View file

@ -9,9 +9,11 @@ Defense against context-window overflow operates at three levels:
2. **Per-result persistence** (maybe_persist_tool_result): After a tool
returns, if its output exceeds the tool's registered threshold
(registry.get_max_result_size), the full output is written INTO THE
SANDBOX at /tmp/hermes-results/{tool_use_id}.txt via env.execute().
The in-context content is replaced with a preview + file path reference.
The model can read_file to access the full output on any backend.
SANDBOX temp dir (for example /tmp/hermes-results/{tool_use_id}.txt on
standard Linux, or $TMPDIR/hermes-results/{tool_use_id}.txt on Termux)
via env.execute(). The in-context content is replaced with a preview +
file path reference. The model can read_file to access the full output
on any backend.
3. **Per-turn aggregate budget** (enforce_turn_budget): After all tool
results in a single assistant turn are collected, if the total exceeds
@ -21,6 +23,7 @@ Defense against context-window overflow operates at three levels:
"""
import logging
import os
import uuid
from tools.budget_config import (
@ -37,6 +40,22 @@ HEREDOC_MARKER = "HERMES_PERSIST_EOF"
_BUDGET_TOOL_NAME = "__budget_enforcement__"
def _resolve_storage_dir(env) -> str:
"""Return the best temp-backed storage dir for this environment."""
if env is not None:
get_temp_dir = getattr(env, "get_temp_dir", None)
if callable(get_temp_dir):
try:
temp_dir = get_temp_dir()
except Exception as exc:
logger.debug("Could not resolve env temp dir: %s", exc)
else:
if temp_dir:
temp_dir = temp_dir.rstrip("/") or "/"
return f"{temp_dir}/hermes-results"
return STORAGE_DIR
def generate_preview(content: str, max_chars: int = DEFAULT_PREVIEW_SIZE_CHARS) -> tuple[str, bool]:
"""Truncate at last newline within max_chars. Returns (preview, has_more)."""
if len(content) <= max_chars:
@ -58,8 +77,9 @@ def _heredoc_marker(content: str) -> str:
def _write_to_sandbox(content: str, remote_path: str, env) -> bool:
"""Write content into the sandbox via env.execute(). Returns True on success."""
marker = _heredoc_marker(content)
storage_dir = os.path.dirname(remote_path)
cmd = (
f"mkdir -p {STORAGE_DIR} && cat > {remote_path} << '{marker}'\n"
f"mkdir -p {storage_dir} && cat > {remote_path} << '{marker}'\n"
f"{content}\n"
f"{marker}"
)
@ -125,7 +145,8 @@ def maybe_persist_tool_result(
if len(content) <= effective_threshold:
return content
remote_path = f"{STORAGE_DIR}/{tool_use_id}.txt"
storage_dir = _resolve_storage_dir(env)
remote_path = f"{storage_dir}/{tool_use_id}.txt"
preview, has_more = generate_preview(content, max_chars=config.preview_size)
if env is not None:

View file

@ -48,6 +48,47 @@ def _audio_available() -> bool:
return False
from hermes_constants import is_termux as _is_termux_environment
def _voice_capture_install_hint() -> str:
if _is_termux_environment():
return "pkg install python-numpy portaudio && python -m pip install sounddevice"
return "pip install sounddevice numpy"
def _termux_microphone_command() -> Optional[str]:
if not _is_termux_environment():
return None
return shutil.which("termux-microphone-record")
def _termux_media_player_command() -> Optional[str]:
if not _is_termux_environment():
return None
return shutil.which("termux-media-player")
def _termux_api_app_installed() -> bool:
if not _is_termux_environment():
return False
try:
result = subprocess.run(
["pm", "list", "packages", "com.termux.api"],
capture_output=True,
text=True,
timeout=5,
check=False,
)
return "package:com.termux.api" in (result.stdout or "")
except Exception:
return False
def _termux_voice_capture_available() -> bool:
return _termux_microphone_command() is not None and _termux_api_app_installed()
def detect_audio_environment() -> dict:
"""Detect if the current environment supports audio I/O.
@ -57,6 +98,9 @@ def detect_audio_environment() -> dict:
"""
warnings = [] # hard-fail: these block voice mode
notices = [] # informational: logged but don't block
termux_mic_cmd = _termux_microphone_command()
termux_app_installed = _termux_api_app_installed()
termux_capture = bool(termux_mic_cmd and termux_app_installed)
# SSH detection
if any(os.environ.get(v) for v in ('SSH_CLIENT', 'SSH_TTY', 'SSH_CONNECTION')):
@ -89,23 +133,48 @@ def detect_audio_environment() -> dict:
try:
devices = sd.query_devices()
if not devices:
warnings.append("No audio input/output devices detected")
if termux_capture:
notices.append("No PortAudio devices detected, but Termux:API microphone capture is available")
else:
warnings.append("No audio input/output devices detected")
except Exception:
# In WSL with PulseAudio, device queries can fail even though
# recording/playback works fine. Don't block if PULSE_SERVER is set.
if os.environ.get('PULSE_SERVER'):
notices.append("Audio device query failed but PULSE_SERVER is set -- continuing")
elif termux_capture:
notices.append("PortAudio device query failed, but Termux:API microphone capture is available")
else:
warnings.append("Audio subsystem error (PortAudio cannot query devices)")
except ImportError:
warnings.append("Audio libraries not installed (pip install sounddevice numpy)")
if termux_capture:
notices.append("Termux:API microphone recording available (sounddevice not required)")
elif termux_mic_cmd and not termux_app_installed:
warnings.append(
"Termux:API Android app is not installed. Install/update the Termux:API app to use termux-microphone-record."
)
else:
warnings.append(f"Audio libraries not installed ({_voice_capture_install_hint()})")
except OSError:
warnings.append(
"PortAudio system library not found -- install it first:\n"
" Linux: sudo apt-get install libportaudio2\n"
" macOS: brew install portaudio\n"
"Then retry /voice on."
)
if termux_capture:
notices.append("Termux:API microphone recording available (PortAudio not required)")
elif termux_mic_cmd and not termux_app_installed:
warnings.append(
"Termux:API Android app is not installed. Install/update the Termux:API app to use termux-microphone-record."
)
elif _is_termux_environment():
warnings.append(
"PortAudio system library not found -- install it first:\n"
" Termux: pkg install portaudio\n"
"Then retry /voice on."
)
else:
warnings.append(
"PortAudio system library not found -- install it first:\n"
" Linux: sudo apt-get install libportaudio2\n"
" macOS: brew install portaudio\n"
"Then retry /voice on."
)
return {
"available": not warnings,
@ -174,6 +243,134 @@ def play_beep(frequency: int = 880, duration: float = 0.12, count: int = 1) -> N
logger.debug("Beep playback failed: %s", e)
# ============================================================================
# Termux Audio Recorder
# ============================================================================
class TermuxAudioRecorder:
    """Recorder backend that uses Termux:API microphone capture commands.

    Drop-in alternative to the PortAudio-based ``AudioRecorder`` for Android/
    Termux, where sounddevice is typically unavailable.  Recording is driven
    by the external ``termux-microphone-record`` CLI, so there is no access to
    a live audio stream: RMS metering is always 0 and silence auto-stop is not
    supported (see ``supports_silence_autostop``).

    Thread-safety: all mutable state is guarded by ``self._lock``; the lock is
    deliberately released around the (slow, up to 15 s) subprocess calls.
    """

    # Callers use this flag to skip silence-based auto-stop: the Termux CLI
    # gives us no live audio callback to detect silence with.
    supports_silence_autostop = False

    def __init__(self) -> None:
        # Guards all of the state below.
        self._lock = threading.Lock()
        # True while a termux-microphone-record session is believed active.
        self._recording = False
        # time.monotonic() at the moment recording started.
        self._start_time = 0.0
        # Output file path of the in-progress recording (None when idle).
        self._recording_path: Optional[str] = None
        # Always 0 for this backend; kept for interface parity with
        # AudioRecorder's live RMS meter.
        self._current_rms = 0

    @property
    def is_recording(self) -> bool:
        """Whether a recording session is currently active."""
        return self._recording

    @property
    def elapsed_seconds(self) -> float:
        """Seconds since recording started; 0.0 when not recording."""
        if not self._recording:
            return 0.0
        return time.monotonic() - self._start_time

    @property
    def current_rms(self) -> int:
        """Live input level; always 0 here (no audio stream to meter)."""
        return self._current_rms

    def start(self, on_silence_stop=None) -> None:
        """Begin recording to a timestamped .aac file under ``_TEMP_DIR``.

        ``on_silence_stop`` is accepted for interface parity but ignored.

        Raises:
            RuntimeError: if the termux-api CLI or the Termux:API Android app
                is missing, or if launching the recorder command fails.
        """
        del on_silence_stop  # Termux:API does not expose live silence callbacks.
        mic_cmd = _termux_microphone_command()
        if not mic_cmd:
            raise RuntimeError(
                "Termux voice capture requires the termux-api package and app.\n"
                "Install with: pkg install termux-api\n"
                "Then install/update the Termux:API Android app."
            )
        if not _termux_api_app_installed():
            raise RuntimeError(
                "Termux voice capture requires the Termux:API Android app.\n"
                "Install/update the Termux:API app, then retry /voice on."
            )
        with self._lock:
            # Idempotent: a second start() while recording is a no-op.
            if self._recording:
                return
            os.makedirs(_TEMP_DIR, exist_ok=True)
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            self._recording_path = os.path.join(_TEMP_DIR, f"recording_{timestamp}.aac")
            # -l 0: no time limit; -e aac: encoder; -r/-c: sample rate/channels.
            command = [
                mic_cmd,
                "-f", self._recording_path,
                "-l", "0",
                "-e", "aac",
                "-r", str(SAMPLE_RATE),
                "-c", str(CHANNELS),
            ]
        # Launch outside the lock: the CLI round-trips through the Termux:API
        # app and may take seconds.  NOTE(review): concurrent start() calls
        # could both pass the _recording check above and spawn two recorder
        # commands — confirm callers serialize start/stop.
        try:
            subprocess.run(command, capture_output=True, text=True, timeout=15, check=True)
        except subprocess.CalledProcessError as e:
            details = (e.stderr or e.stdout or str(e)).strip()
            raise RuntimeError(f"Termux microphone start failed: {details}") from e
        except Exception as e:
            raise RuntimeError(f"Termux microphone start failed: {e}") from e
        with self._lock:
            self._start_time = time.monotonic()
            self._recording = True
            self._current_rms = 0
        logger.info("Termux voice recording started")

    def _stop_termux_recording(self) -> None:
        # Ask the Termux:API recorder to quit (-q).  check=False: stopping an
        # already-stopped recorder must not raise.
        mic_cmd = _termux_microphone_command()
        if not mic_cmd:
            return
        subprocess.run([mic_cmd, "-q"], capture_output=True, text=True, timeout=15, check=False)

    def stop(self) -> Optional[str]:
        """Stop recording and return the captured file path, or None.

        Returns None when not recording, when the output file is missing,
        when the recording was too short (< 0.3 s, likely an accidental tap),
        or when the file is empty; short/empty files are deleted.
        """
        with self._lock:
            if not self._recording:
                return None
            # Snapshot and clear state under the lock, then do file/subprocess
            # work outside it.
            self._recording = False
            path = self._recording_path
            self._recording_path = None
            started_at = self._start_time
            self._current_rms = 0
        self._stop_termux_recording()
        if not path or not os.path.isfile(path):
            return None
        if time.monotonic() - started_at < 0.3:
            # Too short to contain usable speech — discard.
            try:
                os.unlink(path)
            except OSError:
                pass
            return None
        if os.path.getsize(path) <= 0:
            try:
                os.unlink(path)
            except OSError:
                pass
            return None
        logger.info("Termux voice recording stopped: %s", path)
        return path

    def cancel(self) -> None:
        """Abort any in-progress recording and delete its partial file."""
        with self._lock:
            path = self._recording_path
            self._recording = False
            self._recording_path = None
            self._current_rms = 0
        # Best-effort stop; cancellation must never raise.
        try:
            self._stop_termux_recording()
        except Exception:
            pass
        if path and os.path.isfile(path):
            try:
                os.unlink(path)
            except OSError:
                pass
        logger.info("Termux voice recording cancelled")

    def shutdown(self) -> None:
        """Release resources; equivalent to cancel() for this backend."""
        self.cancel()
# ============================================================================
# AudioRecorder
# ============================================================================
@ -193,6 +390,8 @@ class AudioRecorder:
the user is silent for ``silence_duration`` seconds and calls the callback.
"""
supports_silence_autostop = True
def __init__(self) -> None:
self._lock = threading.Lock()
self._stream: Any = None
@ -526,6 +725,13 @@ class AudioRecorder:
return wav_path
def create_audio_recorder() -> AudioRecorder | TermuxAudioRecorder:
    """Pick the recorder backend best suited to the current environment.

    Prefers the Termux:API-based recorder when both its CLI tooling and the
    companion Android app are available; otherwise falls back to the
    PortAudio-based recorder.
    """
    use_termux = _termux_voice_capture_available()
    return TermuxAudioRecorder() if use_termux else AudioRecorder()
# ============================================================================
# Whisper hallucination filter
# ============================================================================
@ -734,7 +940,8 @@ def check_voice_requirements() -> Dict[str, Any]:
stt_available = stt_enabled and stt_provider != "none"
missing: List[str] = []
has_audio = _audio_available()
termux_capture = _termux_voice_capture_available()
has_audio = _audio_available() or termux_capture
if not has_audio:
missing.extend(["sounddevice", "numpy"])
@ -745,10 +952,12 @@ def check_voice_requirements() -> Dict[str, Any]:
available = has_audio and stt_available and env_check["available"]
details_parts = []
if has_audio:
if termux_capture:
details_parts.append("Audio capture: OK (Termux:API microphone)")
elif has_audio:
details_parts.append("Audio capture: OK")
else:
details_parts.append("Audio capture: MISSING (pip install sounddevice numpy)")
details_parts.append(f"Audio capture: MISSING ({_voice_capture_install_hint()})")
if not stt_enabled:
details_parts.append("STT provider: DISABLED in config (stt.enabled: false)")

View file

@ -101,8 +101,10 @@ Current input behavior is split across `app.tsx`, `components/textInput.tsx`, an
| modified `Left/Right` | Move by word when the terminal sends `Ctrl` or `Meta` with the arrow key |
| `Home` / `Ctrl+A` | Start of line |
| `End` / `Ctrl+E` | End of line |
| `Backspace` / `Delete` | Delete the character to the left of the cursor |
| modified `Backspace` / `Delete` | Delete the previous word |
| `Backspace` | Delete the character to the left of the cursor |
| `Delete` | Delete the character to the right of the cursor |
| modified `Backspace` | Delete the previous word |
| modified `Delete` | Delete the next word |
| `Ctrl+W` | Delete the previous word |
| `Ctrl+U` | Delete from the cursor back to the start of the line |
| `Ctrl+K` | Delete from the cursor to the end of the line |

2324
ui-tui/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
import { Text, useInput } from 'ink'
import { Text, useInput, useStdin } from 'ink'
import { useEffect, useRef, useState } from 'react'
function wordLeft(s: string, p: number) {
@ -29,6 +29,29 @@ function wordRight(s: string, p: number) {
return i
}
// Raw escape sequences terminals emit for the forward-delete key:
// CSI 3~ (and $/^ variants) plus the modified form "CSI 3;…".
const FWD_DELETE_RE = /\x1b\[3[~$^]|\x1b\[3;/

/**
 * Track whether the most recent stdin chunk was a forward-delete escape
 * sequence, so callers can tell forward-delete apart from backspace (Ink
 * reports both via `key.delete`). Returns a mutable ref — not state — so
 * updates never trigger re-renders.
 */
function useForwardDeleteRef(isActive: boolean) {
  const sawForwardDelete = useRef(false)
  const { internal_eventEmitter: emitter } = useStdin()
  useEffect(() => {
    if (!isActive) return
    const handleInput = (chunk: string) => {
      sawForwardDelete.current = FWD_DELETE_RE.test(chunk)
    }
    // Prepend so the flag is refreshed before previously-registered input
    // listeners (e.g. useInput handlers) observe the same chunk.
    emitter.prependListener('input', handleInput)
    return () => {
      emitter.removeListener('input', handleInput)
    }
  }, [isActive, emitter])
  return sawForwardDelete
}
const ESC = '\x1b'
const INV = ESC + '[7m'
const INV_OFF = ESC + '[27m'
@ -56,6 +79,7 @@ interface Props {
export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = '', focus = true }: Props) {
const [cur, setCur] = useState(value.length)
const isFwdDelete = useForwardDeleteRef(focus)
const curRef = useRef(cur)
const vRef = useRef(value)
@ -211,7 +235,7 @@ export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = ''
c = mod ? wordLeft(v, c) : Math.max(0, c - 1)
} else if (k.rightArrow) {
c = mod ? wordRight(v, c) : Math.min(v.length, c + 1)
} else if ((k.backspace || k.delete) && c > 0) {
} else if ((k.backspace || k.delete) && !isFwdDelete.current && c > 0) {
if (mod) {
const t = wordLeft(v, c)
v = v.slice(0, t) + v.slice(c)
@ -220,6 +244,13 @@ export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = ''
v = v.slice(0, c - 1) + v.slice(c)
c--
}
} else if (k.delete && isFwdDelete.current && c < v.length) {
if (mod) {
const t = wordRight(v, c)
v = v.slice(0, c) + v.slice(t)
} else {
v = v.slice(0, c) + v.slice(c + 1)
}
} else if (k.ctrl && inp === 'w' && c > 0) {
const t = wordLeft(v, c)
v = v.slice(0, t) + v.slice(c)

Some files were not shown because too many files have changed in this diff Show more