Merge branch 'main' of github.com:NousResearch/hermes-agent into feat/ink-refactor

This commit is contained in:
Brooklyn Nicholson 2026-04-13 18:32:13 -05:00
commit 7e4dd6ea02
220 changed files with 23482 additions and 1959 deletions

View file

@ -5,6 +5,7 @@
# Dependencies # Dependencies
node_modules node_modules
.venv
# CI/CD # CI/CD
.github .github

View file

@ -43,6 +43,7 @@
# KIMI_BASE_URL=https://api.kimi.com/coding/v1 # Default for sk-kimi- keys # KIMI_BASE_URL=https://api.kimi.com/coding/v1 # Default for sk-kimi- keys
# KIMI_BASE_URL=https://api.moonshot.ai/v1 # For legacy Moonshot keys # KIMI_BASE_URL=https://api.moonshot.ai/v1 # For legacy Moonshot keys
# KIMI_BASE_URL=https://api.moonshot.cn/v1 # For Moonshot China keys # KIMI_BASE_URL=https://api.moonshot.cn/v1 # For Moonshot China keys
# KIMI_CN_API_KEY= # Dedicated Moonshot China key
# ============================================================================= # =============================================================================
# LLM PROVIDER (MiniMax) # LLM PROVIDER (MiniMax)

2
.gitattributes vendored Normal file
View file

@ -0,0 +1,2 @@
# Auto-generated files — collapse diffs and exclude from language stats
web/package-lock.json linguist-generated=true

View file

@ -41,11 +41,19 @@ jobs:
python-version: '3.11' python-version: '3.11'
- name: Install PyYAML for skill extraction - name: Install PyYAML for skill extraction
run: pip install pyyaml run: pip install pyyaml httpx
- name: Extract skill metadata for dashboard - name: Extract skill metadata for dashboard
run: python3 website/scripts/extract-skills.py run: python3 website/scripts/extract-skills.py
- name: Build skills index (if not already present)
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [ ! -f website/static/api/skills-index.json ]; then
python3 scripts/build_skills_index.py || echo "Skills index build failed (non-fatal)"
fi
- name: Install dependencies - name: Install dependencies
run: npm ci run: npm ci
working-directory: website working-directory: website

101
.github/workflows/skills-index.yml vendored Normal file
View file

@ -0,0 +1,101 @@
name: Build Skills Index
on:
schedule:
# Run twice daily: 6 AM and 6 PM UTC
- cron: '0 6,18 * * *'
workflow_dispatch: # Manual trigger
push:
branches: [main]
paths:
- 'scripts/build_skills_index.py'
- '.github/workflows/skills-index.yml'
permissions:
contents: read
jobs:
build-index:
# Only run on the upstream repository, not on forks
if: github.repository == 'NousResearch/hermes-agent'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install dependencies
run: pip install httpx pyyaml
- name: Build skills index
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: python scripts/build_skills_index.py
- name: Upload index artifact
uses: actions/upload-artifact@v4
with:
name: skills-index
path: website/static/api/skills-index.json
retention-days: 7
deploy-with-index:
needs: build-index
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
environment:
name: github-pages
url: ${{ steps.deploy.outputs.page_url }}
# Only deploy on schedule or manual trigger (not on every push to the script)
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: skills-index
path: website/static/api/
- uses: actions/setup-node@v4
with:
node-version: 20
cache: npm
cache-dependency-path: website/package-lock.json
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install PyYAML for skill extraction
run: pip install pyyaml
- name: Extract skill metadata for dashboard
run: python3 website/scripts/extract-skills.py
- name: Install dependencies
run: npm ci
working-directory: website
- name: Build Docusaurus
run: npm run build
working-directory: website
- name: Stage deployment
run: |
mkdir -p _site/docs
cp -r landingpage/* _site/
cp -r website/build/* _site/docs/
echo "hermes-agent.nousresearch.com" > _site/CNAME
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: _site
- name: Deploy to GitHub Pages
id: deploy
uses: actions/deploy-pages@v4

View file

@ -183,7 +183,7 @@ jobs:
--- ---
*Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*" *Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*"
gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" || echo "::warning::Could not post PR comment (expected for fork PRs — GITHUB_TOKEN is read-only)"
- name: Fail on critical findings - name: Fail on critical findings
if: steps.scan.outputs.critical == 'true' if: steps.scan.outputs.critical == 'true'

4
.gitignore vendored
View file

@ -51,6 +51,9 @@ ignored/
.worktrees/ .worktrees/
environments/benchmarks/evals/ environments/benchmarks/evals/
# Web UI build output
hermes_cli/web_dist/
# Release script temp files # Release script temp files
.release_notes.md .release_notes.md
mini-swe-agent/ mini-swe-agent/
@ -59,3 +62,4 @@ mini-swe-agent/
.direnv/ .direnv/
.nix-stamps/ .nix-stamps/
result result
website/static/api/skills-index.json

View file

@ -12,7 +12,7 @@ ENV PLAYWRIGHT_BROWSERS_PATH=/opt/hermes/.playwright
# Install system dependencies in one layer, clear APT cache # Install system dependencies in one layer, clear APT cache
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
build-essential nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps && \ build-essential nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps git && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
# Non-root user for runtime; UID can be overridden via HERMES_UID at runtime # Non-root user for runtime; UID can be overridden via HERMES_UID at runtime

View file

@ -167,6 +167,7 @@ python -m pytest tests/ -q
- 📚 [Skills Hub](https://agentskills.io) - 📚 [Skills Hub](https://agentskills.io)
- 🐛 [Issues](https://github.com/NousResearch/hermes-agent/issues) - 🐛 [Issues](https://github.com/NousResearch/hermes-agent/issues)
- 💡 [Discussions](https://github.com/NousResearch/hermes-agent/discussions) - 💡 [Discussions](https://github.com/NousResearch/hermes-agent/discussions)
- 🔌 [HermesClaw](https://github.com/AaronWong1999/hermesclaw) — Community WeChat bridge: Run Hermes Agent and OpenClaw on the same WeChat account.
--- ---

328
RELEASE_v0.9.0.md Normal file
View file

@ -0,0 +1,328 @@
# Hermes Agent v0.9.0 (v2026.4.13)
**Release Date:** April 13, 2026
**Since v0.8.0:** 487 commits · 269 merged PRs · 167 resolved issues · 493 files changed · 63,281 insertions · 24 contributors
> The everywhere release — Hermes goes mobile with Termux/Android, adds iMessage and WeChat, ships Fast Mode for OpenAI and Anthropic, introduces background process monitoring, launches a local web dashboard for managing your agent, and delivers the deepest security hardening pass yet across 16 supported platforms.
---
## ✨ Highlights
- **Local Web Dashboard** — A new browser-based dashboard for managing your Hermes Agent locally. Configure settings, monitor sessions, browse skills, and manage your gateway — all from a clean web interface without touching config files or the terminal. The easiest way to get started with Hermes.
- **Fast Mode (`/fast`)** — Priority processing for OpenAI and Anthropic models. Toggle `/fast` to route through priority queues for significantly lower latency on supported models (GPT-5.4, Codex, Claude). Expands across all OpenAI Priority Processing models and Anthropic's fast tier. ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
- **iMessage via BlueBubbles** — Full iMessage integration through BlueBubbles, bringing Hermes to Apple's messaging ecosystem. Auto-webhook registration, setup wizard integration, and crash resilience. ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494))
- **WeChat (Weixin) & WeCom Callback Mode** — Native WeChat support via iLink Bot API and a new WeCom callback-mode adapter for self-built enterprise apps. Streaming cursor, media uploads, markdown link handling, and atomic state persistence. Hermes now covers the Chinese messaging ecosystem end-to-end. ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#7943](https://github.com/NousResearch/hermes-agent/pull/7943))
- **Termux / Android Support** — Run Hermes natively on Android via Termux. Adapted install paths, TUI optimizations for mobile screens, voice backend support, and the `/image` command work on-device. ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
- **Background Process Monitoring (`watch_patterns`)** — Set patterns to watch for in background process output and get notified in real-time when they match. Monitor for errors, wait for specific events ("listening on port"), or watch build logs — all without polling. ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635))
- **Native xAI & Xiaomi MiMo Providers** — First-class provider support for xAI (Grok) and Xiaomi MiMo, with direct API access, model catalogs, and setup wizard integration. Plus Qwen OAuth with portal request support. ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372), [#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
- **Pluggable Context Engine** — Context management is now a pluggable slot via `hermes plugins`. Swap in custom context engines that control what the agent sees each turn — filtering, summarization, or domain-specific context injection. ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464))
- **Unified Proxy Support** — SOCKS proxy, `DISCORD_PROXY`, and system proxy auto-detection across all gateway platforms. Hermes behind corporate firewalls just works. ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
- **Comprehensive Security Hardening** — Path traversal protection in checkpoint manager, shell injection neutralization in sandbox writes, SSRF redirect guards in Slack image uploads, Twilio webhook signature validation (SMS RCE fix), API server auth enforcement, git argument injection prevention, and approval button authorization. ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933), [#7944](https://github.com/NousResearch/hermes-agent/pull/7944), [#7940](https://github.com/NousResearch/hermes-agent/pull/7940), [#7151](https://github.com/NousResearch/hermes-agent/pull/7151), [#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
- **`hermes backup` & `hermes import`** — Full backup and restore of your Hermes configuration, sessions, skills, and memory. Migrate between machines or create snapshots before major changes. ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
- **16 Supported Platforms** — With BlueBubbles (iMessage) and WeChat joining Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, SMS, DingTalk, Feishu, WeCom, Mattermost, Home Assistant, and Webhooks, Hermes now runs on 16 messaging platforms out of the box.
- **`/debug` & `hermes debug share`** — New debugging toolkit: `/debug` slash command across all platforms for quick diagnostics, plus `hermes debug share` to upload a full debug report to a pastebin for easy sharing when troubleshooting. ([#8681](https://github.com/NousResearch/hermes-agent/pull/8681))
---
## 🏗️ Core Agent & Architecture
### Provider & Model Support
- **Native xAI (Grok) provider** with direct API access and model catalog ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372))
- **Xiaomi MiMo as first-class provider** — setup wizard, model catalog, empty response recovery ([#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
- **Qwen OAuth provider** with portal request support ([#6282](https://github.com/NousResearch/hermes-agent/pull/6282))
- **Fast Mode**`/fast` toggle for OpenAI Priority Processing + Anthropic fast tier ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
- **Structured API error classification** for smart failover decisions ([#6514](https://github.com/NousResearch/hermes-agent/pull/6514))
- **Rate limit header capture** shown in `/usage` ([#6541](https://github.com/NousResearch/hermes-agent/pull/6541))
- **API server model name** derived from profile name ([#6857](https://github.com/NousResearch/hermes-agent/pull/6857))
- **Custom providers** now included in `/model` listings and resolution ([#7088](https://github.com/NousResearch/hermes-agent/pull/7088))
- **Fallback provider activation** on repeated empty responses with user-visible status ([#7505](https://github.com/NousResearch/hermes-agent/pull/7505))
- **OpenRouter variant tags** (`:free`, `:extended`, `:fast`) preserved during model switch ([#6383](https://github.com/NousResearch/hermes-agent/pull/6383))
- **Credential exhaustion TTL** reduced from 24 hours to 1 hour ([#6504](https://github.com/NousResearch/hermes-agent/pull/6504))
- **OAuth credential lifecycle** hardening — stale pool keys, auth.json sync, Codex CLI race fixes ([#6874](https://github.com/NousResearch/hermes-agent/pull/6874))
- Empty response recovery for reasoning models (MiMo, Qwen, GLM) ([#8609](https://github.com/NousResearch/hermes-agent/pull/8609))
- MiniMax context lengths, thinking guard, endpoint corrections ([#6082](https://github.com/NousResearch/hermes-agent/pull/6082), [#7126](https://github.com/NousResearch/hermes-agent/pull/7126))
- Z.AI endpoint auto-detect via probe and cache ([#5763](https://github.com/NousResearch/hermes-agent/pull/5763))
### Agent Loop & Conversation
- **Pluggable context engine slot** via `hermes plugins` ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464))
- **Background process monitoring**`watch_patterns` for real-time output alerts ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635))
- **Improved context compression** — higher limits, tool tracking, degradation warnings, token-budget tail protection ([#6395](https://github.com/NousResearch/hermes-agent/pull/6395), [#6453](https://github.com/NousResearch/hermes-agent/pull/6453))
- **`/compress <focus>`** — guided compression with a focus topic ([#8017](https://github.com/NousResearch/hermes-agent/pull/8017))
- **Tiered context pressure warnings** with gateway dedup ([#6411](https://github.com/NousResearch/hermes-agent/pull/6411))
- **Staged inactivity warning** before timeout escalation ([#6387](https://github.com/NousResearch/hermes-agent/pull/6387))
- **Prevent agent from stopping mid-task** — compression floor, budget overhaul, activity tracking ([#7983](https://github.com/NousResearch/hermes-agent/pull/7983))
- **Propagate child activity to parent** during `delegate_task` ([#7295](https://github.com/NousResearch/hermes-agent/pull/7295))
- **Truncated streaming tool call detection** before execution ([#6847](https://github.com/NousResearch/hermes-agent/pull/6847))
- Empty response retry (3 attempts with nudge) ([#6488](https://github.com/NousResearch/hermes-agent/pull/6488))
- Adaptive streaming backoff + cursor strip to prevent message truncation ([#7683](https://github.com/NousResearch/hermes-agent/pull/7683))
- Compression uses live session model instead of stale persisted config ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258))
- Strip `<thought>` tags from Gemma 4 responses ([#8562](https://github.com/NousResearch/hermes-agent/pull/8562))
- Prevent `<think>` in prose from suppressing response output ([#6968](https://github.com/NousResearch/hermes-agent/pull/6968))
- Turn-exit diagnostic logging to agent loop ([#6549](https://github.com/NousResearch/hermes-agent/pull/6549))
- Scope tool interrupt signal per-thread to prevent cross-session leaks ([#7930](https://github.com/NousResearch/hermes-agent/pull/7930))
### Memory & Sessions
- **Hindsight memory plugin** — feature parity, setup wizard, config improvements — @nicoloboschi ([#6428](https://github.com/NousResearch/hermes-agent/pull/6428))
- **Honcho** — opt-in `initOnSessionStart` for tools mode — @Kathie-yu ([#6995](https://github.com/NousResearch/hermes-agent/pull/6995))
- Orphan children instead of cascade-deleting in prune/delete ([#6513](https://github.com/NousResearch/hermes-agent/pull/6513))
- Doctor command only checks the active memory provider ([#6285](https://github.com/NousResearch/hermes-agent/pull/6285))
---
## 📱 Messaging Platforms (Gateway)
### New Platforms
- **BlueBubbles (iMessage)** — full adapter with auto-webhook registration, setup wizard, and crash resilience ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494), [#7107](https://github.com/NousResearch/hermes-agent/pull/7107))
- **Weixin (WeChat)** — native support via iLink Bot API with streaming, media uploads, markdown links ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#8665](https://github.com/NousResearch/hermes-agent/pull/8665))
- **WeCom Callback Mode** — self-built enterprise app adapter with atomic state persistence ([#7943](https://github.com/NousResearch/hermes-agent/pull/7943), [#7928](https://github.com/NousResearch/hermes-agent/pull/7928))
### Discord
- **Allowed channels whitelist** config — @jarvis-phw ([#7044](https://github.com/NousResearch/hermes-agent/pull/7044))
- **Forum channel topic inheritance** in thread sessions — @hermes-agent-dhabibi ([#6377](https://github.com/NousResearch/hermes-agent/pull/6377))
- **DISCORD_REPLY_TO_MODE** setting ([#6333](https://github.com/NousResearch/hermes-agent/pull/6333))
- Accept `.log` attachments, raise document size limit — @kira-ariaki ([#6467](https://github.com/NousResearch/hermes-agent/pull/6467))
- Decouple readiness from slash sync ([#8016](https://github.com/NousResearch/hermes-agent/pull/8016))
### Slack
- **Consolidated Slack improvements** — 7 community PRs salvaged into one ([#6809](https://github.com/NousResearch/hermes-agent/pull/6809))
- Handle assistant thread lifecycle events ([#6433](https://github.com/NousResearch/hermes-agent/pull/6433))
### Matrix
- **Migrated from matrix-nio to mautrix-python** ([#7518](https://github.com/NousResearch/hermes-agent/pull/7518))
- SQLite crypto store replacing pickle (fixes E2EE decryption) — @alt-glitch ([#7981](https://github.com/NousResearch/hermes-agent/pull/7981))
- Cross-signing recovery key verification for E2EE migration ([#8282](https://github.com/NousResearch/hermes-agent/pull/8282))
- DM mention threads + group chat events for Feishu ([#7423](https://github.com/NousResearch/hermes-agent/pull/7423))
### Gateway Core
- **Unified proxy support** — SOCKS, DISCORD_PROXY, multi-platform with macOS auto-detection ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
- **Inbound text batching** for Discord, Matrix, WeCom + adaptive delay ([#6979](https://github.com/NousResearch/hermes-agent/pull/6979))
- **Surface natural mid-turn assistant messages** in chat platforms ([#7978](https://github.com/NousResearch/hermes-agent/pull/7978))
- **WSL-aware gateway** with smart systemd detection ([#7510](https://github.com/NousResearch/hermes-agent/pull/7510))
- **All missing platforms added to setup wizard** ([#7949](https://github.com/NousResearch/hermes-agent/pull/7949))
- **Per-platform `tool_progress` overrides** ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
- **Configurable 'still working' notification interval** ([#8572](https://github.com/NousResearch/hermes-agent/pull/8572))
- `/model` switch persists across messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
- `/usage` shows rate limits, cost, and token details between turns ([#7038](https://github.com/NousResearch/hermes-agent/pull/7038))
- Drain in-flight work before restart ([#7503](https://github.com/NousResearch/hermes-agent/pull/7503))
- Don't evict cached agent on failed runs — prevents MCP restart loop ([#7539](https://github.com/NousResearch/hermes-agent/pull/7539))
- Replace `os.environ` session state with `contextvars` ([#7454](https://github.com/NousResearch/hermes-agent/pull/7454))
- Derive channel directory platforms from enum instead of hardcoded list ([#7450](https://github.com/NousResearch/hermes-agent/pull/7450))
- Validate image downloads before caching (cross-platform) ([#7125](https://github.com/NousResearch/hermes-agent/pull/7125))
- Cross-platform webhook delivery for all platforms ([#7095](https://github.com/NousResearch/hermes-agent/pull/7095))
- Cron Discord thread_id delivery support ([#7106](https://github.com/NousResearch/hermes-agent/pull/7106))
- Feishu QR-based bot onboarding ([#8570](https://github.com/NousResearch/hermes-agent/pull/8570))
- Gateway status scoped to active profile ([#7951](https://github.com/NousResearch/hermes-agent/pull/7951))
- Prevent background process notifications from triggering false pairing requests ([#6434](https://github.com/NousResearch/hermes-agent/pull/6434))
---
## 🖥️ CLI & User Experience
### Interactive CLI
- **Termux / Android support** — adapted install paths, TUI, voice, `/image` ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
- **Native `/model` picker modal** for provider → model selection ([#8003](https://github.com/NousResearch/hermes-agent/pull/8003))
- **Live per-tool elapsed timer** restored in TUI spinner ([#7359](https://github.com/NousResearch/hermes-agent/pull/7359))
- **Stacked tool progress scrollback** in TUI ([#8201](https://github.com/NousResearch/hermes-agent/pull/8201))
- **Random tips on new session start** (CLI + gateway, 279 tips) ([#8225](https://github.com/NousResearch/hermes-agent/pull/8225), [#8237](https://github.com/NousResearch/hermes-agent/pull/8237))
- **`hermes dump`** — copy-pasteable setup summary for debugging ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
- **`hermes backup` / `hermes import`** — full config backup and restore ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
- **WSL environment hint** in system prompt ([#8285](https://github.com/NousResearch/hermes-agent/pull/8285))
- **Profile creation UX** — seed SOUL.md + credential warning ([#8553](https://github.com/NousResearch/hermes-agent/pull/8553))
- Shell-aware sudo detection, empty password support ([#6517](https://github.com/NousResearch/hermes-agent/pull/6517))
- Flush stdin after curses/terminal menus to prevent escape sequence leakage ([#7167](https://github.com/NousResearch/hermes-agent/pull/7167))
- Handle broken stdin in prompt_toolkit startup ([#8560](https://github.com/NousResearch/hermes-agent/pull/8560))
### Setup & Configuration
- **Per-platform display verbosity** configuration ([#8006](https://github.com/NousResearch/hermes-agent/pull/8006))
- **Component-separated logging** with session context and filtering ([#7991](https://github.com/NousResearch/hermes-agent/pull/7991))
- **`network.force_ipv4`** config to fix IPv6 timeout issues ([#8196](https://github.com/NousResearch/hermes-agent/pull/8196))
- **Standardize message whitespace and JSON formatting** ([#7988](https://github.com/NousResearch/hermes-agent/pull/7988))
- **Rebrand OpenClaw → Hermes** during migration ([#8210](https://github.com/NousResearch/hermes-agent/pull/8210))
- Config.yaml takes priority over env vars for auxiliary settings ([#7889](https://github.com/NousResearch/hermes-agent/pull/7889))
- Harden setup provider flows + live OpenRouter catalog refresh ([#7078](https://github.com/NousResearch/hermes-agent/pull/7078))
- Normalize reasoning effort ordering across all surfaces ([#6804](https://github.com/NousResearch/hermes-agent/pull/6804))
- Remove dead `LLM_MODEL` env var + migration to clear stale entries ([#6543](https://github.com/NousResearch/hermes-agent/pull/6543))
- Remove `/prompt` slash command — prefix expansion footgun ([#6752](https://github.com/NousResearch/hermes-agent/pull/6752))
- `HERMES_HOME_MODE` env var to override permissions — @ygd58 ([#6993](https://github.com/NousResearch/hermes-agent/pull/6993))
- Fall back to default model when model config is empty ([#8303](https://github.com/NousResearch/hermes-agent/pull/8303))
- Warn when compression model context is too small ([#7894](https://github.com/NousResearch/hermes-agent/pull/7894))
---
## 🔧 Tool System
### Environments & Execution
- **Unified spawn-per-call execution layer** for environments ([#6343](https://github.com/NousResearch/hermes-agent/pull/6343))
- **Unified file sync** with mtime tracking, deletion, and transactional state ([#7087](https://github.com/NousResearch/hermes-agent/pull/7087))
- **Persistent sandbox envs** survive between turns ([#6412](https://github.com/NousResearch/hermes-agent/pull/6412))
- **Bulk file sync** via tar pipe for SSH/Modal backends — @alt-glitch ([#8014](https://github.com/NousResearch/hermes-agent/pull/8014))
- **Daytona** — bulk upload, config bridge, silent disk cap ([#7538](https://github.com/NousResearch/hermes-agent/pull/7538))
- Foreground timeout cap to prevent session deadlocks ([#7082](https://github.com/NousResearch/hermes-agent/pull/7082))
- Guard invalid command values ([#6417](https://github.com/NousResearch/hermes-agent/pull/6417))
### MCP
- **`hermes mcp add --env` and `--preset`** support ([#7970](https://github.com/NousResearch/hermes-agent/pull/7970))
- Combine `content` and `structuredContent` when both present ([#7118](https://github.com/NousResearch/hermes-agent/pull/7118))
- MCP tool name deconfliction fixes ([#7654](https://github.com/NousResearch/hermes-agent/pull/7654))
### Browser
- Browser hardening — dead code removal, caching, scroll perf, security, thread safety ([#7354](https://github.com/NousResearch/hermes-agent/pull/7354))
- `/browser connect` auto-launch uses dedicated Chrome profile dir ([#6821](https://github.com/NousResearch/hermes-agent/pull/6821))
- Reap orphaned browser sessions on startup ([#7931](https://github.com/NousResearch/hermes-agent/pull/7931))
### Voice & Vision
- **Voxtral TTS provider** (Mistral AI) ([#7653](https://github.com/NousResearch/hermes-agent/pull/7653))
- **TTS speed support** for Edge TTS, OpenAI TTS, MiniMax ([#8666](https://github.com/NousResearch/hermes-agent/pull/8666))
- **Vision auto-resize** for oversized images, raise limit to 20 MB, retry-on-failure ([#7883](https://github.com/NousResearch/hermes-agent/pull/7883), [#7902](https://github.com/NousResearch/hermes-agent/pull/7902))
- STT provider-model mismatch fix (whisper-1 vs faster-whisper) ([#7113](https://github.com/NousResearch/hermes-agent/pull/7113))
### Other Tools
- **`hermes dump`** command for setup summary ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
- TODO store enforces ID uniqueness during replace operations ([#7986](https://github.com/NousResearch/hermes-agent/pull/7986))
- List all available toolsets in `delegate_task` schema description ([#8231](https://github.com/NousResearch/hermes-agent/pull/8231))
- API server: tool progress as custom SSE event to prevent model corruption ([#7500](https://github.com/NousResearch/hermes-agent/pull/7500))
- API server: share one Docker container across all conversations ([#7127](https://github.com/NousResearch/hermes-agent/pull/7127))
---
## 🧩 Skills Ecosystem
- **Centralized skills index + tree cache** — eliminates rate-limit failures on install ([#8575](https://github.com/NousResearch/hermes-agent/pull/8575))
- **More aggressive skill loading instructions** in system prompt (v3) ([#8209](https://github.com/NousResearch/hermes-agent/pull/8209), [#8286](https://github.com/NousResearch/hermes-agent/pull/8286))
- **Google Workspace skill** migrated to GWS CLI backend ([#6788](https://github.com/NousResearch/hermes-agent/pull/6788))
- **Creative divergence strategies** skill — @SHL0MS ([#6882](https://github.com/NousResearch/hermes-agent/pull/6882))
- **Creative ideation** — constraint-driven project generation — @SHL0MS ([#7555](https://github.com/NousResearch/hermes-agent/pull/7555))
- Parallelize skills browse/search to prevent hanging ([#7301](https://github.com/NousResearch/hermes-agent/pull/7301))
- Read name from SKILL.md frontmatter in skills_sync ([#7623](https://github.com/NousResearch/hermes-agent/pull/7623))
---
## 🔒 Security & Reliability
### Security Hardening
- **Twilio webhook signature validation** — SMS RCE fix ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933))
- **Shell injection neutralization** in `_write_to_sandbox` via path quoting ([#7940](https://github.com/NousResearch/hermes-agent/pull/7940))
- **Git argument injection** and path traversal prevention in checkpoint manager ([#7944](https://github.com/NousResearch/hermes-agent/pull/7944))
- **SSRF redirect bypass** in Slack image uploads + base.py cache helpers ([#7151](https://github.com/NousResearch/hermes-agent/pull/7151))
- **Path traversal, credential gate, DANGEROUS_PATTERNS gaps** ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
- **API bind guard** — enforce `API_SERVER_KEY` for non-loopback binding ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
- **Approval button authorization** — require auth for session continuation — @Cafexss ([#6930](https://github.com/NousResearch/hermes-agent/pull/6930))
- Path boundary enforcement in skill manager operations ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
- DingTalk/API webhook URL origin validation, header injection rejection ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
### Reliability
- **Contextual error diagnostics** for invalid API responses ([#8565](https://github.com/NousResearch/hermes-agent/pull/8565))
- **Prevent 400 format errors** from triggering compression loop on Codex ([#6751](https://github.com/NousResearch/hermes-agent/pull/6751))
- **Don't halve context_length** on output-cap-too-large errors — @KUSH42 ([#6664](https://github.com/NousResearch/hermes-agent/pull/6664))
- **Recover primary client** on OpenAI transport errors ([#7108](https://github.com/NousResearch/hermes-agent/pull/7108))
- **Credential pool rotation** on billing-classified 400s ([#7112](https://github.com/NousResearch/hermes-agent/pull/7112))
- **Auto-increase stream read timeout** for local LLM providers ([#6967](https://github.com/NousResearch/hermes-agent/pull/6967))
- **Fall back to default certs** when CA bundle path doesn't exist ([#7352](https://github.com/NousResearch/hermes-agent/pull/7352))
- **Disambiguate usage-limit patterns** in error classifier — @sprmn24 ([#6836](https://github.com/NousResearch/hermes-agent/pull/6836))
- Harden cron script timeout and provider recovery ([#7079](https://github.com/NousResearch/hermes-agent/pull/7079))
- Gateway interrupt detection resilient to monitor task failures ([#8208](https://github.com/NousResearch/hermes-agent/pull/8208))
- Prevent unwanted session auto-reset after graceful gateway restarts ([#8299](https://github.com/NousResearch/hermes-agent/pull/8299))
- Prevent duplicate update prompt spam in gateway watcher ([#8343](https://github.com/NousResearch/hermes-agent/pull/8343))
- Deduplicate reasoning items in Responses API input ([#7946](https://github.com/NousResearch/hermes-agent/pull/7946))
### Infrastructure
- **Multi-arch Docker image** — amd64 + arm64 ([#6124](https://github.com/NousResearch/hermes-agent/pull/6124))
- **Docker runs as non-root user** with virtualenv — @benbarclay ([#8226](https://github.com/NousResearch/hermes-agent/pull/8226))
- **Use `uv`** for Docker dependency resolution to fix resolution-too-deep ([#6965](https://github.com/NousResearch/hermes-agent/pull/6965))
- **Container-aware Nix CLI** — auto-route into managed container — @alt-glitch ([#7543](https://github.com/NousResearch/hermes-agent/pull/7543))
- **Nix shared-state permission model** for interactive CLI users — @alt-glitch ([#6796](https://github.com/NousResearch/hermes-agent/pull/6796))
- **Per-profile subprocess HOME isolation** ([#7357](https://github.com/NousResearch/hermes-agent/pull/7357))
- Profile paths fixed in Docker — profiles go to mounted volume ([#7170](https://github.com/NousResearch/hermes-agent/pull/7170))
- Docker container gateway pathway hardened ([#8614](https://github.com/NousResearch/hermes-agent/pull/8614))
- Enable unbuffered stdout for live Docker logs ([#6749](https://github.com/NousResearch/hermes-agent/pull/6749))
- Install procps in Docker image — @HiddenPuppy ([#7032](https://github.com/NousResearch/hermes-agent/pull/7032))
- Shallow git clone for faster installation — @sosyz ([#8396](https://github.com/NousResearch/hermes-agent/pull/8396))
- `hermes update` always reset on stash conflict ([#7010](https://github.com/NousResearch/hermes-agent/pull/7010))
- Write update exit code before gateway restart (cgroup kill race) ([#8288](https://github.com/NousResearch/hermes-agent/pull/8288))
- Nix: `setupSecrets` optional, tirith runtime dep — @devorun, @ethernet8023 ([#6261](https://github.com/NousResearch/hermes-agent/pull/6261), [#6721](https://github.com/NousResearch/hermes-agent/pull/6721))
- launchd stop uses `bootout` so `KeepAlive` doesn't respawn ([#7119](https://github.com/NousResearch/hermes-agent/pull/7119))
---
## 🐛 Notable Bug Fixes
- Fix: `/model` switch not persisting across gateway messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
- Fix: session-scoped gateway model overrides ignored — @Hygaard ([#7662](https://github.com/NousResearch/hermes-agent/pull/7662))
- Fix: compaction model context length ignoring config — 3 related issues ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258), [#8107](https://github.com/NousResearch/hermes-agent/pull/8107))
- Fix: OpenCode.ai context window resolved to 128K instead of 1M ([#6472](https://github.com/NousResearch/hermes-agent/pull/6472))
- Fix: Codex fallback auth-store lookup — @cherifya ([#6462](https://github.com/NousResearch/hermes-agent/pull/6462))
- Fix: duplicate completion notifications when process killed ([#7124](https://github.com/NousResearch/hermes-agent/pull/7124))
- Fix: agent daemon thread prevents orphan CLI processes on tab close ([#8557](https://github.com/NousResearch/hermes-agent/pull/8557))
- Fix: stale image attachment on text paste and voice input ([#7077](https://github.com/NousResearch/hermes-agent/pull/7077))
- Fix: DM thread session seeding causing cross-thread contamination ([#7084](https://github.com/NousResearch/hermes-agent/pull/7084))
- Fix: OpenClaw migration shows dry-run preview before executing ([#6769](https://github.com/NousResearch/hermes-agent/pull/6769))
- Fix: auth errors misclassified as retryable — @kuishou68 ([#7027](https://github.com/NousResearch/hermes-agent/pull/7027))
- Fix: Copilot-Integration-Id header missing ([#7083](https://github.com/NousResearch/hermes-agent/pull/7083))
- Fix: ACP session capabilities — @luyao618 ([#6985](https://github.com/NousResearch/hermes-agent/pull/6985))
- Fix: ACP PromptResponse usage from top-level fields ([#7086](https://github.com/NousResearch/hermes-agent/pull/7086))
- Fix: several failing/flaky tests on main — @dsocolobsky ([#6777](https://github.com/NousResearch/hermes-agent/pull/6777))
- Fix: backup marker filenames — @sprmn24 ([#8600](https://github.com/NousResearch/hermes-agent/pull/8600))
- Fix: `NoneType` in fast_mode check — @0xbyt4 ([#7350](https://github.com/NousResearch/hermes-agent/pull/7350))
- Fix: missing imports in uninstall.py — @JiayuuWang ([#7034](https://github.com/NousResearch/hermes-agent/pull/7034))
---
## 📚 Documentation
- Platform adapter developer guide + WeCom Callback docs ([#7969](https://github.com/NousResearch/hermes-agent/pull/7969))
- Cron troubleshooting guide ([#7122](https://github.com/NousResearch/hermes-agent/pull/7122))
- Streaming timeout auto-detection for local LLMs ([#6990](https://github.com/NousResearch/hermes-agent/pull/6990))
- Tool-use enforcement documentation expanded ([#7984](https://github.com/NousResearch/hermes-agent/pull/7984))
- BlueBubbles pairing instructions ([#6548](https://github.com/NousResearch/hermes-agent/pull/6548))
- Telegram proxy support section ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
- `hermes dump` and `hermes logs` CLI reference ([#6552](https://github.com/NousResearch/hermes-agent/pull/6552))
- `tool_progress_overrides` configuration reference ([#6364](https://github.com/NousResearch/hermes-agent/pull/6364))
- Compression model context length warning docs ([#7879](https://github.com/NousResearch/hermes-agent/pull/7879))
---
## 👥 Contributors
**269 merged PRs** from **24 contributors** across **487 commits**.
### Community Contributors
- **@alt-glitch** (6 PRs) — Nix container-aware CLI, shared-state permissions, Matrix SQLite crypto store, bulk SSH/Modal file sync, Matrix mautrix compat
- **@SHL0MS** (2 PRs) — Creative divergence strategies skill, creative ideation skill
- **@sprmn24** (2 PRs) — Error classifier disambiguation, backup marker fix
- **@nicoloboschi** — Hindsight memory plugin feature parity
- **@Hygaard** — Session-scoped gateway model override fix
- **@jarvis-phw** — Discord allowed_channels whitelist
- **@Kathie-yu** — Honcho initOnSessionStart for tools mode
- **@hermes-agent-dhabibi** — Discord forum channel topic inheritance
- **@kira-ariaki** — Discord .log attachments and size limit
- **@cherifya** — Codex fallback auth-store lookup
- **@Cafexss** — Security: auth for session continuation
- **@KUSH42** — Compaction context_length fix
- **@kuishou68** — Auth error retryable classification fix
- **@luyao618** — ACP session capabilities
- **@ygd58** — HERMES_HOME_MODE env var override
- **@0xbyt4** — Fast mode NoneType fix
- **@JiayuuWang** — CLI uninstall import fix
- **@HiddenPuppy** — Docker procps installation
- **@dsocolobsky** — Test suite fixes
- **@benbarclay** — Docker image tag simplification
- **@sosyz** — Shallow git clone for faster install
- **@devorun** — Nix setupSecrets optional
- **@ethernet8023** — Nix tirith runtime dep
---
**Full Changelog**: [v2026.4.8...v2026.4.13](https://github.com/NousResearch/hermes-agent/compare/v2026.4.8...v2026.4.13)

View file

@ -27,10 +27,6 @@ Per-task overrides are configured in config.yaml under the ``auxiliary:`` sectio
(e.g. ``auxiliary.vision.provider``, ``auxiliary.compression.model``). (e.g. ``auxiliary.vision.provider``, ``auxiliary.compression.model``).
Default "auto" follows the chains above. Default "auto" follows the chains above.
Legacy env var overrides (AUXILIARY_{TASK}_PROVIDER, AUXILIARY_{TASK}_MODEL,
AUXILIARY_{TASK}_BASE_URL, etc.) are still read as a backward-compat fallback
but config.yaml takes priority. New configuration should always use config.yaml.
Payment / credit exhaustion fallback: Payment / credit exhaustion fallback:
When a resolved provider returns HTTP 402 or a credit-related error, When a resolved provider returns HTTP 402 or a credit-related error,
call_llm() automatically retries with the next available provider in the call_llm() automatically retries with the next available provider in the
@ -68,6 +64,8 @@ _PROVIDER_ALIASES = {
"zhipu": "zai", "zhipu": "zai",
"kimi": "kimi-coding", "kimi": "kimi-coding",
"moonshot": "kimi-coding", "moonshot": "kimi-coding",
"kimi-cn": "kimi-coding-cn",
"moonshot-cn": "kimi-coding-cn",
"minimax-china": "minimax-cn", "minimax-china": "minimax-cn",
"minimax_cn": "minimax-cn", "minimax_cn": "minimax-cn",
"claude": "anthropic", "claude": "anthropic",
@ -75,13 +73,13 @@ _PROVIDER_ALIASES = {
} }
def _normalize_aux_provider(provider: Optional[str], *, for_vision: bool = False) -> str: def _normalize_aux_provider(provider: Optional[str]) -> str:
normalized = (provider or "auto").strip().lower() normalized = (provider or "auto").strip().lower()
if normalized.startswith("custom:"): if normalized.startswith("custom:"):
suffix = normalized.split(":", 1)[1].strip() suffix = normalized.split(":", 1)[1].strip()
if not suffix: if not suffix:
return "custom" return "custom"
normalized = suffix if not for_vision else "custom" normalized = suffix
if normalized == "codex": if normalized == "codex":
return "openai-codex" return "openai-codex"
if normalized == "main": if normalized == "main":
@ -98,6 +96,7 @@ _API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
"gemini": "gemini-3-flash-preview", "gemini": "gemini-3-flash-preview",
"zai": "glm-4.5-flash", "zai": "glm-4.5-flash",
"kimi-coding": "kimi-k2-turbo-preview", "kimi-coding": "kimi-k2-turbo-preview",
"kimi-coding-cn": "kimi-k2-turbo-preview",
"minimax": "MiniMax-M2.7", "minimax": "MiniMax-M2.7",
"minimax-cn": "MiniMax-M2.7", "minimax-cn": "MiniMax-M2.7",
"anthropic": "claude-haiku-4-5-20251001", "anthropic": "claude-haiku-4-5-20251001",
@ -753,30 +752,6 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
# ── Provider resolution helpers ───────────────────────────────────────────── # ── Provider resolution helpers ─────────────────────────────────────────────
def _get_auxiliary_provider(task: str = "") -> str:
"""Read the provider override for a specific auxiliary task.
Checks AUXILIARY_{TASK}_PROVIDER first (e.g. AUXILIARY_VISION_PROVIDER),
then CONTEXT_{TASK}_PROVIDER (for the compression section's summary_provider),
then falls back to "auto". Returns one of: "auto", "openrouter", "nous", "main".
"""
if task:
for prefix in ("AUXILIARY_", "CONTEXT_"):
val = os.getenv(f"{prefix}{task.upper()}_PROVIDER", "").strip().lower()
if val and val != "auto":
return val
return "auto"
def _get_auxiliary_env_override(task: str, suffix: str) -> Optional[str]:
"""Read an auxiliary env override from AUXILIARY_* or CONTEXT_* prefixes."""
if not task:
return None
for prefix in ("AUXILIARY_", "CONTEXT_"):
val = os.getenv(f"{prefix}{task.upper()}_{suffix}", "").strip()
if val:
return val
return None
def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]: def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]:
@ -1248,6 +1223,12 @@ def _to_async_client(sync_client, model: str):
return AsyncCodexAuxiliaryClient(sync_client), model return AsyncCodexAuxiliaryClient(sync_client), model
if isinstance(sync_client, AnthropicAuxiliaryClient): if isinstance(sync_client, AnthropicAuxiliaryClient):
return AsyncAnthropicAuxiliaryClient(sync_client), model return AsyncAnthropicAuxiliaryClient(sync_client), model
try:
from agent.copilot_acp_client import CopilotACPClient
if isinstance(sync_client, CopilotACPClient):
return sync_client, model
except ImportError:
pass
async_kwargs = { async_kwargs = {
"api_key": sync_client.api_key, "api_key": sync_client.api_key,
@ -1466,10 +1447,14 @@ def resolve_provider_client(
custom_entry = _get_named_custom_provider(provider) custom_entry = _get_named_custom_provider(provider)
if custom_entry: if custom_entry:
custom_base = custom_entry.get("base_url", "").strip() custom_base = custom_entry.get("base_url", "").strip()
custom_key = custom_entry.get("api_key", "").strip() or "no-key-required" custom_key = custom_entry.get("api_key", "").strip()
custom_key_env = custom_entry.get("key_env", "").strip()
if not custom_key and custom_key_env:
custom_key = os.getenv(custom_key_env, "").strip()
custom_key = custom_key or "no-key-required"
if custom_base: if custom_base:
final_model = _normalize_resolved_model( final_model = _normalize_resolved_model(
model or _read_main_model() or "gpt-4o-mini", model or custom_entry.get("model") or _read_main_model() or "gpt-4o-mini",
provider, provider,
) )
client = OpenAI(api_key=custom_key, base_url=custom_base) client = OpenAI(api_key=custom_key, base_url=custom_base)
@ -1488,7 +1473,11 @@ def resolve_provider_client(
# ── API-key providers from PROVIDER_REGISTRY ───────────────────── # ── API-key providers from PROVIDER_REGISTRY ─────────────────────
try: try:
from hermes_cli.auth import PROVIDER_REGISTRY, resolve_api_key_provider_credentials from hermes_cli.auth import (
PROVIDER_REGISTRY,
resolve_api_key_provider_credentials,
resolve_external_process_provider_credentials,
)
except ImportError: except ImportError:
logger.debug("hermes_cli.auth not available for provider %s", provider) logger.debug("hermes_cli.auth not available for provider %s", provider)
return None, None return None, None
@ -1562,6 +1551,41 @@ def resolve_provider_client(
return (_to_async_client(client, final_model) if async_mode return (_to_async_client(client, final_model) if async_mode
else (client, final_model)) else (client, final_model))
if pconfig.auth_type == "external_process":
creds = resolve_external_process_provider_credentials(provider)
final_model = _normalize_resolved_model(model or _read_main_model(), provider)
if provider == "copilot-acp":
api_key = str(creds.get("api_key", "")).strip()
base_url = str(creds.get("base_url", "")).strip()
command = str(creds.get("command", "")).strip() or None
args = list(creds.get("args") or [])
if not final_model:
logger.warning(
"resolve_provider_client: copilot-acp requested but no model "
"was provided or configured"
)
return None, None
if not api_key or not base_url:
logger.warning(
"resolve_provider_client: copilot-acp requested but external "
"process credentials are incomplete"
)
return None, None
from agent.copilot_acp_client import CopilotACPClient
client = CopilotACPClient(
api_key=api_key,
base_url=base_url,
command=command,
args=args,
)
logger.debug("resolve_provider_client: %s (%s)", provider, final_model)
return (_to_async_client(client, final_model) if async_mode
else (client, final_model))
logger.warning("resolve_provider_client: external-process provider %s not "
"directly supported", provider)
return None, None
elif pconfig.auth_type in ("oauth_device_code", "oauth_external"): elif pconfig.auth_type in ("oauth_device_code", "oauth_external"):
# OAuth providers — route through their specific try functions # OAuth providers — route through their specific try functions
if provider == "nous": if provider == "nous":
@ -1591,8 +1615,8 @@ def get_text_auxiliary_client(
task: Optional task name ("compression", "web_extract") to check task: Optional task name ("compression", "web_extract") to check
for a task-specific provider override. for a task-specific provider override.
Callers may override the returned model with a per-task env var Callers may override the returned model via config.yaml
(e.g. CONTEXT_COMPRESSION_MODEL, AUXILIARY_WEB_EXTRACT_MODEL). (e.g. auxiliary.compression.model, auxiliary.web_extract.model).
""" """
provider, model, base_url, api_key, api_mode = _resolve_task_provider_model(task or None) provider, model, base_url, api_key, api_mode = _resolve_task_provider_model(task or None)
return resolve_provider_client( return resolve_provider_client(
@ -1631,7 +1655,7 @@ _VISION_AUTO_PROVIDER_ORDER = (
def _normalize_vision_provider(provider: Optional[str]) -> str: def _normalize_vision_provider(provider: Optional[str]) -> str:
return _normalize_aux_provider(provider, for_vision=True) return _normalize_aux_provider(provider)
def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]: def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]:
@ -1714,6 +1738,7 @@ def resolve_vision_provider_client(
async_mode=async_mode, async_mode=async_mode,
explicit_base_url=resolved_base_url, explicit_base_url=resolved_base_url,
explicit_api_key=resolved_api_key, explicit_api_key=resolved_api_key,
api_mode=resolved_api_mode,
) )
if client is None: if client is None:
return "custom", None, None return "custom", None, None
@ -1738,7 +1763,8 @@ def resolve_vision_provider_client(
# Use provider-specific vision model if available, otherwise main model. # Use provider-specific vision model if available, otherwise main model.
vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model) vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model)
rpc_client, rpc_model = resolve_provider_client( rpc_client, rpc_model = resolve_provider_client(
main_provider, vision_model) main_provider, vision_model,
api_mode=resolved_api_mode)
if rpc_client is not None: if rpc_client is not None:
logger.info( logger.info(
"Vision auto-detect: using active provider %s (%s)", "Vision auto-detect: using active provider %s (%s)",
@ -1762,7 +1788,8 @@ def resolve_vision_provider_client(
sync_client, default_model = _resolve_strict_vision_backend(requested) sync_client, default_model = _resolve_strict_vision_backend(requested)
return _finalize(requested, sync_client, default_model) return _finalize(requested, sync_client, default_model)
client, final_model = _get_cached_client(requested, resolved_model, async_mode) client, final_model = _get_cached_client(requested, resolved_model, async_mode,
api_mode=resolved_api_mode)
if client is None: if client is None:
return requested, None, None return requested, None, None
return requested, client, final_model return requested, client, final_model
@ -2011,9 +2038,8 @@ def _resolve_task_provider_model(
Priority: Priority:
1. Explicit provider/model/base_url/api_key args (always win) 1. Explicit provider/model/base_url/api_key args (always win)
2. Config file (auxiliary.{task}.* or compression.*) 2. Config file (auxiliary.{task}.provider/model/base_url)
3. Env var overrides (backward-compat: AUXILIARY_{TASK}_*, CONTEXT_{TASK}_*) 3. "auto" (full auto-detection chain)
4. "auto" (full auto-detection chain)
Returns (provider, model, base_url, api_key, api_mode) where model may Returns (provider, model, base_url, api_key, api_mode) where model may
be None (use provider default). When base_url is set, provider is forced be None (use provider default). When base_url is set, provider is forced
@ -2044,22 +2070,8 @@ def _resolve_task_provider_model(
cfg_api_key = str(task_config.get("api_key", "")).strip() or None cfg_api_key = str(task_config.get("api_key", "")).strip() or None
cfg_api_mode = str(task_config.get("api_mode", "")).strip() or None cfg_api_mode = str(task_config.get("api_mode", "")).strip() or None
# Backwards compat: compression section has its own keys. resolved_model = model or cfg_model
# The auxiliary.compression defaults to provider="auto", so treat resolved_api_mode = cfg_api_mode
# both None and "auto" as "not explicitly configured".
if task == "compression" and (not cfg_provider or cfg_provider == "auto"):
comp = config.get("compression", {}) if isinstance(config, dict) else {}
if isinstance(comp, dict):
cfg_provider = comp.get("summary_provider", "").strip() or None
cfg_model = cfg_model or comp.get("summary_model", "").strip() or None
_sbu = comp.get("summary_base_url") or ""
cfg_base_url = cfg_base_url or _sbu.strip() or None
# Env vars are backward-compat fallback only — config.yaml is primary.
env_model = _get_auxiliary_env_override(task, "MODEL") if task else None
env_api_mode = _get_auxiliary_env_override(task, "API_MODE") if task else None
resolved_model = model or cfg_model or env_model
resolved_api_mode = cfg_api_mode or env_api_mode
if base_url: if base_url:
return "custom", resolved_model, base_url, api_key, resolved_api_mode return "custom", resolved_model, base_url, api_key, resolved_api_mode
@ -2073,17 +2085,6 @@ def _resolve_task_provider_model(
if cfg_provider and cfg_provider != "auto": if cfg_provider and cfg_provider != "auto":
return cfg_provider, resolved_model, None, None, resolved_api_mode return cfg_provider, resolved_model, None, None, resolved_api_mode
# Env vars are backward-compat fallback for users who haven't
# migrated to config.yaml yet.
env_base_url = _get_auxiliary_env_override(task, "BASE_URL")
env_api_key = _get_auxiliary_env_override(task, "API_KEY")
if env_base_url:
return "custom", resolved_model, env_base_url, env_api_key, resolved_api_mode
env_provider = _get_auxiliary_provider(task)
if env_provider != "auto":
return env_provider, resolved_model, None, None, resolved_api_mode
return "auto", resolved_model, None, None, resolved_api_mode return "auto", resolved_model, None, None, resolved_api_mode
return "auto", resolved_model, None, None, resolved_api_mode return "auto", resolved_model, None, None, resolved_api_mode
@ -2454,9 +2455,9 @@ def extract_content_or_reasoning(response) -> str:
if content: if content:
# Strip inline think/reasoning blocks (mirrors _strip_think_blocks) # Strip inline think/reasoning blocks (mirrors _strip_think_blocks)
cleaned = re.sub( cleaned = re.sub(
r"<(?:think|thinking|reasoning|REASONING_SCRATCHPAD)>" r"<(?:think|thinking|reasoning|thought|REASONING_SCRATCHPAD)>"
r".*?" r".*?"
r"</(?:think|thinking|reasoning|REASONING_SCRATCHPAD)>", r"</(?:think|thinking|reasoning|thought|REASONING_SCRATCHPAD)>",
"", content, flags=re.DOTALL | re.IGNORECASE, "", content, flags=re.DOTALL | re.IGNORECASE,
).strip() ).strip()
if cleaned: if cleaned:

View file

@ -289,6 +289,14 @@ def _iter_custom_providers(config: Optional[dict] = None):
return return
custom_providers = config.get("custom_providers") custom_providers = config.get("custom_providers")
if not isinstance(custom_providers, list): if not isinstance(custom_providers, list):
# Fall back to the v12+ providers dict via the compatibility layer
try:
from hermes_cli.config import get_compatible_custom_providers
custom_providers = get_compatible_custom_providers(config)
except Exception:
return
if not custom_providers:
return return
for entry in custom_providers: for entry in custom_providers:
if not isinstance(entry, dict): if not isinstance(entry, dict):

View file

@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
# are preserved so the full model name reaches cache lookups and server queries. # are preserved so the full model name reaches cache lookups and server queries.
_PROVIDER_PREFIXES: frozenset[str] = frozenset({ _PROVIDER_PREFIXES: frozenset[str] = frozenset({
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp", "openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
"gemini", "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek", "gemini", "zai", "kimi-coding", "kimi-coding-cn", "minimax", "minimax-cn", "anthropic", "deepseek",
"opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba", "opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba",
"qwen-oauth", "qwen-oauth",
"xiaomi", "xiaomi",
@ -32,7 +32,7 @@ _PROVIDER_PREFIXES: frozenset[str] = frozenset({
# Common aliases # Common aliases
"google", "google-gemini", "google-ai-studio", "google", "google-gemini", "google-ai-studio",
"glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot", "glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
"github-models", "kimi", "moonshot", "claude", "deep-seek", "github-models", "kimi", "moonshot", "kimi-cn", "moonshot-cn", "claude", "deep-seek",
"opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen", "opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen",
"mimo", "xiaomi-mimo", "mimo", "xiaomi-mimo",
"qwen-portal", "qwen-portal",
@ -211,6 +211,7 @@ _URL_TO_PROVIDER: Dict[str, str] = {
"api.anthropic.com": "anthropic", "api.anthropic.com": "anthropic",
"api.z.ai": "zai", "api.z.ai": "zai",
"api.moonshot.ai": "kimi-coding", "api.moonshot.ai": "kimi-coding",
"api.moonshot.cn": "kimi-coding-cn",
"api.kimi.com": "kimi-coding", "api.kimi.com": "kimi-coding",
"api.minimax": "minimax", "api.minimax": "minimax",
"dashscope.aliyuncs.com": "alibaba", "dashscope.aliyuncs.com": "alibaba",
@ -775,12 +776,12 @@ def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
resp = client.post(f"{server_url}/api/show", json={"name": model}) resp = client.post(f"{server_url}/api/show", json={"name": model})
if resp.status_code == 200: if resp.status_code == 200:
data = resp.json() data = resp.json()
# Check model_info for context length # Prefer explicit num_ctx from Modelfile parameters: this is
model_info = data.get("model_info", {}) # the *runtime* context Ollama will actually allocate KV cache
for key, value in model_info.items(): # for. The GGUF model_info.context_length is the training max,
if "context_length" in key and isinstance(value, (int, float)): # which can be larger than num_ctx — using it here would let
return int(value) # Hermes grow conversations past the runtime limit and Ollama
# Check parameters string for num_ctx # would silently truncate. Matches query_ollama_num_ctx().
params = data.get("parameters", "") params = data.get("parameters", "")
if "num_ctx" in params: if "num_ctx" in params:
for line in params.split("\n"): for line in params.split("\n"):
@ -791,6 +792,11 @@ def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
return int(parts[-1]) return int(parts[-1])
except ValueError: except ValueError:
pass pass
# Fall back to GGUF model_info context_length (training max)
model_info = data.get("model_info", {})
for key, value in model_info.items():
if "context_length" in key and isinstance(value, (int, float)):
return int(value)
# LM Studio native API: /api/v1/models returns max_context_length. # LM Studio native API: /api/v1/models returns max_context_length.
# This is more reliable than the OpenAI-compat /v1/models which # This is more reliable than the OpenAI-compat /v1/models which

View file

@ -148,6 +148,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
"openai-codex": "openai", "openai-codex": "openai",
"zai": "zai", "zai": "zai",
"kimi-coding": "kimi-for-coding", "kimi-coding": "kimi-for-coding",
"kimi-coding-cn": "kimi-for-coding",
"minimax": "minimax", "minimax": "minimax",
"minimax-cn": "minimax-cn", "minimax-cn": "minimax-cn",
"deepseek": "deepseek", "deepseek": "deepseek",

View file

@ -364,6 +364,18 @@ PLATFORM_HINTS = {
"documents. You can also include image URLs in markdown format ![alt](url) and they " "documents. You can also include image URLs in markdown format ![alt](url) and they "
"will be downloaded and sent as native media when possible." "will be downloaded and sent as native media when possible."
), ),
"wecom": (
"You are on WeCom (企业微信 / Enterprise WeChat). Markdown formatting is supported. "
"You CAN send media files natively — to deliver a file to the user, include "
"MEDIA:/absolute/path/to/file in your response. The file will be sent as a native "
"WeCom attachment: images (.jpg, .png, .webp) are sent as photos (up to 10 MB), "
"other files (.pdf, .docx, .xlsx, .md, .txt, etc.) arrive as downloadable documents "
"(up to 20 MB), and videos (.mp4) play inline. Voice messages are supported but "
"must be in AMR format — other audio formats are automatically sent as file attachments. "
"You can also include image URLs in markdown format ![alt](url) and they will be "
"downloaded and sent as native photos. Do NOT tell the user you lack file-sending "
"capability — use MEDIA: syntax whenever a file delivery is appropriate."
),
} }
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------

View file

@ -309,15 +309,8 @@ compression:
# compression of older turns. # compression of older turns.
protect_last_n: 20 protect_last_n: 20
# Model to use for generating summaries (fast/cheap recommended) # To pin a specific model/provider for compression summaries, use the
# This model compresses the middle turns into a concise summary. # auxiliary section below (auxiliary.compression.provider / model).
# IMPORTANT: it receives the full middle section of the conversation, so it
# MUST support a context length at least as large as your main model's.
summary_model: "google/gemini-3-flash-preview"
# Provider for the summary model (default: "auto")
# Options: "auto", "openrouter", "nous", "main"
# summary_provider: "auto"
# ============================================================================= # =============================================================================
# Auxiliary Models (Advanced — Experimental) # Auxiliary Models (Advanced — Experimental)

208
cli.py
View file

@ -275,7 +275,6 @@ def load_cli_config() -> Dict[str, Any]:
"compression": { "compression": {
"enabled": True, # Auto-compress when approaching context limit "enabled": True, # Auto-compress when approaching context limit
"threshold": 0.50, # Compress at 50% of model's context limit "threshold": 0.50, # Compress at 50% of model's context limit
"summary_model": "", # Model for summaries (empty = use main model)
}, },
"smart_model_routing": { "smart_model_routing": {
"enabled": False, "enabled": False,
@ -2464,8 +2463,8 @@ class HermesCLI:
# suppress them during streaming too — unless show_reasoning is # suppress them during streaming too — unless show_reasoning is
# enabled, in which case we route the inner content to the # enabled, in which case we route the inner content to the
# reasoning display box instead of discarding it. # reasoning display box instead of discarding it.
_OPEN_TAGS = ("<REASONING_SCRATCHPAD>", "<think>", "<reasoning>", "<THINKING>", "<thinking>") _OPEN_TAGS = ("<REASONING_SCRATCHPAD>", "<think>", "<reasoning>", "<THINKING>", "<thinking>", "<thought>")
_CLOSE_TAGS = ("</REASONING_SCRATCHPAD>", "</think>", "</reasoning>", "</THINKING>", "</thinking>") _CLOSE_TAGS = ("</REASONING_SCRATCHPAD>", "</think>", "</reasoning>", "</THINKING>", "</thinking>", "</thought>")
# Append to a pre-filter buffer first # Append to a pre-filter buffer first
self._stream_prefilt = getattr(self, "_stream_prefilt", "") + text self._stream_prefilt = getattr(self, "_stream_prefilt", "") + text
@ -3043,8 +3042,10 @@ class HermesCLI:
) )
# Warn if the configured model is a Nous Hermes LLM (not agentic) # Warn if the configured model is a Nous Hermes LLM (not agentic)
from hermes_cli.model_switch import is_nous_hermes_non_agentic
model_name = getattr(self, "model", "") or "" model_name = getattr(self, "model", "") or ""
if "hermes" in model_name.lower(): if is_nous_hermes_non_agentic(model_name):
self.console.print() self.console.print()
self.console.print( self.console.print(
"[bold yellow]⚠ Nous Research Hermes 3 & 4 models are NOT agentic and are not " "[bold yellow]⚠ Nous Research Hermes 3 & 4 models are NOT agentic and are not "
@ -3143,6 +3144,8 @@ class HermesCLI:
# Collect displayable entries (skip system, tool-result messages) # Collect displayable entries (skip system, tool-result messages)
entries = [] # list of (role, display_text) entries = [] # list of (role, display_text)
_last_asst_idx = None # index of last assistant entry
_last_asst_full = None # un-truncated display text for last assistant
for msg in self.conversation_history: for msg in self.conversation_history:
role = msg.get("role", "") role = msg.get("role", "")
content = msg.get("content") content = msg.get("content")
@ -3172,7 +3175,9 @@ class HermesCLI:
text = "" if content is None else str(content) text = "" if content is None else str(content)
text = _strip_reasoning_tags(text) text = _strip_reasoning_tags(text)
parts = [] parts = []
full_parts = [] # un-truncated version
if text: if text:
full_parts.append(text)
lines = text.splitlines() lines = text.splitlines()
if len(lines) > MAX_ASST_LINES: if len(lines) > MAX_ASST_LINES:
text = "\n".join(lines[:MAX_ASST_LINES]) + " ..." text = "\n".join(lines[:MAX_ASST_LINES]) + " ..."
@ -3192,11 +3197,15 @@ class HermesCLI:
if len(names) > 4: if len(names) > 4:
names_str += ", ..." names_str += ", ..."
noun = "call" if tc_count == 1 else "calls" noun = "call" if tc_count == 1 else "calls"
parts.append(f"[{tc_count} tool {noun}: {names_str}]") tc_summary = f"[{tc_count} tool {noun}: {names_str}]"
parts.append(tc_summary)
full_parts.append(tc_summary)
if not parts: if not parts:
# Skip pure-reasoning messages that have no visible output # Skip pure-reasoning messages that have no visible output
continue continue
entries.append(("assistant", " ".join(parts))) entries.append(("assistant", " ".join(parts)))
_last_asst_idx = len(entries) - 1
_last_asst_full = " ".join(full_parts)
if not entries: if not entries:
return return
@ -3207,6 +3216,13 @@ class HermesCLI:
skipped = len(entries) - MAX_DISPLAY_EXCHANGES * 2 skipped = len(entries) - MAX_DISPLAY_EXCHANGES * 2
entries = entries[skipped:] entries = entries[skipped:]
# Replace last assistant entry with full (un-truncated) text
# so the user can see where they left off without wasting tokens.
if _last_asst_idx is not None and _last_asst_full:
adj_idx = _last_asst_idx - skipped
if 0 <= adj_idx < len(entries):
entries[adj_idx] = ("assistant_last", _last_asst_full)
# Build the display using Rich # Build the display using Rich
from rich.panel import Panel from rich.panel import Panel
from rich.text import Text from rich.text import Text
@ -3239,6 +3255,13 @@ class HermesCLI:
lines.append(msg_lines[0] + "\n", style="dim") lines.append(msg_lines[0] + "\n", style="dim")
for ml in msg_lines[1:]: for ml in msg_lines[1:]:
lines.append(f" {ml}\n", style="dim") lines.append(f" {ml}\n", style="dim")
elif role == "assistant_last":
# Last assistant response shown in full, non-dim
lines.append(" ◆ Hermes: ", style=f"bold {_assistant_label_c}")
msg_lines = text.splitlines()
lines.append(msg_lines[0] + "\n", style="")
for ml in msg_lines[1:]:
lines.append(f" {ml}\n", style="")
else: else:
lines.append(" ◆ Hermes: ", style=f"dim bold {_assistant_label_c}") lines.append(" ◆ Hermes: ", style=f"dim bold {_assistant_label_c}")
msg_lines = text.splitlines() msg_lines = text.splitlines()
@ -3383,6 +3406,93 @@ class HermesCLI:
# Treat as a git hash # Treat as a git hash
return ref return ref
def _handle_snapshot_command(self, command: str):
"""Handle /snapshot — lightweight state snapshots for Hermes config/state.
Syntax:
/snapshot list recent snapshots
/snapshot create [label] create a snapshot
/snapshot restore <id> restore state from snapshot
/snapshot prune [N] prune to N snapshots (default 20)
"""
from hermes_cli.backup import (
create_quick_snapshot, list_quick_snapshots,
restore_quick_snapshot, prune_quick_snapshots,
)
from hermes_constants import display_hermes_home
parts = command.split()
subcmd = parts[1].lower() if len(parts) > 1 else "list"
if subcmd in ("list", "ls"):
snaps = list_quick_snapshots()
if not snaps:
print(" No state snapshots yet.")
print(" Create one: /snapshot create [label]")
return
print(f" State snapshots ({display_hermes_home()}/state-snapshots/):\n")
print(f" {'#':>3} {'ID':<35} {'Files':>5} {'Size':>10} {'Label'}")
print(f" {''*3} {''*35} {''*5} {''*10} {''*20}")
for i, s in enumerate(snaps, 1):
size = s.get("total_size", 0)
if size < 1024:
size_str = f"{size} B"
elif size < 1024 * 1024:
size_str = f"{size / 1024:.0f} KB"
else:
size_str = f"{size / 1024 / 1024:.1f} MB"
label = s.get("label") or ""
print(f" {i:3} {s['id']:<35} {s.get('file_count', 0):>5} {size_str:>10} {label}")
elif subcmd == "create":
label = " ".join(parts[2:]) if len(parts) > 2 else None
snap_id = create_quick_snapshot(label=label)
if snap_id:
print(f" Snapshot created: {snap_id}")
else:
print(" No state files found to snapshot.")
elif subcmd in ("restore", "rewind"):
if len(parts) < 3:
print(" Usage: /snapshot restore <snapshot-id>")
# Show hint with most recent snapshot
snaps = list_quick_snapshots(limit=1)
if snaps:
print(f" Most recent: {snaps[0]['id']}")
return
snap_id = parts[2]
# Allow restore by number (1-indexed)
try:
idx = int(snap_id)
snaps = list_quick_snapshots()
if 1 <= idx <= len(snaps):
snap_id = snaps[idx - 1]["id"]
else:
print(f" Invalid snapshot number. Use 1-{len(snaps)}.")
return
except ValueError:
pass
if restore_quick_snapshot(snap_id):
print(f" Restored state from: {snap_id}")
print(" Restart recommended for state.db changes to take effect.")
else:
print(f" Snapshot not found: {snap_id}")
elif subcmd == "prune":
keep = 20
if len(parts) > 2:
try:
keep = int(parts[2])
except ValueError:
print(" Usage: /snapshot prune [keep-count]")
return
deleted = prune_quick_snapshots(keep=keep)
print(f" Pruned {deleted} old snapshot(s) (keeping {keep}).")
else:
print(f" Unknown subcommand: {subcmd}")
print(" Usage: /snapshot [list|create [label]|restore <id>|prune [N]]")
def _handle_stop_command(self): def _handle_stop_command(self):
"""Handle /stop — kill all running background processes. """Handle /stop — kill all running background processes.
@ -4704,10 +4814,10 @@ class HermesCLI:
user_provs = None user_provs = None
custom_provs = None custom_provs = None
try: try:
from hermes_cli.config import load_config from hermes_cli.config import get_compatible_custom_providers, load_config
cfg = load_config() cfg = load_config()
user_provs = cfg.get("providers") user_provs = cfg.get("providers")
custom_provs = cfg.get("custom_providers") custom_provs = get_compatible_custom_providers(cfg)
except Exception: except Exception:
pass pass
@ -5497,10 +5607,16 @@ class HermesCLI:
self._show_insights(cmd_original) self._show_insights(cmd_original)
elif canonical == "copy": elif canonical == "copy":
self._handle_copy_command(cmd_original) self._handle_copy_command(cmd_original)
elif canonical == "debug":
self._handle_debug_command()
elif canonical == "paste": elif canonical == "paste":
self._handle_paste_command() self._handle_paste_command()
elif canonical == "image": elif canonical == "image":
self._handle_image_command(cmd_original) self._handle_image_command(cmd_original)
elif canonical == "reload":
from hermes_cli.config import reload_env
count = reload_env()
print(f" Reloaded .env ({count} var(s) updated)")
elif canonical == "reload-mcp": elif canonical == "reload-mcp":
with self._busy_command(self._slow_command_status(cmd_original)): with self._busy_command(self._slow_command_status(cmd_original)):
self._reload_mcp() self._reload_mcp()
@ -5529,6 +5645,8 @@ class HermesCLI:
print(f"Plugin system error: {e}") print(f"Plugin system error: {e}")
elif canonical == "rollback": elif canonical == "rollback":
self._handle_rollback_command(cmd_original) self._handle_rollback_command(cmd_original)
elif canonical == "snapshot":
self._handle_snapshot_command(cmd_original)
elif canonical == "stop": elif canonical == "stop":
self._handle_stop_command() self._handle_stop_command()
elif canonical == "agents": elif canonical == "agents":
@ -6413,6 +6531,14 @@ class HermesCLI:
except Exception as e: except Exception as e:
print(f" ❌ Compression failed: {e}") print(f" ❌ Compression failed: {e}")
def _handle_debug_command(self):
"""Handle /debug — upload debug report + logs and print paste URLs."""
from hermes_cli.debug import run_debug_share
from types import SimpleNamespace
args = SimpleNamespace(lines=200, expire=7, local=False)
run_debug_share(args)
def _show_usage(self): def _show_usage(self):
"""Show rate limits (if available) and session token usage.""" """Show rate limits (if available) and session token usage."""
if not self.agent: if not self.agent:
@ -7725,8 +7851,10 @@ class HermesCLI:
"error": _summary, "error": _summary,
} }
# Start agent in background thread # Start agent in background thread (daemon so it cannot keep the
agent_thread = threading.Thread(target=run_agent) # process alive when the user closes the terminal tab — SIGHUP
# exits the main thread and daemon threads are reaped automatically).
agent_thread = threading.Thread(target=run_agent, daemon=True)
agent_thread.start() agent_thread.start()
# Monitor the dedicated interrupt queue while the agent runs. # Monitor the dedicated interrupt queue while the agent runs.
@ -7912,6 +8040,17 @@ class HermesCLI:
sys.stdout.write("\a") sys.stdout.write("\a")
sys.stdout.flush() sys.stdout.flush()
# Notify when iteration budget was hit
if result and not result.get("completed") and not result.get("interrupted"):
_api_calls = result.get("api_calls", 0)
if _api_calls >= getattr(self.agent, "max_iterations", 90):
_max_iter = getattr(self.agent, "max_iterations", 90)
_cprint(
f"\n{_DIM}⚠ Iteration budget reached "
f"({_api_calls}/{_max_iter}) — "
f"response may be incomplete{_RST}"
)
# Speak response aloud if voice TTS is enabled # Speak response aloud if voice TTS is enabled
# Skip batch TTS when streaming TTS already handled it # Skip batch TTS when streaming TTS already handled it
if self._voice_tts and response and not use_streaming_tts: if self._voice_tts and response and not use_streaming_tts:
@ -8752,6 +8891,9 @@ class HermesCLI:
if _should_auto_attach_clipboard_image_on_paste(pasted_text) and self._try_attach_clipboard_image(): if _should_auto_attach_clipboard_image_on_paste(pasted_text) and self._try_attach_clipboard_image():
event.app.invalidate() event.app.invalidate()
if pasted_text: if pasted_text:
# Sanitize surrogate characters (e.g. from Word/Google Docs paste) before writing
from run_agent import _sanitize_surrogates
pasted_text = _sanitize_surrogates(pasted_text)
line_count = pasted_text.count('\n') line_count = pasted_text.count('\n')
buf = event.current_buffer buf = event.current_buffer
if line_count >= 5 and not buf.text.strip().startswith('/'): if line_count >= 5 and not buf.text.strip().startswith('/'):
@ -9677,17 +9819,37 @@ class HermesCLI:
pass # Signal handlers may fail in restricted environments pass # Signal handlers may fail in restricted environments
# Install a custom asyncio exception handler that suppresses the # Install a custom asyncio exception handler that suppresses the
# "Event loop is closed" RuntimeError from httpx transport cleanup. # "Event loop is closed" RuntimeError from httpx transport cleanup
# This is defense-in-depth — the primary fix is neuter_async_httpx_del # and the "0 is not registered" KeyError from broken stdin (#6393).
# which disables __del__ entirely, but older clients or SDK upgrades # The RuntimeError fix is defense-in-depth — the primary fix is
# could bypass it. # neuter_async_httpx_del which disables __del__ entirely. The
# KeyError fix handles macOS + uv-managed Python environments where
# fd 0 is not reliably available to the asyncio selector.
def _suppress_closed_loop_errors(loop, context): def _suppress_closed_loop_errors(loop, context):
exc = context.get("exception") exc = context.get("exception")
if isinstance(exc, RuntimeError) and "Event loop is closed" in str(exc): if isinstance(exc, RuntimeError) and "Event loop is closed" in str(exc):
return # silently suppress return # silently suppress
if isinstance(exc, KeyError) and "is not registered" in str(exc):
return # suppress selector registration failures (#6393)
# Fall back to default handler for everything else # Fall back to default handler for everything else
loop.default_exception_handler(context) loop.default_exception_handler(context)
# Validate stdin before launching prompt_toolkit — on macOS with
# uv-managed Python, fd 0 can be invalid or unregisterable with the
# asyncio selector, causing "KeyError: '0 is not registered'" (#6393).
try:
import os as _os
_os.fstat(0)
except OSError:
print(
"Error: stdin (fd 0) is not available.\n"
"This can happen with certain Python installations (e.g. uv-managed cPython on macOS).\n"
"Try reinstalling Python via pyenv or Homebrew, then re-run: hermes setup"
)
_run_cleanup()
self._print_exit_summary()
return
# Run the application with patch_stdout for proper output handling # Run the application with patch_stdout for proper output handling
try: try:
with patch_stdout(): with patch_stdout():
@ -9701,8 +9863,28 @@ class HermesCLI:
app.run() app.run()
except (EOFError, KeyboardInterrupt, BrokenPipeError): except (EOFError, KeyboardInterrupt, BrokenPipeError):
pass pass
except (KeyError, OSError) as _stdin_err:
# Catch selector registration failures from broken stdin (#6393).
# This is the fallback for cases that slip past the fstat() guard.
if "is not registered" in str(_stdin_err) or "Bad file descriptor" in str(_stdin_err):
print(
f"\nError: stdin is not usable ({_stdin_err}).\n"
"This can happen with certain Python installations (e.g. uv-managed cPython on macOS).\n"
"Try reinstalling Python via pyenv or Homebrew, then re-run: hermes setup"
)
else:
raise
finally: finally:
self._should_exit = True self._should_exit = True
# Interrupt the agent immediately so its daemon thread stops making
# API calls and exits promptly (agent_thread is daemon, so the
# process will exit once the main thread finishes, but interrupting
# avoids wasted API calls and lets run_conversation clean up).
if self.agent and getattr(self, '_agent_running', False):
try:
self.agent.interrupt()
except Exception:
pass
# Flush memories before exit (only for substantial conversations) # Flush memories before exit (only for substantial conversations)
if self.agent and self.conversation_history: if self.agent and self.conversation_history:
try: try:

View file

@ -665,6 +665,17 @@ def load_gateway_config() -> GatewayConfig:
_apply_env_overrides(config) _apply_env_overrides(config)
# --- Validate loaded values --- # --- Validate loaded values ---
_validate_gateway_config(config)
return config
def _validate_gateway_config(config: "GatewayConfig") -> None:
"""Validate and sanitize a loaded GatewayConfig in place.
Called by ``load_gateway_config()`` after all config sources are merged.
Extracted as a separate function for testability.
"""
policy = config.default_reset_policy policy = config.default_reset_policy
if not (0 <= policy.at_hour <= 23): if not (0 <= policy.at_hour <= 23):
@ -701,7 +712,31 @@ def load_gateway_config() -> GatewayConfig:
platform.value, env_name, platform.value, env_name,
) )
return config # Reject known-weak placeholder tokens.
# Ported from openclaw/openclaw#64586: users who copy .env.example
# without changing placeholder values get a clear startup error instead
# of a confusing "auth failed" from the platform API.
try:
from hermes_cli.auth import has_usable_secret
except ImportError:
has_usable_secret = None # type: ignore[assignment]
if has_usable_secret is not None:
for platform, pconfig in config.platforms.items():
if not pconfig.enabled:
continue
env_name = _token_env_names.get(platform)
if not env_name:
continue
token = pconfig.token
if token and token.strip() and not has_usable_secret(token, min_length=4):
logger.error(
"%s is enabled but %s is set to a placeholder value ('%s'). "
"Set a real bot token before starting the gateway. "
"The adapter will NOT be started.",
platform.value, env_name, token.strip()[:6] + "...",
)
pconfig.enabled = False
def _apply_env_overrides(config: GatewayConfig) -> None: def _apply_env_overrides(config: GatewayConfig) -> None:

View file

@ -82,7 +82,7 @@ _PLATFORM_DEFAULTS: dict[str, dict[str, Any]] = {
# Tier 3 — no edit support, progress messages are permanent # Tier 3 — no edit support, progress messages are permanent
"signal": _TIER_LOW, "signal": _TIER_LOW,
"whatsapp": _TIER_LOW, "whatsapp": _TIER_MEDIUM, # Baileys bridge supports /edit
"bluebubbles": _TIER_LOW, "bluebubbles": _TIER_LOW,
"weixin": _TIER_LOW, "weixin": _TIER_LOW,
"wecom": _TIER_LOW, "wecom": _TIER_LOW,

View file

@ -54,6 +54,66 @@ DEFAULT_PORT = 8642
MAX_STORED_RESPONSES = 100 MAX_STORED_RESPONSES = 100
MAX_REQUEST_BYTES = 1_000_000 # 1 MB default limit for POST bodies MAX_REQUEST_BYTES = 1_000_000 # 1 MB default limit for POST bodies
CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS = 30.0 CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS = 30.0
MAX_NORMALIZED_TEXT_LENGTH = 65_536 # 64 KB cap for normalized content parts
MAX_CONTENT_LIST_SIZE = 1_000 # Max items when content is an array
def _normalize_chat_content(
content: Any, *, _max_depth: int = 10, _depth: int = 0,
) -> str:
"""Normalize OpenAI chat message content into a plain text string.
Some clients (Open WebUI, LobeChat, etc.) send content as an array of
typed parts instead of a plain string::
[{"type": "text", "text": "hello"}, {"type": "input_text", "text": "..."}]
This function flattens those into a single string so the agent pipeline
(which expects strings) doesn't choke.
Defensive limits prevent abuse: recursion depth, list size, and output
length are all bounded.
"""
if _depth > _max_depth:
return ""
if content is None:
return ""
if isinstance(content, str):
return content[:MAX_NORMALIZED_TEXT_LENGTH] if len(content) > MAX_NORMALIZED_TEXT_LENGTH else content
if isinstance(content, list):
parts: List[str] = []
items = content[:MAX_CONTENT_LIST_SIZE] if len(content) > MAX_CONTENT_LIST_SIZE else content
for item in items:
if isinstance(item, str):
if item:
parts.append(item[:MAX_NORMALIZED_TEXT_LENGTH])
elif isinstance(item, dict):
item_type = str(item.get("type") or "").strip().lower()
if item_type in {"text", "input_text", "output_text"}:
text = item.get("text", "")
if text:
try:
parts.append(str(text)[:MAX_NORMALIZED_TEXT_LENGTH])
except Exception:
pass
# Silently skip image_url / other non-text parts
elif isinstance(item, list):
nested = _normalize_chat_content(item, _max_depth=_max_depth, _depth=_depth + 1)
if nested:
parts.append(nested)
# Check accumulated size
if sum(len(p) for p in parts) >= MAX_NORMALIZED_TEXT_LENGTH:
break
result = "\n".join(parts)
return result[:MAX_NORMALIZED_TEXT_LENGTH] if len(result) > MAX_NORMALIZED_TEXT_LENGTH else result
# Fallback for unexpected types (int, float, bool, etc.)
try:
result = str(content)
return result[:MAX_NORMALIZED_TEXT_LENGTH] if len(result) > MAX_NORMALIZED_TEXT_LENGTH else result
except Exception:
return ""
def check_api_server_requirements() -> bool: def check_api_server_requirements() -> bool:
@ -553,7 +613,7 @@ class APIServerAdapter(BasePlatformAdapter):
for msg in messages: for msg in messages:
role = msg.get("role", "") role = msg.get("role", "")
content = msg.get("content", "") content = _normalize_chat_content(msg.get("content", ""))
if role == "system": if role == "system":
# Accumulate system messages # Accumulate system messages
if system_prompt is None: if system_prompt is None:
@ -926,18 +986,7 @@ class APIServerAdapter(BasePlatformAdapter):
input_messages.append({"role": "user", "content": item}) input_messages.append({"role": "user", "content": item})
elif isinstance(item, dict): elif isinstance(item, dict):
role = item.get("role", "user") role = item.get("role", "user")
content = item.get("content", "") content = _normalize_chat_content(item.get("content", ""))
# Handle content that may be a list of content parts
if isinstance(content, list):
text_parts = []
for part in content:
if isinstance(part, dict) and part.get("type") == "input_text":
text_parts.append(part.get("text", ""))
elif isinstance(part, dict) and part.get("type") == "output_text":
text_parts.append(part.get("text", ""))
elif isinstance(part, str):
text_parts.append(part)
content = "\n".join(text_parts)
input_messages.append({"role": role, "content": content}) input_messages.append({"role": role, "content": content})
else: else:
return web.json_response(_openai_error("'input' must be a string or array"), status=400) return web.json_response(_openai_error("'input' must be a string or array"), status=400)
@ -1770,6 +1819,23 @@ class APIServerAdapter(BasePlatformAdapter):
) )
return False return False
# Refuse to start network-accessible with a placeholder key.
# Ported from openclaw/openclaw#64586.
if is_network_accessible(self._host) and self._api_key:
try:
from hermes_cli.auth import has_usable_secret
if not has_usable_secret(self._api_key, min_length=8):
logger.error(
"[%s] Refusing to start: API_SERVER_KEY is set to a "
"placeholder value. Generate a real secret "
"(e.g. `openssl rand -hex 32`) and set API_SERVER_KEY "
"before exposing the API server on %s.",
self.name, self._host,
)
return False
except ImportError:
pass
# Port conflict detection — fail fast if port is already in use # Port conflict detection — fail fast if port is already in use
try: try:
with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as _s: with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as _s:

View file

@ -21,6 +21,59 @@ from urllib.parse import urlsplit
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def utf16_len(s: str) -> int:
"""Count UTF-16 code units in *s*.
Telegram's message-length limit (4 096) is measured in UTF-16 code units,
**not** Unicode code-points. Characters outside the Basic Multilingual
Plane (emoji like 😀, CJK Extension B, musical symbols, ) are encoded as
surrogate pairs and therefore consume **two** UTF-16 code units each, even
though Python's ``len()`` counts them as one.
Ported from nearai/ironclaw#2304 which discovered the same discrepancy in
Rust's ``chars().count()``.
"""
return len(s.encode("utf-16-le")) // 2
def _prefix_within_utf16_limit(s: str, limit: int) -> str:
"""Return the longest prefix of *s* whose UTF-16 length ≤ *limit*.
Unlike a plain ``s[:limit]``, this respects surrogate-pair boundaries so
we never slice a multi-code-unit character in half.
"""
if utf16_len(s) <= limit:
return s
# Binary search for the longest safe prefix
lo, hi = 0, len(s)
while lo < hi:
mid = (lo + hi + 1) // 2
if utf16_len(s[:mid]) <= limit:
lo = mid
else:
hi = mid - 1
return s[:lo]
def _custom_unit_to_cp(s: str, budget: int, len_fn) -> int:
"""Return the largest codepoint offset *n* such that ``len_fn(s[:n]) <= budget``.
Used by :meth:`BasePlatformAdapter.truncate_message` when *len_fn* measures
length in units different from Python codepoints (e.g. UTF-16 code units).
Falls back to binary search which is O(log n) calls to *len_fn*.
"""
if len_fn(s) <= budget:
return len(s)
lo, hi = 0, len(s)
while lo < hi:
mid = (lo + hi + 1) // 2
if len_fn(s[:mid]) <= budget:
lo = mid
else:
hi = mid - 1
return lo
def is_network_accessible(host: str) -> bool: def is_network_accessible(host: str) -> bool:
"""Return True if *host* would expose the server beyond loopback. """Return True if *host* would expose the server beyond loopback.
@ -1897,7 +1950,11 @@ class BasePlatformAdapter(ABC):
return content return content
@staticmethod @staticmethod
def truncate_message(content: str, max_length: int = 4096) -> List[str]: def truncate_message(
content: str,
max_length: int = 4096,
len_fn: Optional["Callable[[str], int]"] = None,
) -> List[str]:
""" """
Split a long message into chunks, preserving code block boundaries. Split a long message into chunks, preserving code block boundaries.
@ -1909,11 +1966,16 @@ class BasePlatformAdapter(ABC):
Args: Args:
content: The full message content content: The full message content
max_length: Maximum length per chunk (platform-specific) max_length: Maximum length per chunk (platform-specific)
len_fn: Optional length function for measuring string length.
Defaults to ``len`` (Unicode code-points). Pass
``utf16_len`` for platforms that measure message
length in UTF-16 code units (e.g. Telegram).
Returns: Returns:
List of message chunks List of message chunks
""" """
if len(content) <= max_length: _len = len_fn or len
if _len(content) <= max_length:
return [content] return [content]
INDICATOR_RESERVE = 10 # room for " (XX/XX)" INDICATOR_RESERVE = 10 # room for " (XX/XX)"
@ -1932,22 +1994,33 @@ class BasePlatformAdapter(ABC):
# How much body text we can fit after accounting for the prefix, # How much body text we can fit after accounting for the prefix,
# a potential closing fence, and the chunk indicator. # a potential closing fence, and the chunk indicator.
headroom = max_length - INDICATOR_RESERVE - len(prefix) - len(FENCE_CLOSE) headroom = max_length - INDICATOR_RESERVE - _len(prefix) - _len(FENCE_CLOSE)
if headroom < 1: if headroom < 1:
headroom = max_length // 2 headroom = max_length // 2
# Everything remaining fits in one final chunk # Everything remaining fits in one final chunk
if len(prefix) + len(remaining) <= max_length - INDICATOR_RESERVE: if _len(prefix) + _len(remaining) <= max_length - INDICATOR_RESERVE:
chunks.append(prefix + remaining) chunks.append(prefix + remaining)
break break
# Find a natural split point (prefer newlines, then spaces) # Find a natural split point (prefer newlines, then spaces).
region = remaining[:headroom] # When _len != len (e.g. utf16_len for Telegram), headroom is
# measured in the custom unit. We need codepoint-based slice
# positions that stay within the custom-unit budget.
#
# _safe_slice_pos() maps a custom-unit budget to the largest
# codepoint offset whose custom length ≤ budget.
if _len is not len:
# Map headroom (custom units) → codepoint slice length
_cp_limit = _custom_unit_to_cp(remaining, headroom, _len)
else:
_cp_limit = headroom
region = remaining[:_cp_limit]
split_at = region.rfind("\n") split_at = region.rfind("\n")
if split_at < headroom // 2: if split_at < _cp_limit // 2:
split_at = region.rfind(" ") split_at = region.rfind(" ")
if split_at < 1: if split_at < 1:
split_at = headroom split_at = _cp_limit
# Avoid splitting inside an inline code span (`...`). # Avoid splitting inside an inline code span (`...`).
# If the text before split_at has an odd number of unescaped # If the text before split_at has an odd number of unescaped
@ -1967,7 +2040,7 @@ class BasePlatformAdapter(ABC):
safe_split = candidate.rfind(" ", 0, last_bt) safe_split = candidate.rfind(" ", 0, last_bt)
nl_split = candidate.rfind("\n", 0, last_bt) nl_split = candidate.rfind("\n", 0, last_bt)
safe_split = max(safe_split, nl_split) safe_split = max(safe_split, nl_split)
if safe_split > headroom // 4: if safe_split > _cp_limit // 4:
split_at = safe_split split_at = safe_split
chunk_body = remaining[:split_at] chunk_body = remaining[:split_at]

View file

@ -442,6 +442,7 @@ class DiscordAdapter(BasePlatformAdapter):
self._pending_text_batches: Dict[str, MessageEvent] = {} self._pending_text_batches: Dict[str, MessageEvent] = {}
self._pending_text_batch_tasks: Dict[str, asyncio.Task] = {} self._pending_text_batch_tasks: Dict[str, asyncio.Task] = {}
self._voice_text_channels: Dict[int, int] = {} # guild_id -> text_channel_id self._voice_text_channels: Dict[int, int] = {} # guild_id -> text_channel_id
self._voice_sources: Dict[int, Dict[str, Any]] = {} # guild_id -> linked text channel source metadata
self._voice_timeout_tasks: Dict[int, asyncio.Task] = {} # guild_id -> timeout task self._voice_timeout_tasks: Dict[int, asyncio.Task] = {} # guild_id -> timeout task
# Phase 2: voice listening # Phase 2: voice listening
self._voice_receivers: Dict[int, VoiceReceiver] = {} # guild_id -> VoiceReceiver self._voice_receivers: Dict[int, VoiceReceiver] = {} # guild_id -> VoiceReceiver
@ -1045,6 +1046,7 @@ class DiscordAdapter(BasePlatformAdapter):
if task: if task:
task.cancel() task.cancel()
self._voice_text_channels.pop(guild_id, None) self._voice_text_channels.pop(guild_id, None)
self._voice_sources.pop(guild_id, None)
# Maximum seconds to wait for voice playback before giving up # Maximum seconds to wait for voice playback before giving up
PLAYBACK_TIMEOUT = 120 PLAYBACK_TIMEOUT = 120
@ -2244,6 +2246,7 @@ class DiscordAdapter(BasePlatformAdapter):
thread_id = str(message.channel.id) thread_id = str(message.channel.id)
parent_channel_id = self._get_parent_channel_id(message.channel) parent_channel_id = self._get_parent_channel_id(message.channel)
is_voice_linked_channel = False
if not isinstance(message.channel, discord.DMChannel): if not isinstance(message.channel, discord.DMChannel):
channel_ids = {str(message.channel.id)} channel_ids = {str(message.channel.id)}
if parent_channel_id: if parent_channel_id:
@ -2270,7 +2273,12 @@ class DiscordAdapter(BasePlatformAdapter):
channel_ids.add(parent_channel_id) channel_ids.add(parent_channel_id)
require_mention = os.getenv("DISCORD_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no") require_mention = os.getenv("DISCORD_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no")
is_free_channel = bool(channel_ids & free_channels) # Voice-linked text channels act as free-response while voice is active.
# Only the exact bound channel gets the exemption, not sibling threads.
voice_linked_ids = {str(ch_id) for ch_id in self._voice_text_channels.values()}
current_channel_id = str(message.channel.id)
is_voice_linked_channel = current_channel_id in voice_linked_ids
is_free_channel = bool(channel_ids & free_channels) or is_voice_linked_channel
# Skip the mention check if the message is in a thread where # Skip the mention check if the message is in a thread where
# the bot has previously participated (auto-created or replied in). # the bot has previously participated (auto-created or replied in).
@ -2294,7 +2302,7 @@ class DiscordAdapter(BasePlatformAdapter):
no_thread_channels = {ch.strip() for ch in no_thread_channels_raw.split(",") if ch.strip()} no_thread_channels = {ch.strip() for ch in no_thread_channels_raw.split(",") if ch.strip()}
skip_thread = bool(channel_ids & no_thread_channels) skip_thread = bool(channel_ids & no_thread_channels)
auto_thread = os.getenv("DISCORD_AUTO_THREAD", "true").lower() in ("true", "1", "yes") auto_thread = os.getenv("DISCORD_AUTO_THREAD", "true").lower() in ("true", "1", "yes")
if auto_thread and not skip_thread: if auto_thread and not skip_thread and not is_voice_linked_channel:
thread = await self._auto_create_thread(message) thread = await self._auto_create_thread(message)
if thread: if thread:
is_thread = True is_thread = True

View file

@ -34,6 +34,9 @@ from datetime import datetime
from pathlib import Path from pathlib import Path
from types import SimpleNamespace from types import SimpleNamespace
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
# aiohttp/websockets are independent optional deps — import outside lark_oapi # aiohttp/websockets are independent optional deps — import outside lark_oapi
# so they remain available for tests and webhook mode even if lark_oapi is missing. # so they remain available for tests and webhook mode even if lark_oapi is missing.
@ -169,6 +172,19 @@ _FEISHU_CARD_ACTION_DEDUP_TTL_SECONDS = 15 * 60 # card action token dedup win
_FEISHU_BOT_MSG_TRACK_SIZE = 512 # LRU size for tracking sent message IDs _FEISHU_BOT_MSG_TRACK_SIZE = 512 # LRU size for tracking sent message IDs
_FEISHU_REPLY_FALLBACK_CODES = frozenset({230011, 231003}) # reply target withdrawn/missing → create fallback _FEISHU_REPLY_FALLBACK_CODES = frozenset({230011, 231003}) # reply target withdrawn/missing → create fallback
_FEISHU_ACK_EMOJI = "OK" _FEISHU_ACK_EMOJI = "OK"
# QR onboarding constants
#
# Account-portal hosts keyed by deployment flavour:
# "feishu" (accounts.feishu.cn, China) vs "lark" (accounts.larksuite.com,
# international).  Used by _accounts_base_url(); unknown keys fall back to
# the "feishu" entry.
_ONBOARD_ACCOUNTS_URLS = {
    "feishu": "https://accounts.feishu.cn",
    "lark": "https://accounts.larksuite.com",
}
# Open-platform (developer console) hosts, same keys as above.
_ONBOARD_OPEN_URLS = {
    "feishu": "https://open.feishu.cn",
    "lark": "https://open.larksuite.com",
}
# Device-code registration endpoint, appended to the accounts host
# by _post_registration().
_REGISTRATION_PATH = "/oauth/v1/app/registration"
# Per-request timeout (seconds) for onboarding HTTP calls.
_ONBOARD_REQUEST_TIMEOUT_S = 10
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Fallback display strings # Fallback display strings
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -3621,3 +3637,328 @@ class FeishuAdapter(BasePlatformAdapter):
return _FEISHU_FILE_UPLOAD_TYPE, "file" return _FEISHU_FILE_UPLOAD_TYPE, "file"
return _FEISHU_FILE_UPLOAD_TYPE, "file" return _FEISHU_FILE_UPLOAD_TYPE, "file"
# =============================================================================
# QR scan-to-create onboarding
#
# Device-code flow: user scans a QR code with Feishu/Lark mobile app and the
# platform creates a fully configured bot application automatically.
# Called by `hermes gateway setup` via _setup_feishu() in hermes_cli/gateway.py.
# =============================================================================
def _accounts_base_url(domain: str) -> str:
    """Accounts-host base URL for *domain*; unknown domains fall back to Feishu."""
    if domain in _ONBOARD_ACCOUNTS_URLS:
        return _ONBOARD_ACCOUNTS_URLS[domain]
    return _ONBOARD_ACCOUNTS_URLS["feishu"]
def _onboard_open_base_url(domain: str) -> str:
    """Open-platform base URL for *domain*; unknown domains fall back to Feishu."""
    if domain in _ONBOARD_OPEN_URLS:
        return _ONBOARD_OPEN_URLS[domain]
    return _ONBOARD_OPEN_URLS["feishu"]
def _post_registration(base_url: str, body: Dict[str, str]) -> dict:
    """POST form-encoded *body* to the registration endpoint and return parsed JSON.

    The registration endpoint returns JSON even on 4xx (e.g. a poll returns
    ``authorization_pending`` with HTTP 400), so the response body is parsed
    regardless of HTTP status.

    Args:
        base_url: Scheme + host of the accounts service (no trailing path).
        body: Form fields to send, e.g. ``{"action": "poll", ...}``.

    Returns:
        The decoded JSON response as a dict.

    Raises:
        HTTPError: when the server returns a non-2xx status with an empty or
            non-JSON body.
        URLError / OSError: on network-level failures.
    """
    url = f"{base_url}{_REGISTRATION_PATH}"
    data = urlencode(body).encode("utf-8")
    req = Request(url, data=data, headers={"Content-Type": "application/x-www-form-urlencoded"})
    try:
        with urlopen(req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
            return json.loads(resp.read().decode("utf-8"))
    except HTTPError as exc:
        # Device-flow "soft" errors arrive as 4xx with a JSON payload; parse
        # the body when present instead of failing.
        body_bytes = exc.read()
        if body_bytes:
            try:
                return json.loads(body_bytes.decode("utf-8"))
            # json.JSONDecodeError and UnicodeDecodeError are both ValueError
            # subclasses, so ValueError alone covers decode + parse failures
            # (the original tuple listed JSONDecodeError redundantly).
            except ValueError:
                raise exc from None
        raise
def _init_registration(domain: str = "feishu") -> None:
    """Check that the registration environment supports client_secret auth.

    Raises:
        RuntimeError: if ``client_secret`` is not among the supported methods.
    """
    response = _post_registration(_accounts_base_url(domain), {"action": "init"})
    supported = response.get("supported_auth_methods") or []
    if "client_secret" in supported:
        return
    raise RuntimeError(
        f"Feishu / Lark registration environment does not support client_secret auth. "
        f"Supported: {supported}"
    )
def _begin_registration(domain: str = "feishu") -> dict:
    """Start the device-code registration flow.

    Args:
        domain: Brand to register against ("feishu" or "lark").

    Returns:
        dict with ``device_code``, ``qr_url``, ``user_code``, ``interval``
        (poll period, seconds) and ``expire_in`` (flow TTL, seconds).

    Raises:
        RuntimeError: if the server response lacks a ``device_code``.
    """
    base_url = _accounts_base_url(domain)
    res = _post_registration(base_url, {
        "action": "begin",
        "archetype": "PersonalAgent",
        "auth_method": "client_secret",
        "request_user_info": "open_id",
    })
    device_code = res.get("device_code")
    if not device_code:
        raise RuntimeError("Feishu / Lark registration did not return a device_code")
    qr_url = res.get("verification_uri_complete", "")
    # Append attribution params only to a real URL. Previously, a missing
    # verification_uri_complete yielded the junk string "?from=hermes&tp=hermes".
    if qr_url:
        sep = "&" if "?" in qr_url else "?"
        qr_url = f"{qr_url}{sep}from=hermes&tp=hermes"
    return {
        "device_code": device_code,
        "qr_url": qr_url,
        "user_code": res.get("user_code", ""),
        "interval": res.get("interval") or 5,
        "expire_in": res.get("expire_in") or 600,
    }
def _poll_registration(
    *,
    device_code: str,
    interval: int,
    expire_in: int,
    domain: str = "feishu",
) -> Optional[dict]:
    """Poll the registration endpoint until the user scans the QR code.

    Args:
        device_code: Token from ``_begin_registration`` identifying this flow.
        interval: Seconds to sleep between polls (server-suggested cadence).
        expire_in: Overall timeout in seconds for the whole flow.
        domain: Initial brand; may auto-switch to "lark" mid-flow based on
            the scanning user's ``tenant_brand``.

    Returns dict with app_id, app_secret, domain, open_id on success.
    Returns None on denial, token expiry, or timeout.
    """
    deadline = time.time() + expire_in
    current_domain = domain
    domain_switched = False  # ensures the feishu→lark switch happens at most once
    poll_count = 0  # counts only polls that got a parsable response
    while time.time() < deadline:
        base_url = _accounts_base_url(current_domain)
        try:
            res = _post_registration(base_url, {
                "action": "poll",
                "device_code": device_code,
                "tp": "ob_app",
            })
        except (URLError, OSError, json.JSONDecodeError):
            # Transient network/parse failure — wait one interval and retry
            # until the deadline; does not count toward poll_count.
            time.sleep(interval)
            continue
        poll_count += 1
        # Progress indicator: header once, then one dot every 6 polls.
        if poll_count == 1:
            print(" Fetching configuration results...", end="", flush=True)
        elif poll_count % 6 == 0:
            print(".", end="", flush=True)
        # Domain auto-detection
        user_info = res.get("user_info") or {}
        tenant_brand = user_info.get("tenant_brand")
        if tenant_brand == "lark" and not domain_switched:
            current_domain = "lark"
            domain_switched = True
            # Fall through — server may return credentials in this same response.
        # Success
        if res.get("client_id") and res.get("client_secret"):
            if poll_count > 0:
                print()  # newline after "Fetching configuration results..." dots
            return {
                "app_id": res["client_id"],
                "app_secret": res["client_secret"],
                "domain": current_domain,
                "open_id": user_info.get("open_id"),
            }
        # Terminal errors
        error = res.get("error", "")
        if error in ("access_denied", "expired_token"):
            if poll_count > 0:
                print()  # close the progress line before returning
            logger.warning("[Feishu onboard] Registration %s", error)
            return None
        # authorization_pending or unknown — keep polling
        time.sleep(interval)
    # Deadline passed without success or terminal error.
    if poll_count > 0:
        print()
    logger.warning("[Feishu onboard] Poll timed out after %ds", expire_in)
    return None
# Optional dependency: `qrcode` renders a scannable QR in the terminal.
# NOTE(review): TypeError is caught alongside ImportError — presumably some
# broken installs raise it at import time; confirm why it is needed.
try:
    import qrcode as _qrcode_mod
except (ImportError, TypeError):
    _qrcode_mod = None  # type: ignore[assignment]
def _render_qr(url: str) -> bool:
    """Render *url* as an ASCII QR code in the terminal.

    Returns True when a QR was printed; False when the optional ``qrcode``
    package is absent or rendering fails for any reason.
    """
    if _qrcode_mod is None:
        return False
    try:
        code = _qrcode_mod.QRCode()
        code.add_data(url)
        code.make(fit=True)
        code.print_ascii(invert=True)
    except Exception:
        return False
    return True
def probe_bot(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
    """Verify bot connectivity via /open-apis/bot/v3/info.

    Uses the lark_oapi SDK when installed, otherwise falls back to raw HTTP.

    Returns:
        ``{"bot_name": ..., "bot_open_id": ...}`` on success, None on failure.
    """
    probe = _probe_bot_sdk if FEISHU_AVAILABLE else _probe_bot_http
    return probe(app_id, app_secret, domain)
def _build_onboard_client(app_id: str, app_secret: str, domain: str) -> Any:
    """Build a lark Client for the given credentials and domain."""
    sdk_domain = LARK_DOMAIN if domain == "lark" else FEISHU_DOMAIN
    builder = lark.Client.builder()
    builder = builder.app_id(app_id).app_secret(app_secret)
    builder = builder.domain(sdk_domain).log_level(lark.LogLevel.WARNING)
    return builder.build()
def _parse_bot_response(data: dict) -> Optional[dict]:
"""Extract bot_name and bot_open_id from a /bot/v3/info response."""
if data.get("code") != 0:
return None
bot = data.get("bot") or data.get("data", {}).get("bot") or {}
return {
"bot_name": bot.get("bot_name"),
"bot_open_id": bot.get("open_id"),
}
def _probe_bot_sdk(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
    """Probe bot info using the lark_oapi SDK.

    Returns the parsed bot-info dict, or None when the request or parsing
    fails for any reason (failures are logged at debug level).
    """
    try:
        client = _build_onboard_client(app_id, app_secret, domain)
        raw = client.request(
            method="GET",
            url="/open-apis/bot/v3/info",
            body=None,
            raw_response=True,
        )
        parsed = json.loads(raw.content)
    except Exception as exc:
        logger.debug("[Feishu onboard] SDK probe failed: %s", exc)
        return None
    return _parse_bot_response(parsed)
def _probe_bot_http(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
    """Fallback probe using raw HTTP (when lark_oapi is not installed).

    Two-step flow against the open-platform host:
      1. Exchange app credentials for a tenant_access_token (JSON POST).
      2. GET /open-apis/bot/v3/info with that token as a Bearer credential.

    Returns the parsed bot-info dict on success, None on any expected failure.
    """
    base_url = _onboard_open_base_url(domain)
    try:
        # Step 1: tenant access token.
        token_data = json.dumps({"app_id": app_id, "app_secret": app_secret}).encode("utf-8")
        token_req = Request(
            f"{base_url}/open-apis/auth/v3/tenant_access_token/internal",
            data=token_data,
            headers={"Content-Type": "application/json"},
        )
        with urlopen(token_req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
            token_res = json.loads(resp.read().decode("utf-8"))
        access_token = token_res.get("tenant_access_token")
        if not access_token:
            return None
        # Step 2: bot info with the freshly minted token.
        bot_req = Request(
            f"{base_url}/open-apis/bot/v3/info",
            headers={
                "Authorization": f"Bearer {access_token}",
                "Content-Type": "application/json",
            },
        )
        with urlopen(bot_req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
            bot_res = json.loads(resp.read().decode("utf-8"))
        return _parse_bot_response(bot_res)
    except (URLError, OSError, KeyError, json.JSONDecodeError) as exc:
        # Best-effort probe — any expected failure downgrades to None.
        logger.debug("[Feishu onboard] HTTP probe failed: %s", exc)
        return None
def qr_register(
    *,
    initial_domain: str = "feishu",
    timeout_seconds: int = 600,
) -> Optional[dict]:
    """Run the Feishu / Lark scan-to-create QR registration flow.

    Returns on success::

        {
            "app_id": str,
            "app_secret": str,
            "domain": "feishu" | "lark",
            "open_id": str | None,
            "bot_name": str | None,
            "bot_open_id": str | None,
        }

    Returns None on expected failures (network, auth denied, timeout).
    Unexpected errors (bugs, protocol regressions) propagate to the caller.
    """
    expected_failures = (RuntimeError, URLError, OSError, json.JSONDecodeError)
    try:
        return _qr_register_inner(initial_domain=initial_domain, timeout_seconds=timeout_seconds)
    except expected_failures as exc:
        logger.warning("[Feishu onboard] Registration failed: %s", exc)
    return None
def _qr_register_inner(
    *,
    initial_domain: str,
    timeout_seconds: int,
) -> Optional[dict]:
    """Run init → begin → poll → probe. Raises on network/protocol errors."""
    # Environment check + flow start.
    print(" Connecting to Feishu / Lark...", end="", flush=True)
    _init_registration(initial_domain)
    begin = _begin_registration(initial_domain)
    print(" done.")
    print()

    # Show the QR (or a plain URL when the qrcode package is missing).
    qr_url = begin["qr_url"]
    rendered = _render_qr(qr_url)
    if rendered:
        print(f"\n Scan the QR code above, or open this URL directly:\n {qr_url}")
    else:
        print(f" Open this URL in Feishu / Lark on your phone:\n\n {qr_url}\n")
        print(" Tip: pip install qrcode to display a scannable QR code here next time")
    print()

    # Wait for the scan; the effective timeout is the smaller of the server
    # TTL and the caller-supplied limit.
    creds = _poll_registration(
        device_code=begin["device_code"],
        interval=begin["interval"],
        expire_in=min(begin["expire_in"], timeout_seconds),
        domain=initial_domain,
    )
    if not creds:
        return None

    # Probe bot — best-effort; registration succeeds even if the probe fails.
    bot_info = probe_bot(creds["app_id"], creds["app_secret"], creds["domain"]) or {}
    creds["bot_name"] = bot_info.get("bot_name")
    creds["bot_open_id"] = bot_info.get("bot_open_id")
    return creds

View file

@ -782,7 +782,7 @@ class MatrixAdapter(BasePlatformAdapter):
# Try aiohttp first (always available), fall back to httpx # Try aiohttp first (always available), fall back to httpx
try: try:
import aiohttp as _aiohttp import aiohttp as _aiohttp
async with _aiohttp.ClientSession() as http: async with _aiohttp.ClientSession(trust_env=True) as http:
async with http.get(image_url, timeout=_aiohttp.ClientTimeout(total=30)) as resp: async with http.get(image_url, timeout=_aiohttp.ClientTimeout(total=30)) as resp:
resp.raise_for_status() resp.raise_for_status()
data = await resp.read() data = await resp.read()
@ -1135,7 +1135,10 @@ class MatrixAdapter(BasePlatformAdapter):
thread_id = relates_to.get("event_id") thread_id = relates_to.get("event_id")
formatted_body = source_content.get("formatted_body") formatted_body = source_content.get("formatted_body")
is_mentioned = self._is_bot_mentioned(body, formatted_body) # m.mentions.user_ids (MSC3952 / Matrix v1.7) — authoritative mention signal.
mentions_block = source_content.get("m.mentions") or {}
mention_user_ids = mentions_block.get("user_ids") if isinstance(mentions_block, dict) else None
is_mentioned = self._is_bot_mentioned(body, formatted_body, mention_user_ids)
# Require-mention gating. # Require-mention gating.
if not is_dm: if not is_dm:
@ -1822,8 +1825,24 @@ class MatrixAdapter(BasePlatformAdapter):
# Mention detection helpers # Mention detection helpers
# ------------------------------------------------------------------ # ------------------------------------------------------------------
def _is_bot_mentioned(self, body: str, formatted_body: Optional[str] = None) -> bool: def _is_bot_mentioned(
"""Return True if the bot is mentioned in the message.""" self,
body: str,
formatted_body: Optional[str] = None,
mention_user_ids: Optional[list] = None,
) -> bool:
"""Return True if the bot is mentioned in the message.
Per MSC3952, ``m.mentions.user_ids`` is the authoritative mention
signal in the Matrix spec. When the sender's client populates that
field with the bot's user-id, we trust it — even when the visible
body text does not contain an explicit ``@bot`` string (some clients
only render mention "pills" in ``formatted_body`` or use display
names).
"""
# m.mentions.user_ids — authoritative per MSC3952 / Matrix v1.7.
if mention_user_ids and self._user_id and self._user_id in mention_user_ids:
return True
if not body and not formatted_body: if not body and not formatted_body:
return False return False
if self._user_id and self._user_id in body: if self._user_id and self._user_id in body:

View file

@ -65,7 +65,10 @@ from gateway.platforms.base import (
cache_image_from_bytes, cache_image_from_bytes,
cache_audio_from_bytes, cache_audio_from_bytes,
cache_document_from_bytes, cache_document_from_bytes,
resolve_proxy_url,
SUPPORTED_DOCUMENT_TYPES, SUPPORTED_DOCUMENT_TYPES,
utf16_len,
_prefix_within_utf16_limit,
) )
from gateway.platforms.telegram_network import ( from gateway.platforms.telegram_network import (
TelegramFallbackTransport, TelegramFallbackTransport,
@ -537,10 +540,7 @@ class TelegramAdapter(BasePlatformAdapter):
"write_timeout": _env_float("HERMES_TELEGRAM_HTTP_WRITE_TIMEOUT", 20.0), "write_timeout": _env_float("HERMES_TELEGRAM_HTTP_WRITE_TIMEOUT", 20.0),
} }
proxy_configured = any( proxy_url = resolve_proxy_url()
(os.getenv(k) or "").strip()
for k in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY", "https_proxy", "http_proxy", "all_proxy")
)
disable_fallback = (os.getenv("HERMES_TELEGRAM_DISABLE_FALLBACK_IPS", "").strip().lower() in ("1", "true", "yes", "on")) disable_fallback = (os.getenv("HERMES_TELEGRAM_DISABLE_FALLBACK_IPS", "").strip().lower() in ("1", "true", "yes", "on"))
fallback_ips = self._fallback_ips() fallback_ips = self._fallback_ips()
if not fallback_ips: if not fallback_ips:
@ -551,7 +551,7 @@ class TelegramAdapter(BasePlatformAdapter):
", ".join(fallback_ips), ", ".join(fallback_ips),
) )
if fallback_ips and not proxy_configured and not disable_fallback: if fallback_ips and not proxy_url and not disable_fallback:
logger.info( logger.info(
"[%s] Telegram fallback IPs active: %s", "[%s] Telegram fallback IPs active: %s",
self.name, self.name,
@ -567,10 +567,12 @@ class TelegramAdapter(BasePlatformAdapter):
**request_kwargs, **request_kwargs,
httpx_kwargs={"transport": TelegramFallbackTransport(fallback_ips)}, httpx_kwargs={"transport": TelegramFallbackTransport(fallback_ips)},
) )
elif proxy_url:
logger.info("[%s] Proxy detected; passing explicitly to HTTPXRequest: %s", self.name, proxy_url)
request = HTTPXRequest(**request_kwargs, proxy=proxy_url)
get_updates_request = HTTPXRequest(**request_kwargs, proxy=proxy_url)
else: else:
if proxy_configured: if disable_fallback:
logger.info("[%s] Proxy configured; skipping Telegram fallback-IP transport", self.name)
elif disable_fallback:
logger.info("[%s] Telegram fallback-IP transport disabled via env", self.name) logger.info("[%s] Telegram fallback-IP transport disabled via env", self.name)
request = HTTPXRequest(**request_kwargs) request = HTTPXRequest(**request_kwargs)
get_updates_request = HTTPXRequest(**request_kwargs) get_updates_request = HTTPXRequest(**request_kwargs)
@ -799,7 +801,9 @@ class TelegramAdapter(BasePlatformAdapter):
try: try:
# Format and split message if needed # Format and split message if needed
formatted = self.format_message(content) formatted = self.format_message(content)
chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH) chunks = self.truncate_message(
formatted, self.MAX_MESSAGE_LENGTH, len_fn=utf16_len,
)
if len(chunks) > 1: if len(chunks) > 1:
# truncate_message appends a raw " (1/2)" suffix. Escape the # truncate_message appends a raw " (1/2)" suffix. Escape the
# MarkdownV2-special parentheses so Telegram doesn't reject the # MarkdownV2-special parentheses so Telegram doesn't reject the
@ -970,7 +974,9 @@ class TelegramAdapter(BasePlatformAdapter):
# streaming). Truncate and succeed so the stream consumer can # streaming). Truncate and succeed so the stream consumer can
# split the overflow into a new message instead of dying. # split the overflow into a new message instead of dying.
if "message_too_long" in err_str or "too long" in err_str: if "message_too_long" in err_str or "too long" in err_str:
truncated = content[: self.MAX_MESSAGE_LENGTH - 20] + "" truncated = _prefix_within_utf16_limit(
content, self.MAX_MESSAGE_LENGTH - 20
) + ""
try: try:
await self._bot.edit_message_text( await self._bot.edit_message_text(
chat_id=int(chat_id), chat_id=int(chat_id),

View file

@ -266,7 +266,7 @@ class WeComAdapter(BasePlatformAdapter):
async def _open_connection(self) -> None: async def _open_connection(self) -> None:
"""Open and authenticate a websocket connection.""" """Open and authenticate a websocket connection."""
await self._cleanup_ws() await self._cleanup_ws()
self._session = aiohttp.ClientSession() self._session = aiohttp.ClientSession(trust_env=True)
self._ws = await self._session.ws_connect( self._ws = await self._session.ws_connect(
self._ws_url, self._ws_url,
heartbeat=HEARTBEAT_INTERVAL_SECONDS * 2, heartbeat=HEARTBEAT_INTERVAL_SECONDS * 2,

View file

@ -112,6 +112,7 @@ TYPING_STOP = 2
_HEADER_RE = re.compile(r"^(#{1,6})\s+(.+?)\s*$") _HEADER_RE = re.compile(r"^(#{1,6})\s+(.+?)\s*$")
_TABLE_RULE_RE = re.compile(r"^\s*\|?(?:\s*:?-{3,}:?\s*\|)+\s*:?-{3,}:?\s*\|?\s*$") _TABLE_RULE_RE = re.compile(r"^\s*\|?(?:\s*:?-{3,}:?\s*\|)+\s*:?-{3,}:?\s*\|?\s*$")
_FENCE_RE = re.compile(r"^```([^\n`]*)\s*$") _FENCE_RE = re.compile(r"^```([^\n`]*)\s*$")
_MARKDOWN_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
def check_weixin_requirements() -> bool: def check_weixin_requirements() -> bool:
@ -398,15 +399,16 @@ async def _send_message(
context_token: Optional[str], context_token: Optional[str],
client_id: str, client_id: str,
) -> None: ) -> None:
if not text or not text.strip():
raise ValueError("_send_message: text must not be empty")
message: Dict[str, Any] = { message: Dict[str, Any] = {
"from_user_id": "", "from_user_id": "",
"to_user_id": to, "to_user_id": to,
"client_id": client_id, "client_id": client_id,
"message_type": MSG_TYPE_BOT, "message_type": MSG_TYPE_BOT,
"message_state": MSG_STATE_FINISH, "message_state": MSG_STATE_FINISH,
"item_list": [{"type": ITEM_TEXT, "text_item": {"text": text}}],
} }
if text:
message["item_list"] = [{"type": ITEM_TEXT, "text_item": {"text": text}}]
if context_token: if context_token:
message["context_token"] = context_token message["context_token"] = context_token
await _api_post( await _api_post(
@ -499,13 +501,15 @@ async def _upload_ciphertext(
session: "aiohttp.ClientSession", session: "aiohttp.ClientSession",
*, *,
ciphertext: bytes, ciphertext: bytes,
cdn_base_url: str, upload_url: str,
upload_param: str,
filekey: str,
) -> str: ) -> str:
url = _cdn_upload_url(cdn_base_url, upload_param, filekey) """Upload encrypted media to the CDN.
Accepts either a constructed CDN URL (from upload_param) or a direct
upload_full_url both use POST with the raw ciphertext as the body.
"""
timeout = aiohttp.ClientTimeout(total=120) timeout = aiohttp.ClientTimeout(total=120)
async with session.post(url, data=ciphertext, headers={"Content-Type": "application/octet-stream"}, timeout=timeout) as response: async with session.post(upload_url, data=ciphertext, headers={"Content-Type": "application/octet-stream"}, timeout=timeout) as response:
if response.status == 200: if response.status == 200:
encrypted_param = response.headers.get("x-encrypted-param") encrypted_param = response.headers.get("x-encrypted-param")
if encrypted_param: if encrypted_param:
@ -649,7 +653,7 @@ def _normalize_markdown_blocks(content: str) -> str:
result.append(_rewrite_table_block_for_weixin(table_lines)) result.append(_rewrite_table_block_for_weixin(table_lines))
continue continue
result.append(_rewrite_headers_for_weixin(line)) result.append(_MARKDOWN_LINK_RE.sub(r"\1 (\2)", _rewrite_headers_for_weixin(line)))
i += 1 i += 1
normalized = "\n".join(item.rstrip() for item in result) normalized = "\n".join(item.rstrip() for item in result)
@ -811,6 +815,8 @@ def _split_text_for_weixin_delivery(
``platforms.weixin.extra.split_multiline_messages`` (``true`` / ``false``) ``platforms.weixin.extra.split_multiline_messages`` (``true`` / ``false``)
or the env var ``WEIXIN_SPLIT_MULTILINE_MESSAGES``. or the env var ``WEIXIN_SPLIT_MULTILINE_MESSAGES``.
""" """
if not content:
return []
if split_per_line: if split_per_line:
# Legacy: one message per top-level delivery unit. # Legacy: one message per top-level delivery unit.
if len(content) <= max_length and "\n" not in content: if len(content) <= max_length and "\n" not in content:
@ -821,14 +827,14 @@ def _split_text_for_weixin_delivery(
chunks.append(unit) chunks.append(unit)
continue continue
chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length)) chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length))
return chunks or [content] return [c for c in chunks if c] or [content]
# Compact (default): single message when under the limit — unless the # Compact (default): single message when under the limit — unless the
# content looks like a short chatty exchange, in which case split into # content looks like a short chatty exchange, in which case split into
# separate bubbles for a more natural chat feel. # separate bubbles for a more natural chat feel.
if len(content) <= max_length: if len(content) <= max_length:
return ( return (
_split_delivery_units_for_weixin(content) [u for u in _split_delivery_units_for_weixin(content) if u]
if _should_split_short_chat_block_for_weixin(content) if _should_split_short_chat_block_for_weixin(content)
else [content] else [content]
) )
@ -929,7 +935,7 @@ async def qr_login(
if not AIOHTTP_AVAILABLE: if not AIOHTTP_AVAILABLE:
raise RuntimeError("aiohttp is required for Weixin QR login") raise RuntimeError("aiohttp is required for Weixin QR login")
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession(trust_env=True) as session:
try: try:
qr_resp = await _api_get( qr_resp = await _api_get(
session, session,
@ -1042,6 +1048,10 @@ class WeixinAdapter(BasePlatformAdapter):
MAX_MESSAGE_LENGTH = 4000 MAX_MESSAGE_LENGTH = 4000
# WeChat does not support editing sent messages — streaming must use the
# fallback "send-final-only" path so the cursor (▉) is never left visible.
SUPPORTS_MESSAGE_EDITING = False
def __init__(self, config: PlatformConfig): def __init__(self, config: PlatformConfig):
super().__init__(config, Platform.WEIXIN) super().__init__(config, Platform.WEIXIN)
extra = config.extra or {} extra = config.extra or {}
@ -1124,7 +1134,7 @@ class WeixinAdapter(BasePlatformAdapter):
except Exception as exc: except Exception as exc:
logger.debug("[%s] Token lock unavailable (non-fatal): %s", self.name, exc) logger.debug("[%s] Token lock unavailable (non-fatal): %s", self.name, exc)
self._session = aiohttp.ClientSession() self._session = aiohttp.ClientSession(trust_env=True)
self._token_store.restore(self._account_id) self._token_store.restore(self._account_id)
self._poll_task = asyncio.create_task(self._poll_loop(), name="weixin-poll") self._poll_task = asyncio.create_task(self._poll_loop(), name="weixin-poll")
self._mark_connected() self._mark_connected()
@ -1451,7 +1461,7 @@ class WeixinAdapter(BasePlatformAdapter):
context_token = self._token_store.get(self._account_id, chat_id) context_token = self._token_store.get(self._account_id, chat_id)
last_message_id: Optional[str] = None last_message_id: Optional[str] = None
try: try:
chunks = self._split_text(self.format_message(content)) chunks = [c for c in self._split_text(self.format_message(content)) if c and c.strip()]
for idx, chunk in enumerate(chunks): for idx, chunk in enumerate(chunks):
client_id = f"hermes-weixin-{uuid.uuid4().hex}" client_id = f"hermes-weixin-{uuid.uuid4().hex}"
await self._send_text_chunk( await self._send_text_chunk(
@ -1537,24 +1547,51 @@ class WeixinAdapter(BasePlatformAdapter):
reply_to: Optional[str] = None, reply_to: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None,
) -> SendResult: ) -> SendResult:
return await self.send_document(chat_id, path, caption=caption, metadata=metadata) return await self.send_document(chat_id, file_path=path, caption=caption, metadata=metadata)
async def send_document( async def send_document(
self, self,
chat_id: str, chat_id: str,
path: str, file_path: str,
caption: str = "", caption: str = "",
metadata: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None,
) -> SendResult: ) -> SendResult:
if not self._session or not self._token: if not self._session or not self._token:
return SendResult(success=False, error="Not connected") return SendResult(success=False, error="Not connected")
try: try:
message_id = await self._send_file(chat_id, path, caption) message_id = await self._send_file(chat_id, file_path, caption)
return SendResult(success=True, message_id=message_id) return SendResult(success=True, message_id=message_id)
except Exception as exc: except Exception as exc:
logger.error("[%s] send_document failed to=%s: %s", self.name, _safe_id(chat_id), exc) logger.error("[%s] send_document failed to=%s: %s", self.name, _safe_id(chat_id), exc)
return SendResult(success=False, error=str(exc)) return SendResult(success=False, error=str(exc))
async def send_video(
self,
chat_id: str,
video_path: str,
caption: Optional[str] = None,
reply_to: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> SendResult:
if not self._session or not self._token:
return SendResult(success=False, error="Not connected")
try:
message_id = await self._send_file(chat_id, video_path, caption or "")
return SendResult(success=True, message_id=message_id)
except Exception as exc:
logger.error("[%s] send_video failed to=%s: %s", self.name, _safe_id(chat_id), exc)
return SendResult(success=False, error=str(exc))
async def send_voice(
self,
chat_id: str,
audio_path: str,
caption: Optional[str] = None,
reply_to: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> SendResult:
return await self.send_document(chat_id, audio_path, caption=caption or "", metadata=metadata)
async def _download_remote_media(self, url: str) -> str: async def _download_remote_media(self, url: str) -> str:
from tools.url_safety import is_safe_url from tools.url_safety import is_safe_url
@ -1577,6 +1614,7 @@ class WeixinAdapter(BasePlatformAdapter):
filekey = secrets.token_hex(16) filekey = secrets.token_hex(16)
aes_key = secrets.token_bytes(16) aes_key = secrets.token_bytes(16)
rawsize = len(plaintext) rawsize = len(plaintext)
rawfilemd5 = hashlib.md5(plaintext).hexdigest()
upload_response = await _get_upload_url( upload_response = await _get_upload_url(
self._session, self._session,
base_url=self._base_url, base_url=self._base_url,
@ -1585,41 +1623,42 @@ class WeixinAdapter(BasePlatformAdapter):
media_type=media_type, media_type=media_type,
filekey=filekey, filekey=filekey,
rawsize=rawsize, rawsize=rawsize,
rawfilemd5=hashlib.md5(plaintext).hexdigest(), rawfilemd5=rawfilemd5,
filesize=_aes_padded_size(rawsize), filesize=_aes_padded_size(rawsize),
aeskey_hex=aes_key.hex(), aeskey_hex=aes_key.hex(),
) )
upload_param = str(upload_response.get("upload_param") or "") upload_param = str(upload_response.get("upload_param") or "")
upload_full_url = str(upload_response.get("upload_full_url") or "") upload_full_url = str(upload_response.get("upload_full_url") or "")
ciphertext = _aes128_ecb_encrypt(plaintext, aes_key) ciphertext = _aes128_ecb_encrypt(plaintext, aes_key)
if upload_param:
encrypted_query_param = await _upload_ciphertext( # Prefer upload_full_url (direct CDN), fall back to constructed CDN URL
self._session, # from upload_param. Both paths use POST — the old PUT for
ciphertext=ciphertext, # upload_full_url caused 404s on the WeChat CDN.
cdn_base_url=self._cdn_base_url, if upload_full_url:
upload_param=upload_param, upload_url = upload_full_url
filekey=filekey, elif upload_param:
) upload_url = _cdn_upload_url(self._cdn_base_url, upload_param, filekey)
elif upload_full_url:
timeout = aiohttp.ClientTimeout(total=120)
async with self._session.put(
upload_full_url,
data=ciphertext,
headers={"Content-Type": "application/octet-stream"},
timeout=timeout,
) as response:
response.raise_for_status()
encrypted_query_param = response.headers.get("x-encrypted-param") or filekey
else: else:
raise RuntimeError(f"getUploadUrl returned neither upload_param nor upload_full_url: {upload_response}") raise RuntimeError(f"getUploadUrl returned neither upload_param nor upload_full_url: {upload_response}")
encrypted_query_param = await _upload_ciphertext(
self._session,
ciphertext=ciphertext,
upload_url=upload_url,
)
context_token = self._token_store.get(self._account_id, chat_id) context_token = self._token_store.get(self._account_id, chat_id)
# The iLink API expects aes_key as base64(hex_string), not base64(raw_bytes).
# Sending base64(raw_bytes) causes images to show as grey boxes on the
# receiver side because the decryption key doesn't match.
aes_key_for_api = base64.b64encode(aes_key.hex().encode("ascii")).decode("ascii")
media_item = item_builder( media_item = item_builder(
encrypt_query_param=encrypted_query_param, encrypt_query_param=encrypted_query_param,
aes_key_b64=base64.b64encode(aes_key).decode("ascii"), aes_key_for_api=aes_key_for_api,
ciphertext_size=len(ciphertext), ciphertext_size=len(ciphertext),
plaintext_size=rawsize, plaintext_size=rawsize,
filename=Path(path).name, filename=Path(path).name,
rawfilemd5=rawfilemd5,
) )
last_message_id = None last_message_id = None
@ -1659,39 +1698,53 @@ class WeixinAdapter(BasePlatformAdapter):
def _outbound_media_builder(self, path: str): def _outbound_media_builder(self, path: str):
mime = mimetypes.guess_type(path)[0] or "application/octet-stream" mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
if mime.startswith("image/"): if mime.startswith("image/"):
return MEDIA_IMAGE, lambda **kwargs: { return MEDIA_IMAGE, lambda **kw: {
"type": ITEM_IMAGE, "type": ITEM_IMAGE,
"image_item": { "image_item": {
"media": { "media": {
"encrypt_query_param": kwargs["encrypt_query_param"], "encrypt_query_param": kw["encrypt_query_param"],
"aes_key": kwargs["aes_key_b64"], "aes_key": kw["aes_key_for_api"],
"encrypt_type": 1, "encrypt_type": 1,
}, },
"mid_size": kwargs["ciphertext_size"], "mid_size": kw["ciphertext_size"],
}, },
} }
if mime.startswith("video/"): if mime.startswith("video/"):
return MEDIA_VIDEO, lambda **kwargs: { return MEDIA_VIDEO, lambda **kw: {
"type": ITEM_VIDEO, "type": ITEM_VIDEO,
"video_item": { "video_item": {
"media": { "media": {
"encrypt_query_param": kwargs["encrypt_query_param"], "encrypt_query_param": kw["encrypt_query_param"],
"aes_key": kwargs["aes_key_b64"], "aes_key": kw["aes_key_for_api"],
"encrypt_type": 1, "encrypt_type": 1,
}, },
"video_size": kwargs["ciphertext_size"], "video_size": kw["ciphertext_size"],
"play_length": kw.get("play_length", 0),
"video_md5": kw.get("rawfilemd5", ""),
}, },
} }
return MEDIA_FILE, lambda **kwargs: { if mime.startswith("audio/") or path.endswith(".silk"):
return MEDIA_VOICE, lambda **kw: {
"type": ITEM_VOICE,
"voice_item": {
"media": {
"encrypt_query_param": kw["encrypt_query_param"],
"aes_key": kw["aes_key_for_api"],
"encrypt_type": 1,
},
"playtime": kw.get("playtime", 0),
},
}
return MEDIA_FILE, lambda **kw: {
"type": ITEM_FILE, "type": ITEM_FILE,
"file_item": { "file_item": {
"media": { "media": {
"encrypt_query_param": kwargs["encrypt_query_param"], "encrypt_query_param": kw["encrypt_query_param"],
"aes_key": kwargs["aes_key_b64"], "aes_key": kw["aes_key_for_api"],
"encrypt_type": 1, "encrypt_type": 1,
}, },
"file_name": kwargs["filename"], "file_name": kw["filename"],
"len": str(kwargs["plaintext_size"]), "len": str(kw["plaintext_size"]),
}, },
} }
@ -1731,7 +1784,7 @@ async def send_weixin_direct(
token_store.restore(account_id) token_store.restore(account_id)
context_token = token_store.get(account_id, chat_id) context_token = token_store.get(account_id, chat_id)
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession(trust_env=True) as session:
adapter = WeixinAdapter( adapter = WeixinAdapter(
PlatformConfig( PlatformConfig(
enabled=True, enabled=True,

View file

@ -120,8 +120,9 @@ class WhatsAppAdapter(BasePlatformAdapter):
- session_path: Path to store WhatsApp session data - session_path: Path to store WhatsApp session data
""" """
# WhatsApp message limits # WhatsApp message limits — practical UX limit, not protocol max.
MAX_MESSAGE_LENGTH = 65536 # WhatsApp allows longer messages # WhatsApp allows ~65K but long messages are unreadable on mobile.
MAX_MESSAGE_LENGTH = 4096
# Default bridge location relative to the hermes-agent install # Default bridge location relative to the hermes-agent install
_DEFAULT_BRIDGE_DIR = Path(__file__).resolve().parents[2] / "scripts" / "whatsapp-bridge" _DEFAULT_BRIDGE_DIR = Path(__file__).resolve().parents[2] / "scripts" / "whatsapp-bridge"
@ -531,6 +532,63 @@ class WhatsAppAdapter(BasePlatformAdapter):
self._close_bridge_log() self._close_bridge_log()
print(f"[{self.name}] Disconnected") print(f"[{self.name}] Disconnected")
def format_message(self, content: str) -> str:
"""Convert standard markdown to WhatsApp-compatible formatting.
WhatsApp supports: *bold*, _italic_, ~strikethrough~, ```code```,
and monospaced `inline`. Standard markdown uses different syntax
for bold/italic/strikethrough, so we convert here.
Code blocks (``` fenced) and inline code (`) are protected from
conversion via placeholder substitution.
"""
if not content:
return content
# --- 1. Protect fenced code blocks from formatting changes ---
_FENCE_PH = "\x00FENCE"
fences: list[str] = []
def _save_fence(m: re.Match) -> str:
fences.append(m.group(0))
return f"{_FENCE_PH}{len(fences) - 1}\x00"
result = re.sub(r"```[\s\S]*?```", _save_fence, content)
# --- 2. Protect inline code ---
_CODE_PH = "\x00CODE"
codes: list[str] = []
def _save_code(m: re.Match) -> str:
codes.append(m.group(0))
return f"{_CODE_PH}{len(codes) - 1}\x00"
result = re.sub(r"`[^`\n]+`", _save_code, result)
# --- 3. Convert markdown formatting to WhatsApp syntax ---
# Bold: **text** or __text__ → *text*
result = re.sub(r"\*\*(.+?)\*\*", r"*\1*", result)
result = re.sub(r"__(.+?)__", r"*\1*", result)
# Strikethrough: ~~text~~ → ~text~
result = re.sub(r"~~(.+?)~~", r"~\1~", result)
# Italic: *text* is already WhatsApp italic — leave as-is
# _text_ is already WhatsApp italic — leave as-is
# --- 4. Convert markdown headers to bold text ---
# # Header → *Header*
result = re.sub(r"^#{1,6}\s+(.+)$", r"*\1*", result, flags=re.MULTILINE)
# --- 5. Convert markdown links: [text](url) → text (url) ---
result = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r"\1 (\2)", result)
# --- 6. Restore protected sections ---
for i, fence in enumerate(fences):
result = result.replace(f"{_FENCE_PH}{i}\x00", fence)
for i, code in enumerate(codes):
result = result.replace(f"{_CODE_PH}{i}\x00", code)
return result
async def send( async def send(
self, self,
chat_id: str, chat_id: str,
@ -538,21 +596,35 @@ class WhatsAppAdapter(BasePlatformAdapter):
reply_to: Optional[str] = None, reply_to: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None metadata: Optional[Dict[str, Any]] = None
) -> SendResult: ) -> SendResult:
"""Send a message via the WhatsApp bridge.""" """Send a message via the WhatsApp bridge.
Formats markdown for WhatsApp, splits long messages into chunks
that preserve code block boundaries, and sends each chunk sequentially.
"""
if not self._running or not self._http_session: if not self._running or not self._http_session:
return SendResult(success=False, error="Not connected") return SendResult(success=False, error="Not connected")
bridge_exit = await self._check_managed_bridge_exit() bridge_exit = await self._check_managed_bridge_exit()
if bridge_exit: if bridge_exit:
return SendResult(success=False, error=bridge_exit) return SendResult(success=False, error=bridge_exit)
if not content or not content.strip():
return SendResult(success=True, message_id=None)
try: try:
import aiohttp import aiohttp
payload = { # Format and chunk the message
formatted = self.format_message(content)
chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH)
last_message_id = None
for chunk in chunks:
payload: Dict[str, Any] = {
"chatId": chat_id, "chatId": chat_id,
"message": content, "message": chunk,
} }
if reply_to: if reply_to and last_message_id is None:
# Only reply-to on the first chunk
payload["replyTo"] = reply_to payload["replyTo"] = reply_to
async with self._http_session.post( async with self._http_session.post(
@ -562,14 +634,19 @@ class WhatsAppAdapter(BasePlatformAdapter):
) as resp: ) as resp:
if resp.status == 200: if resp.status == 200:
data = await resp.json() data = await resp.json()
return SendResult( last_message_id = data.get("messageId")
success=True,
message_id=data.get("messageId"),
raw_response=data
)
else: else:
error = await resp.text() error = await resp.text()
return SendResult(success=False, error=error) return SendResult(success=False, error=error)
# Small delay between chunks to avoid rate limiting
if len(chunks) > 1:
await asyncio.sleep(0.3)
return SendResult(
success=True,
message_id=last_message_id,
)
except Exception as e: except Exception as e:
return SendResult(success=False, error=str(e)) return SendResult(success=False, error=str(e))

View file

@ -186,6 +186,8 @@ if _config_path.exists():
os.environ["HERMES_AGENT_TIMEOUT"] = str(_agent_cfg["gateway_timeout"]) os.environ["HERMES_AGENT_TIMEOUT"] = str(_agent_cfg["gateway_timeout"])
if "gateway_timeout_warning" in _agent_cfg and "HERMES_AGENT_TIMEOUT_WARNING" not in os.environ: if "gateway_timeout_warning" in _agent_cfg and "HERMES_AGENT_TIMEOUT_WARNING" not in os.environ:
os.environ["HERMES_AGENT_TIMEOUT_WARNING"] = str(_agent_cfg["gateway_timeout_warning"]) os.environ["HERMES_AGENT_TIMEOUT_WARNING"] = str(_agent_cfg["gateway_timeout_warning"])
if "gateway_notify_interval" in _agent_cfg and "HERMES_AGENT_NOTIFY_INTERVAL" not in os.environ:
os.environ["HERMES_AGENT_NOTIFY_INTERVAL"] = str(_agent_cfg["gateway_notify_interval"])
if "restart_drain_timeout" in _agent_cfg and "HERMES_RESTART_DRAIN_TIMEOUT" not in os.environ: if "restart_drain_timeout" in _agent_cfg and "HERMES_RESTART_DRAIN_TIMEOUT" not in os.environ:
os.environ["HERMES_RESTART_DRAIN_TIMEOUT"] = str(_agent_cfg["restart_drain_timeout"]) os.environ["HERMES_RESTART_DRAIN_TIMEOUT"] = str(_agent_cfg["restart_drain_timeout"])
_display_cfg = _cfg.get("display", {}) _display_cfg = _cfg.get("display", {})
@ -1715,6 +1717,9 @@ class GatewayRunner:
): ):
self._schedule_update_notification_watch() self._schedule_update_notification_watch()
# Notify the chat that initiated /restart that the gateway is back.
await self._send_restart_notification()
# Drain any recovered process watchers (from crash recovery checkpoint) # Drain any recovered process watchers (from crash recovery checkpoint)
try: try:
from tools.process_registry import process_registry from tools.process_registry import process_registry
@ -2541,11 +2546,8 @@ class GatewayRunner:
self._pending_messages.pop(_quick_key, None) self._pending_messages.pop(_quick_key, None)
if _quick_key in self._running_agents: if _quick_key in self._running_agents:
del self._running_agents[_quick_key] del self._running_agents[_quick_key]
# Mark session suspended so the next message starts fresh logger.info("STOP for session %s — agent interrupted, session lock released", _quick_key[:20])
# instead of resuming the stuck context (#7536). return "⚡ Stopped. You can continue this session."
self.session_store.suspend_session(_quick_key)
logger.info("HARD STOP for session %s — suspended, session lock released", _quick_key[:20])
return "⚡ Force-stopped. The session is suspended — your next message will start fresh."
# /reset and /new must bypass the running-agent guard so they # /reset and /new must bypass the running-agent guard so they
# actually dispatch as commands instead of being queued as user # actually dispatch as commands instead of being queued as user
@ -2762,6 +2764,9 @@ class GatewayRunner:
if canonical == "update": if canonical == "update":
return await self._handle_update_command(event) return await self._handle_update_command(event)
if canonical == "debug":
return await self._handle_debug_command(event)
if canonical == "title": if canonical == "title":
return await self._handle_title_command(event) return await self._handle_title_command(event)
@ -3329,8 +3334,13 @@ class GatewayRunner:
# Must run after runtime resolution so _hyg_base_url is set. # Must run after runtime resolution so _hyg_base_url is set.
if _hyg_config_context_length is None and _hyg_base_url: if _hyg_config_context_length is None and _hyg_base_url:
try: try:
try:
from hermes_cli.config import get_compatible_custom_providers as _gw_gcp
_hyg_custom_providers = _gw_gcp(_hyg_data)
except Exception:
_hyg_custom_providers = _hyg_data.get("custom_providers") _hyg_custom_providers = _hyg_data.get("custom_providers")
if isinstance(_hyg_custom_providers, list): if not isinstance(_hyg_custom_providers, list):
_hyg_custom_providers = []
for _cp in _hyg_custom_providers: for _cp in _hyg_custom_providers:
if not isinstance(_cp, dict): if not isinstance(_cp, dict):
continue continue
@ -4204,9 +4214,7 @@ class GatewayRunner:
only through normal command dispatch (no running agent) or as a only through normal command dispatch (no running agent) or as a
fallback. Force-clean the session lock in all cases for safety. fallback. Force-clean the session lock in all cases for safety.
When there IS a running/pending agent, the session is also marked The session is preserved so the user can continue the conversation.
as *suspended* so the next message starts a fresh session instead
of resuming the stuck context (#7536).
""" """
source = event.source source = event.source
session_entry = self.session_store.get_or_create_session(source) session_entry = self.session_store.get_or_create_session(source)
@ -4217,17 +4225,15 @@ class GatewayRunner:
# Force-clean the sentinel so the session is unlocked. # Force-clean the sentinel so the session is unlocked.
if session_key in self._running_agents: if session_key in self._running_agents:
del self._running_agents[session_key] del self._running_agents[session_key]
self.session_store.suspend_session(session_key) logger.info("STOP (pending) for session %s — sentinel cleared", session_key[:20])
logger.info("HARD STOP (pending) for session %s — suspended, sentinel cleared", session_key[:20]) return "⚡ Stopped. The agent hadn't started yet — you can continue this session."
return "⚡ Force-stopped. The agent was still starting — your next message will start fresh."
if agent: if agent:
agent.interrupt("Stop requested") agent.interrupt("Stop requested")
# Force-clean the session lock so a truly hung agent doesn't # Force-clean the session lock so a truly hung agent doesn't
# keep it locked forever. # keep it locked forever.
if session_key in self._running_agents: if session_key in self._running_agents:
del self._running_agents[session_key] del self._running_agents[session_key]
self.session_store.suspend_session(session_key) return "⚡ Stopped. You can continue this session."
return "⚡ Force-stopped. Your next message will start a fresh session."
else: else:
return "No active task to stop." return "No active task to stop."
@ -4239,11 +4245,36 @@ class GatewayRunner:
return f"⏳ Draining {count} active agent(s) before restart..." return f"⏳ Draining {count} active agent(s) before restart..."
return "⏳ Gateway restart already in progress..." return "⏳ Gateway restart already in progress..."
# Save the requester's routing info so the new gateway process can
# notify them once it comes back online.
try:
import json as _json
notify_data = {
"platform": event.source.platform.value if event.source.platform else None,
"chat_id": event.source.chat_id,
}
if event.source.thread_id:
notify_data["thread_id"] = event.source.thread_id
(_hermes_home / ".restart_notify.json").write_text(
_json.dumps(notify_data)
)
except Exception as e:
logger.debug("Failed to write restart notify file: %s", e)
active_agents = self._running_agent_count() active_agents = self._running_agent_count()
# When running under a service manager (systemd/launchd), use the
# service restart path: exit with code 75 so the service manager
# restarts us. The detached subprocess approach (setsid + bash)
# doesn't work under systemd because KillMode=mixed kills all
# processes in the cgroup, including the detached helper.
_under_service = bool(os.environ.get("INVOCATION_ID")) # systemd sets this
if _under_service:
self.request_restart(detached=False, via_service=True)
else:
self.request_restart(detached=True, via_service=False) self.request_restart(detached=True, via_service=False)
if active_agents: if active_agents:
return f"⏳ Draining {active_agents} active agent(s) before restart..." return f"⏳ Draining {active_agents} active agent(s) before restart..."
return "♻ Restarting gateway..." return "♻ Restarting gateway. If you aren't notified within 60 seconds, restart from the console with `hermes gateway restart`."
async def _handle_help_command(self, event: MessageEvent) -> str: async def _handle_help_command(self, event: MessageEvent) -> str:
"""Handle /help command - list available commands.""" """Handle /help command - list available commands."""
@ -4360,6 +4391,10 @@ class GatewayRunner:
current_provider = model_cfg.get("provider", current_provider) current_provider = model_cfg.get("provider", current_provider)
current_base_url = model_cfg.get("base_url", "") current_base_url = model_cfg.get("base_url", "")
user_provs = cfg.get("providers") user_provs = cfg.get("providers")
try:
from hermes_cli.config import get_compatible_custom_providers
custom_provs = get_compatible_custom_providers(cfg)
except Exception:
custom_provs = cfg.get("custom_providers") custom_provs = cfg.get("custom_providers")
except Exception: except Exception:
pass pass
@ -4991,6 +5026,8 @@ class GatewayRunner:
if success: if success:
adapter._voice_text_channels[guild_id] = int(event.source.chat_id) adapter._voice_text_channels[guild_id] = int(event.source.chat_id)
if hasattr(adapter, "_voice_sources"):
adapter._voice_sources[guild_id] = event.source.to_dict()
self._voice_mode[event.source.chat_id] = "all" self._voice_mode[event.source.chat_id] = "all"
self._save_voice_modes() self._save_voice_modes()
self._set_adapter_auto_tts_disabled(adapter, event.source.chat_id, disabled=False) self._set_adapter_auto_tts_disabled(adapter, event.source.chat_id, disabled=False)
@ -5051,7 +5088,14 @@ class GatewayRunner:
if not text_ch_id: if not text_ch_id:
return return
# Check authorization before processing voice input # Build source — reuse the linked text channel's metadata when available
# so voice input shares the same session as the bound text conversation.
source_data = getattr(adapter, "_voice_sources", {}).get(guild_id)
if source_data:
source = SessionSource.from_dict(source_data)
source.user_id = str(user_id)
source.user_name = str(user_id)
else:
source = SessionSource( source = SessionSource(
platform=Platform.DISCORD, platform=Platform.DISCORD,
chat_id=str(text_ch_id), chat_id=str(text_ch_id),
@ -5059,6 +5103,8 @@ class GatewayRunner:
user_name=str(user_id), user_name=str(user_id),
chat_type="channel", chat_type="channel",
) )
# Check authorization before processing voice input
if not self._is_user_authorized(source): if not self._is_user_authorized(source):
logger.debug("Unauthorized voice input from user %d, ignoring", user_id) logger.debug("Unauthorized voice input from user %d, ignoring", user_id)
return return
@ -6523,6 +6569,61 @@ class GatewayRunner:
Platform.FEISHU, Platform.WECOM, Platform.WECOM_CALLBACK, Platform.WEIXIN, Platform.BLUEBUBBLES, Platform.LOCAL, Platform.FEISHU, Platform.WECOM, Platform.WECOM_CALLBACK, Platform.WEIXIN, Platform.BLUEBUBBLES, Platform.LOCAL,
}) })
async def _handle_debug_command(self, event: MessageEvent) -> str:
"""Handle /debug — upload debug report + logs and return paste URLs."""
import asyncio
from hermes_cli.debug import (
_capture_dump, collect_debug_report, _read_full_log,
upload_to_pastebin,
)
loop = asyncio.get_running_loop()
# Run blocking I/O (dump capture, log reads, uploads) in a thread.
def _collect_and_upload():
dump_text = _capture_dump()
report = collect_debug_report(log_lines=200, dump_text=dump_text)
agent_log = _read_full_log("agent")
gateway_log = _read_full_log("gateway")
if agent_log:
agent_log = dump_text + "\n\n--- full agent.log ---\n" + agent_log
if gateway_log:
gateway_log = dump_text + "\n\n--- full gateway.log ---\n" + gateway_log
urls = {}
failures = []
try:
urls["Report"] = upload_to_pastebin(report)
except Exception as exc:
return f"✗ Failed to upload debug report: {exc}"
if agent_log:
try:
urls["agent.log"] = upload_to_pastebin(agent_log)
except Exception:
failures.append("agent.log")
if gateway_log:
try:
urls["gateway.log"] = upload_to_pastebin(gateway_log)
except Exception:
failures.append("gateway.log")
lines = ["**Debug report uploaded:**", ""]
label_width = max(len(k) for k in urls)
for label, url in urls.items():
lines.append(f"`{label:<{label_width}}` {url}")
if failures:
lines.append(f"\n_(failed to upload: {', '.join(failures)})_")
lines.append("\nShare these links with the Hermes team for support.")
return "\n".join(lines)
return await loop.run_in_executor(None, _collect_and_upload)
async def _handle_update_command(self, event: MessageEvent) -> str: async def _handle_update_command(self, event: MessageEvent) -> str:
"""Handle /update command — update Hermes Agent to the latest version. """Handle /update command — update Hermes Agent to the latest version.
@ -6917,6 +7018,48 @@ class GatewayRunner:
return True return True
async def _send_restart_notification(self) -> None:
"""Notify the chat that initiated /restart that the gateway is back."""
import json as _json
notify_path = _hermes_home / ".restart_notify.json"
if not notify_path.exists():
return
try:
data = _json.loads(notify_path.read_text())
platform_str = data.get("platform")
chat_id = data.get("chat_id")
thread_id = data.get("thread_id")
if not platform_str or not chat_id:
return
platform = Platform(platform_str)
adapter = self.adapters.get(platform)
if not adapter:
logger.debug(
"Restart notification skipped: %s adapter not connected",
platform_str,
)
return
metadata = {"thread_id": thread_id} if thread_id else None
await adapter.send(
chat_id,
"♻ Gateway restarted successfully. Your session continues.",
metadata=metadata,
)
logger.info(
"Sent restart notification to %s:%s",
platform_str,
chat_id,
)
except Exception as e:
logger.warning("Restart notification failed: %s", e)
finally:
notify_path.unlink(missing_ok=True)
def _set_session_env(self, context: SessionContext) -> list: def _set_session_env(self, context: SessionContext) -> list:
"""Set session context variables for the current async task. """Set session context variables for the current async task.
@ -7448,9 +7591,11 @@ class GatewayRunner:
_pl = get_tool_preview_max_len() _pl = get_tool_preview_max_len()
import json as _json import json as _json
args_str = _json.dumps(args, ensure_ascii=False, default=str) args_str = _json.dumps(args, ensure_ascii=False, default=str)
_cap = _pl if _pl > 0 else 200 # When tool_preview_length is 0 (default), don't truncate
if len(args_str) > _cap: # in verbose mode — the user explicitly asked for full
args_str = args_str[:_cap - 3] + "..." # detail. Platform message-length limits handle the rest.
if _pl > 0 and len(args_str) > _pl:
args_str = args_str[:_pl - 3] + "..."
msg = f"{emoji} {tool_name}({list(args.keys())})\n{args_str}" msg = f"{emoji} {tool_name}({list(args.keys())})\n{args_str}"
elif preview: elif preview:
msg = f"{emoji} {tool_name}: \"{preview}\"" msg = f"{emoji} {tool_name}: \"{preview}\""
@ -7760,10 +7905,18 @@ class GatewayRunner:
from gateway.stream_consumer import GatewayStreamConsumer, StreamConsumerConfig from gateway.stream_consumer import GatewayStreamConsumer, StreamConsumerConfig
_adapter = self.adapters.get(source.platform) _adapter = self.adapters.get(source.platform)
if _adapter: if _adapter:
# Platforms that don't support editing sent messages
# (e.g. WeChat) must not show a cursor in intermediate
# sends — the cursor would be permanently visible because
# it can never be edited away. Use an empty cursor for
# such platforms so streaming still delivers the final
# response, just without the typing indicator.
_adapter_supports_edit = getattr(_adapter, "SUPPORTS_MESSAGE_EDITING", True)
_effective_cursor = _scfg.cursor if _adapter_supports_edit else ""
_consumer_cfg = StreamConsumerConfig( _consumer_cfg = StreamConsumerConfig(
edit_interval=_scfg.edit_interval, edit_interval=_scfg.edit_interval,
buffer_threshold=_scfg.buffer_threshold, buffer_threshold=_scfg.buffer_threshold,
cursor=_scfg.cursor, cursor=_effective_cursor,
) )
_stream_consumer = GatewayStreamConsumer( _stream_consumer = GatewayStreamConsumer(
adapter=_adapter, adapter=_adapter,
@ -8243,11 +8396,17 @@ class GatewayRunner:
interrupt_monitor = asyncio.create_task(monitor_for_interrupt()) interrupt_monitor = asyncio.create_task(monitor_for_interrupt())
# Periodic "still working" notifications for long-running tasks. # Periodic "still working" notifications for long-running tasks.
# Fires every 10 minutes so the user knows the agent hasn't died. # Fires every N seconds so the user knows the agent hasn't died.
_NOTIFY_INTERVAL = 600 # 10 minutes # Config: agent.gateway_notify_interval in config.yaml, or
# HERMES_AGENT_NOTIFY_INTERVAL env var. Default 600s (10 min).
# 0 = disable notifications.
_NOTIFY_INTERVAL_RAW = float(os.getenv("HERMES_AGENT_NOTIFY_INTERVAL", 600))
_NOTIFY_INTERVAL = _NOTIFY_INTERVAL_RAW if _NOTIFY_INTERVAL_RAW > 0 else None
_notify_start = time.time() _notify_start = time.time()
async def _notify_long_running(): async def _notify_long_running():
if _NOTIFY_INTERVAL is None:
return # Notifications disabled (gateway_notify_interval: 0)
_notify_adapter = self.adapters.get(source.platform) _notify_adapter = self.adapters.get(source.platform)
if not _notify_adapter: if not _notify_adapter:
return return
@ -8842,6 +9001,7 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
runner.request_restart(detached=False, via_service=True) runner.request_restart(detached=False, via_service=True)
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
if threading.current_thread() is threading.main_thread():
for sig in (signal.SIGINT, signal.SIGTERM): for sig in (signal.SIGINT, signal.SIGTERM):
try: try:
loop.add_signal_handler(sig, shutdown_signal_handler) loop.add_signal_handler(sig, shutdown_signal_handler)
@ -8852,6 +9012,8 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
loop.add_signal_handler(signal.SIGUSR1, restart_signal_handler) loop.add_signal_handler(signal.SIGUSR1, restart_signal_handler)
except NotImplementedError: except NotImplementedError:
pass pass
else:
logger.info("Skipping signal handlers (not running in main thread).")
# Start the gateway # Start the gateway
success = await runner.start() success = await runner.start()

View file

@ -878,7 +878,8 @@ class SessionStore:
Used by ``/resume`` to restore a previously-named session. Used by ``/resume`` to restore a previously-named session.
Ends the current session in SQLite (like reset), but instead of Ends the current session in SQLite (like reset), but instead of
generating a fresh session ID, re-uses ``target_session_id`` so the generating a fresh session ID, re-uses ``target_session_id`` so the
old transcript is loaded on the next message. old transcript is loaded on the next message. If the target session was
previously ended, re-open it so gateway resume semantics match the CLI.
""" """
db_end_session_id = None db_end_session_id = None
new_entry = None new_entry = None
@ -918,6 +919,12 @@ class SessionStore:
except Exception as e: except Exception as e:
logger.debug("Session DB end_session failed: %s", e) logger.debug("Session DB end_session failed: %s", e)
if self._db:
try:
self._db.reopen_session(target_session_id)
except Exception as e:
logger.debug("Session DB reopen_session failed: %s", e)
return new_entry return new_entry
def list_sessions(self, active_minutes: Optional[int] = None) -> List[SessionEntry]: def list_sessions(self, active_minutes: Optional[int] = None) -> List[SessionEntry]:

View file

@ -290,6 +290,15 @@ def acquire_scoped_lock(scope: str, identity: str, metadata: Optional[dict[str,
} }
existing = _read_json_file(lock_path) existing = _read_json_file(lock_path)
if existing is None and lock_path.exists():
# Lock file exists but is empty or contains invalid JSON — treat as
# stale. This happens when a previous process was killed between
# O_CREAT|O_EXCL and the subsequent json.dump() (e.g. DNS failure
# during rapid Slack reconnect retries).
try:
lock_path.unlink(missing_ok=True)
except OSError:
pass
if existing: if existing:
try: try:
existing_pid = int(existing["pid"]) existing_pid = int(existing["pid"])

View file

@ -11,5 +11,5 @@ Provides subcommands for:
- hermes cron - Manage cron jobs - hermes cron - Manage cron jobs
""" """
__version__ = "0.8.0" __version__ = "0.9.0"
__release_date__ = "2026.4.8" __release_date__ = "2026.4.13"

View file

@ -127,6 +127,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
auth_type="api_key", auth_type="api_key",
inference_base_url=DEFAULT_GITHUB_MODELS_BASE_URL, inference_base_url=DEFAULT_GITHUB_MODELS_BASE_URL,
api_key_env_vars=("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"), api_key_env_vars=("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"),
base_url_env_var="COPILOT_API_BASE_URL",
), ),
"copilot-acp": ProviderConfig( "copilot-acp": ProviderConfig(
id="copilot-acp", id="copilot-acp",
@ -159,6 +160,13 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
api_key_env_vars=("KIMI_API_KEY",), api_key_env_vars=("KIMI_API_KEY",),
base_url_env_var="KIMI_BASE_URL", base_url_env_var="KIMI_BASE_URL",
), ),
"kimi-coding-cn": ProviderConfig(
id="kimi-coding-cn",
name="Kimi / Moonshot (China)",
auth_type="api_key",
inference_base_url="https://api.moonshot.cn/v1",
api_key_env_vars=("KIMI_CN_API_KEY",),
),
"minimax": ProviderConfig( "minimax": ProviderConfig(
id="minimax", id="minimax",
name="MiniMax", name="MiniMax",
@ -307,44 +315,6 @@ def _resolve_kimi_base_url(api_key: str, default_url: str, env_override: str) ->
return default_url return default_url
def _gh_cli_candidates() -> list[str]:
"""Return candidate ``gh`` binary paths, including common Homebrew installs."""
candidates: list[str] = []
resolved = shutil.which("gh")
if resolved:
candidates.append(resolved)
for candidate in (
"/opt/homebrew/bin/gh",
"/usr/local/bin/gh",
str(Path.home() / ".local" / "bin" / "gh"),
):
if candidate in candidates:
continue
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
candidates.append(candidate)
return candidates
def _try_gh_cli_token() -> Optional[str]:
"""Return a token from ``gh auth token`` when the GitHub CLI is available."""
for gh_path in _gh_cli_candidates():
try:
result = subprocess.run(
[gh_path, "auth", "token"],
capture_output=True,
text=True,
timeout=5,
)
except (FileNotFoundError, subprocess.TimeoutExpired) as exc:
logger.debug("gh CLI token lookup failed (%s): %s", gh_path, exc)
continue
if result.returncode == 0 and result.stdout.strip():
return result.stdout.strip()
return None
_PLACEHOLDER_SECRET_VALUES = { _PLACEHOLDER_SECRET_VALUES = {
"*", "*",
@ -929,6 +899,7 @@ def resolve_provider(
"glm": "zai", "z-ai": "zai", "z.ai": "zai", "zhipu": "zai", "glm": "zai", "z-ai": "zai", "z.ai": "zai", "zhipu": "zai",
"google": "gemini", "google-gemini": "gemini", "google-ai-studio": "gemini", "google": "gemini", "google-gemini": "gemini", "google-ai-studio": "gemini",
"kimi": "kimi-coding", "kimi-for-coding": "kimi-coding", "moonshot": "kimi-coding", "kimi": "kimi-coding", "kimi-for-coding": "kimi-coding", "moonshot": "kimi-coding",
"kimi-cn": "kimi-coding-cn", "moonshot-cn": "kimi-coding-cn",
"minimax-china": "minimax-cn", "minimax_cn": "minimax-cn", "minimax-china": "minimax-cn", "minimax_cn": "minimax-cn",
"claude": "anthropic", "claude-code": "anthropic", "claude": "anthropic", "claude-code": "anthropic",
"github": "copilot", "github-copilot": "copilot", "github": "copilot", "github-copilot": "copilot",

View file

@ -36,25 +36,23 @@ _OAUTH_CAPABLE_PROVIDERS = {"anthropic", "nous", "openai-codex", "qwen-oauth"}
def _get_custom_provider_names() -> list: def _get_custom_provider_names() -> list:
"""Return list of (display_name, pool_key) tuples for custom_providers in config.""" """Return list of (display_name, pool_key, provider_key) tuples."""
try: try:
from hermes_cli.config import load_config from hermes_cli.config import get_compatible_custom_providers, load_config
config = load_config() config = load_config()
except Exception: except Exception:
return [] return []
custom_providers = config.get("custom_providers")
if not isinstance(custom_providers, list):
return []
result = [] result = []
for entry in custom_providers: for entry in get_compatible_custom_providers(config):
if not isinstance(entry, dict): if not isinstance(entry, dict):
continue continue
name = entry.get("name") name = entry.get("name")
if not isinstance(name, str) or not name.strip(): if not isinstance(name, str) or not name.strip():
continue continue
pool_key = f"{CUSTOM_POOL_PREFIX}{_normalize_custom_pool_name(name)}" pool_key = f"{CUSTOM_POOL_PREFIX}{_normalize_custom_pool_name(name)}"
result.append((name.strip(), pool_key)) provider_key = str(entry.get("provider_key", "") or "").strip()
result.append((name.strip(), pool_key, provider_key))
return result return result
@ -66,9 +64,11 @@ def _resolve_custom_provider_input(raw: str) -> str | None:
# Direct match on 'custom:name' format # Direct match on 'custom:name' format
if normalized.startswith(CUSTOM_POOL_PREFIX): if normalized.startswith(CUSTOM_POOL_PREFIX):
return normalized return normalized
for display_name, pool_key in _get_custom_provider_names(): for display_name, pool_key, provider_key in _get_custom_provider_names():
if _normalize_custom_pool_name(display_name) == normalized: if _normalize_custom_pool_name(display_name) == normalized:
return pool_key return pool_key
if provider_key and provider_key.strip().lower() == normalized:
return pool_key
return None return None
@ -405,7 +405,7 @@ def _pick_provider(prompt: str = "Provider") -> str:
known = sorted(set(list(PROVIDER_REGISTRY.keys()) + ["openrouter"])) known = sorted(set(list(PROVIDER_REGISTRY.keys()) + ["openrouter"]))
custom_names = _get_custom_provider_names() custom_names = _get_custom_provider_names()
if custom_names: if custom_names:
custom_display = [name for name, _key in custom_names] custom_display = [name for name, _key, _provider_key in custom_names]
print(f"\nKnown providers: {', '.join(known)}") print(f"\nKnown providers: {', '.join(known)}")
print(f"Custom endpoints: {', '.join(custom_display)}") print(f"Custom endpoints: {', '.join(custom_display)}")
else: else:

View file

@ -8,14 +8,22 @@ Backup and import commands for hermes CLI.
HERMES_HOME root. HERMES_HOME root.
""" """
import json
import logging
import os import os
import shutil
import sqlite3
import sys import sys
import tempfile
import time import time
import zipfile import zipfile
from datetime import datetime from datetime import datetime, timezone
from pathlib import Path from pathlib import Path
from typing import Any, Dict, List, Optional
from hermes_constants import get_default_hermes_root, display_hermes_home from hermes_constants import get_default_hermes_root, get_hermes_home, display_hermes_home
logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -63,6 +71,33 @@ def _should_exclude(rel_path: Path) -> bool:
return False return False
# ---------------------------------------------------------------------------
# SQLite safe copy
# ---------------------------------------------------------------------------
def _safe_copy_db(src: Path, dst: Path) -> bool:
"""Copy a SQLite database safely using the backup() API.
Handles WAL mode produces a consistent snapshot even while
the DB is being written to. Falls back to raw copy on failure.
"""
try:
conn = sqlite3.connect(f"file:{src}?mode=ro", uri=True)
backup_conn = sqlite3.connect(str(dst))
conn.backup(backup_conn)
backup_conn.close()
conn.close()
return True
except Exception as exc:
logger.warning("SQLite safe copy failed for %s: %s", src, exc)
try:
shutil.copy2(src, dst)
return True
except Exception as exc2:
logger.error("Raw copy also failed for %s: %s", src, exc2)
return False
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Backup # Backup
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -151,6 +186,19 @@ def run_backup(args) -> None:
with zipfile.ZipFile(out_path, "w", zipfile.ZIP_DEFLATED, compresslevel=6) as zf: with zipfile.ZipFile(out_path, "w", zipfile.ZIP_DEFLATED, compresslevel=6) as zf:
for i, (abs_path, rel_path) in enumerate(files_to_add, 1): for i, (abs_path, rel_path) in enumerate(files_to_add, 1):
try: try:
# Safe copy for SQLite databases (handles WAL mode)
if abs_path.suffix == ".db":
with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
tmp_db = Path(tmp.name)
if _safe_copy_db(abs_path, tmp_db):
zf.write(tmp_db, arcname=str(rel_path))
total_bytes += tmp_db.stat().st_size
tmp_db.unlink(missing_ok=True)
else:
tmp_db.unlink(missing_ok=True)
errors.append(f" {rel_path}: SQLite safe copy failed")
continue
else:
zf.write(abs_path, arcname=str(rel_path)) zf.write(abs_path, arcname=str(rel_path))
total_bytes += abs_path.stat().st_size total_bytes += abs_path.stat().st_size
except (PermissionError, OSError) as exc: except (PermissionError, OSError) as exc:
@ -201,7 +249,7 @@ def _validate_backup_zip(zf: zipfile.ZipFile) -> tuple[bool, str]:
return False, "zip archive is empty" return False, "zip archive is empty"
# Look for telltale files that a hermes home would have # Look for telltale files that a hermes home would have
markers = {"config.yaml", ".env", "hermes_state.db", "memory_store.db"} markers = {"config.yaml", ".env", "state.db"}
found = set() found = set()
for n in names: for n in names:
# Could be at the root or one level deep (if someone zipped the directory) # Could be at the root or one level deep (if someone zipped the directory)
@ -397,3 +445,211 @@ def run_import(args) -> None:
print(f" hermes -p {pname} gateway install") print(f" hermes -p {pname} gateway install")
print("Done. Your Hermes configuration has been restored.") print("Done. Your Hermes configuration has been restored.")
# ---------------------------------------------------------------------------
# Quick state snapshots (used by /snapshot slash command and hermes backup --quick)
# ---------------------------------------------------------------------------
# Critical state files to include in quick snapshots (relative to HERMES_HOME).
# Everything else is either regeneratable (logs, cache) or managed separately
# (skills, repo, sessions/).
_QUICK_STATE_FILES = (
"state.db",
"config.yaml",
".env",
"auth.json",
"cron/jobs.json",
"gateway_state.json",
"channel_directory.json",
"processes.json",
)
_QUICK_SNAPSHOTS_DIR = "state-snapshots"
_QUICK_DEFAULT_KEEP = 20
def _quick_snapshot_root(hermes_home: Optional[Path] = None) -> Path:
home = hermes_home or get_hermes_home()
return home / _QUICK_SNAPSHOTS_DIR
def create_quick_snapshot(
    label: Optional[str] = None,
    hermes_home: Optional[Path] = None,
) -> Optional[str]:
    """Create a quick state snapshot of critical files.

    Copies _QUICK_STATE_FILES to a timestamped directory under
    state-snapshots/ and auto-prunes old snapshots beyond the keep limit.

    Args:
        label: Optional suffix appended to the snapshot ID. Because it
            becomes part of a directory name, it is sanitized to
            filesystem-safe characters (a raw "a/../b" label could
            otherwise escape the snapshot root).
        hermes_home: Optional override of the HERMES_HOME root.

    Returns:
        Snapshot ID (timestamp-based), or None if no state files were found.
    """
    import re  # local import: only needed for label sanitization

    home = hermes_home or get_hermes_home()
    root = _quick_snapshot_root(home)

    ts = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    snap_id = ts
    if label:
        safe_label = re.sub(r"[^A-Za-z0-9._-]+", "-", label).strip("-.")
        if safe_label:
            snap_id = f"{ts}-{safe_label}"
    snap_dir = root / snap_id
    snap_dir.mkdir(parents=True, exist_ok=True)

    manifest: Dict[str, int] = {}  # rel_path -> file size
    for rel in _QUICK_STATE_FILES:
        src = home / rel
        if not src.exists() or not src.is_file():
            continue
        dst = snap_dir / rel
        dst.parent.mkdir(parents=True, exist_ok=True)
        try:
            if src.suffix == ".db":
                # SQLite files need the backup() API for a consistent copy.
                if not _safe_copy_db(src, dst):
                    continue
            else:
                shutil.copy2(src, dst)
            manifest[rel] = dst.stat().st_size
        except (OSError, PermissionError) as exc:
            logger.warning("Could not snapshot %s: %s", rel, exc)

    if not manifest:
        # Nothing was copied — remove the empty snapshot directory.
        shutil.rmtree(snap_dir, ignore_errors=True)
        return None

    # Write manifest describing what the snapshot contains.
    meta = {
        "id": snap_id,
        "timestamp": ts,
        "label": label,
        "file_count": len(manifest),
        "total_size": sum(manifest.values()),
        "files": manifest,
    }
    with open(snap_dir / "manifest.json", "w") as f:
        json.dump(meta, f, indent=2)

    # Auto-prune old snapshots beyond the retention limit.
    _prune_quick_snapshots(root, keep=_QUICK_DEFAULT_KEEP)

    logger.info("State snapshot created: %s (%d files)", snap_id, len(manifest))
    return snap_id
def list_quick_snapshots(
    limit: int = 20,
    hermes_home: Optional[Path] = None,
) -> List[Dict[str, Any]]:
    """Return manifests of existing quick snapshots, most recent first.

    Snapshots whose manifest.json is unreadable are reported with zeroed
    counts; directories without a manifest are skipped entirely.

    Args:
        limit: Maximum number of snapshots to return.
        hermes_home: Optional override of the HERMES_HOME root.
    """
    root = _quick_snapshot_root(hermes_home)
    if not root.exists():
        return []

    snapshots: List[Dict[str, Any]] = []
    # Snapshot IDs start with a UTC timestamp, so reverse name order is
    # reverse chronological order.
    for entry in sorted(root.iterdir(), reverse=True):
        if len(snapshots) >= limit:
            break
        if not entry.is_dir():
            continue
        manifest_file = entry / "manifest.json"
        if not manifest_file.exists():
            continue
        try:
            with open(manifest_file) as fh:
                snapshots.append(json.load(fh))
        except (json.JSONDecodeError, OSError):
            snapshots.append({"id": entry.name, "file_count": 0, "total_size": 0})
    return snapshots
def restore_quick_snapshot(
    snapshot_id: str,
    hermes_home: Optional[Path] = None,
) -> bool:
    """Restore state from a quick snapshot.

    Overwrites current state files with the snapshot's copies.

    Args:
        snapshot_id: Directory name of the snapshot under state-snapshots/.
        hermes_home: Optional override of the HERMES_HOME root.

    Returns:
        True if at least one file was restored.
    """
    home = hermes_home or get_hermes_home()
    root = _quick_snapshot_root(home)
    snap_dir = root / snapshot_id
    if not snap_dir.is_dir():
        return False

    manifest_path = snap_dir / "manifest.json"
    if not manifest_path.exists():
        return False
    try:
        with open(manifest_path) as f:
            meta = json.load(f)
    except (json.JSONDecodeError, OSError) as exc:
        # A corrupt manifest means the snapshot contents can't be trusted;
        # the original code let this exception crash the restore.
        logger.error("Unreadable manifest for snapshot %s: %s", snapshot_id, exc)
        return False

    restored = 0
    for rel in meta.get("files", {}):
        # Guard against manifest entries escaping HERMES_HOME.
        rel_path = Path(rel)
        if rel_path.is_absolute() or ".." in rel_path.parts:
            logger.warning("Skipping suspicious manifest path: %s", rel)
            continue
        src = snap_dir / rel
        if not src.exists():
            continue
        dst = home / rel
        dst.parent.mkdir(parents=True, exist_ok=True)
        try:
            if dst.suffix == ".db":
                # Atomic-ish replace for databases: copy to a temp name in
                # the same directory, then move into place.
                tmp = dst.parent / f".{dst.name}.snap_restore"
                shutil.copy2(src, tmp)
                dst.unlink(missing_ok=True)
                shutil.move(str(tmp), str(dst))
            else:
                shutil.copy2(src, dst)
            restored += 1
        except (OSError, PermissionError) as exc:
            logger.error("Failed to restore %s: %s", rel, exc)

    logger.info("Restored %d files from snapshot %s", restored, snapshot_id)
    return restored > 0
def _prune_quick_snapshots(root: Path, keep: int = _QUICK_DEFAULT_KEEP) -> int:
    """Delete the oldest quick snapshots beyond the ``keep`` newest.

    Args:
        root: The state-snapshots directory.
        keep: How many of the newest snapshots to retain.

    Returns:
        Number of snapshot directories removed.
    """
    if not root.exists():
        return 0

    # IDs are timestamp-prefixed, so lexicographic order is chronological;
    # everything after the first ``keep`` (newest-first) is stale.
    snapshots = sorted(
        (entry for entry in root.iterdir() if entry.is_dir()),
        key=lambda entry: entry.name,
        reverse=True,
    )

    removed = 0
    for stale in snapshots[keep:]:
        try:
            shutil.rmtree(stale)
        except OSError as exc:
            logger.warning("Failed to prune snapshot %s: %s", stale.name, exc)
        else:
            removed += 1
    return removed
def prune_quick_snapshots(
    keep: int = _QUICK_DEFAULT_KEEP,
    hermes_home: Optional[Path] = None,
) -> int:
    """Manually prune quick snapshots.

    Args:
        keep: How many of the newest snapshots to retain.
        hermes_home: Optional override of the HERMES_HOME root.

    Returns:
        Number of snapshot directories deleted.
    """
    root = _quick_snapshot_root(hermes_home)
    return _prune_quick_snapshots(root, keep=keep)
def run_quick_backup(args) -> None:
    """CLI entry point for hermes backup --quick.

    Creates a snapshot (optionally labeled via ``args.label``) and prints
    a summary plus the restore command.
    """
    snap_id = create_quick_snapshot(label=getattr(args, "label", None))
    if not snap_id:
        print("No state files found to snapshot.")
        return

    snaps = list_quick_snapshots()
    print(f"State snapshot created: {snap_id}")
    print(f" {len(snaps)} snapshot(s) stored in {display_hermes_home()}/state-snapshots/")
    print(f" Restore with: /snapshot restore {snap_id}")

View file

@ -11,6 +11,7 @@ Usage:
import importlib.util import importlib.util
import logging import logging
import subprocess
import sys import sys
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
@ -52,6 +53,99 @@ _OPENCLAW_SCRIPT_INSTALLED = (
# Known OpenClaw directory names (current + legacy) # Known OpenClaw directory names (current + legacy)
_OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moltbot") _OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moltbot")
def _detect_openclaw_processes() -> list[str]:
    """Detect running OpenClaw processes and services.

    Returns a list of human-readable descriptions of what was found.
    An empty list means nothing was detected.
    """
    found: list[str] = []

    # -- systemd service (Linux) ------------------------------------------
    if sys.platform != "win32":
        try:
            # "is-active" prints "active" on stdout when the unit is running.
            result = subprocess.run(
                ["systemctl", "--user", "is-active", "openclaw-gateway.service"],
                capture_output=True, text=True, timeout=5,
            )
            if result.stdout.strip() == "active":
                found.append("systemd service: openclaw-gateway.service")
        except (FileNotFoundError, subprocess.TimeoutExpired):
            # systemctl missing (non-systemd host) or hung — treat as absent.
            pass

    # -- process scan ------------------------------------------------------
    if sys.platform == "win32":
        try:
            # Native executables are visible to tasklist by image name.
            for exe in ("openclaw.exe", "clawd.exe"):
                result = subprocess.run(
                    ["tasklist", "/FI", f"IMAGENAME eq {exe}"],
                    capture_output=True, text=True, timeout=5,
                )
                if exe in result.stdout.lower():
                    found.append(f"process: {exe}")
            # Node.js-hosted OpenClaw — tasklist doesn't show command lines,
            # so fall back to PowerShell.
            ps_cmd = (
                'Get-CimInstance Win32_Process -Filter "Name = \'node.exe\'" | '
                'Where-Object { $_.CommandLine -match "openclaw|clawd" } | '
                'Select-Object -First 1 ProcessId'
            )
            result = subprocess.run(
                ["powershell", "-NoProfile", "-Command", ps_cmd],
                capture_output=True, text=True, timeout=5,
            )
            # NOTE(review): Select-Object's default output may include a
            # table header before the PID — confirm the reported string on
            # a real Windows host.
            if result.stdout.strip():
                found.append(f"node.exe process with openclaw in command line (PID {result.stdout.strip()})")
        except Exception:
            # Best-effort detection only; never block migration on scan errors.
            pass
    else:
        try:
            # pgrep -f matches against the full command line.
            result = subprocess.run(
                ["pgrep", "-f", "openclaw"],
                capture_output=True, text=True, timeout=3,
            )
            if result.returncode == 0:
                pids = result.stdout.strip().split()
                found.append(f"openclaw process(es) (PIDs: {', '.join(pids)})")
        except (FileNotFoundError, subprocess.TimeoutExpired):
            pass

    return found
def _warn_if_openclaw_running(auto_yes: bool) -> None:
    """Warn (and optionally abort) if OpenClaw is still running before migration.

    Telegram, Discord, and Slack only allow one active connection per bot
    token; migrating while OpenClaw runs makes both sides fight for the
    same token.

    Args:
        auto_yes: When True, print the warning but do not prompt.
    """
    detections = _detect_openclaw_processes()
    if not detections:
        return

    print()
    print_error("OpenClaw appears to be running:")
    for detail in detections:
        print_info(f" * {detail}")
    print_info(
        "Messaging platforms (Telegram, Discord, Slack) only allow one "
        "active session per bot token. If you continue, both OpenClaw and "
        "Hermes may try to use the same token, causing disconnects."
    )
    print_info("Recommendation: stop OpenClaw before migrating.")
    print()

    if auto_yes:
        return
    if not sys.stdin.isatty():
        print_info("Non-interactive session — continuing to preview only.")
        return
    if not prompt_yes_no("Continue anyway?", default=False):
        print_info("Migration cancelled. Stop OpenClaw and try again.")
        sys.exit(0)
def _warn_if_gateway_running(auto_yes: bool) -> None: def _warn_if_gateway_running(auto_yes: bool) -> None:
"""Check if a Hermes gateway is running with connected platforms. """Check if a Hermes gateway is running with connected platforms.
@ -287,8 +381,11 @@ def _cmd_migrate(args):
print_info(f"Workspace: {workspace_target}") print_info(f"Workspace: {workspace_target}")
print() print()
# Check if a gateway is running with connected platforms — migrating tokens # Check if OpenClaw is still running — migrating tokens while both are
# while the gateway is active will cause conflicts (e.g. Telegram 409). # active will cause conflicts (e.g. Telegram 409).
_warn_if_openclaw_running(auto_yes)
# Check if a Hermes gateway is running with connected platforms.
_warn_if_gateway_running(auto_yes) _warn_if_gateway_running(auto_yes)
# Ensure config.yaml exists before migration tries to read it # Ensure config.yaml exists before migration tries to read it
@ -430,6 +527,28 @@ def _cmd_cleanup(args):
print_success("No OpenClaw directories found. Nothing to clean up.") print_success("No OpenClaw directories found. Nothing to clean up.")
return return
# Warn if OpenClaw is still running — archiving while the service is
# active causes it to recreate an empty skeleton directory (#8502).
running = _detect_openclaw_processes()
if running:
print()
print_error("OpenClaw appears to be still running:")
for detail in running:
print_info(f" * {detail}")
print_info(
"Archiving .openclaw/ while the service is active may cause it to "
"immediately recreate an empty skeleton directory, destroying your config."
)
print_info("Stop OpenClaw first: systemctl --user stop openclaw-gateway.service")
print()
if not auto_yes:
if not sys.stdin.isatty():
print_info("Non-interactive session — aborting. Stop OpenClaw and re-run.")
return
if not prompt_yes_no("Proceed anyway?", default=False):
print_info("Aborted. Stop OpenClaw first, then re-run: hermes claw cleanup")
return
total_archived = 0 total_archived = 0
for source_dir in dirs_to_check: for source_dir in dirs_to_check:

View file

@ -73,7 +73,9 @@ COMMAND_REGISTRY: list[CommandDef] = [
args_hint="[focus topic]"), args_hint="[focus topic]"),
CommandDef("rollback", "List or restore filesystem checkpoints", "Session", CommandDef("rollback", "List or restore filesystem checkpoints", "Session",
args_hint="[number]"), args_hint="[number]"),
CommandDef("stop", "Kill all running registered subprocesses", "Session"), CommandDef("snapshot", "Create or restore state snapshots of Hermes config/state", "Session",
aliases=("snap",), args_hint="[create|restore <id>|prune]"),
CommandDef("stop", "Kill all running background processes", "Session"),
CommandDef("approve", "Approve a pending dangerous command", "Session", CommandDef("approve", "Approve a pending dangerous command", "Session",
gateway_only=True, args_hint="[session|always]"), gateway_only=True, args_hint="[session|always]"),
CommandDef("deny", "Deny a pending dangerous command", "Session", CommandDef("deny", "Deny a pending dangerous command", "Session",
@ -131,6 +133,7 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("cron", "Manage scheduled tasks", "Tools & Skills", CommandDef("cron", "Manage scheduled tasks", "Tools & Skills",
cli_only=True, args_hint="[subcommand]", cli_only=True, args_hint="[subcommand]",
subcommands=("list", "add", "create", "edit", "pause", "resume", "run", "remove")), subcommands=("list", "add", "create", "edit", "pause", "resume", "run", "remove")),
CommandDef("reload", "Reload .env variables into the running session", "Tools & Skills"),
CommandDef("reload-mcp", "Reload MCP servers from config", "Tools & Skills", CommandDef("reload-mcp", "Reload MCP servers from config", "Tools & Skills",
aliases=("reload_mcp",)), aliases=("reload_mcp",)),
CommandDef("browser", "Connect browser tools to your live Chrome via CDP", "Tools & Skills", CommandDef("browser", "Connect browser tools to your live Chrome via CDP", "Tools & Skills",
@ -158,6 +161,7 @@ COMMAND_REGISTRY: list[CommandDef] = [
cli_only=True, args_hint="<path>"), cli_only=True, args_hint="<path>"),
CommandDef("update", "Update Hermes Agent to the latest version", "Info", CommandDef("update", "Update Hermes Agent to the latest version", "Info",
gateway_only=True), gateway_only=True),
CommandDef("debug", "Upload debug report (system info + logs) and get shareable links", "Info"),
# Exit # Exit
CommandDef("quit", "Exit the CLI", "Exit", CommandDef("quit", "Exit the CLI", "Exit",

View file

@ -148,25 +148,6 @@ def managed_error(action: str = "modify configuration"):
# Container-aware CLI (NixOS container mode) # Container-aware CLI (NixOS container mode)
# ============================================================================= # =============================================================================
def _is_inside_container() -> bool:
"""Detect if we're already running inside a Docker/Podman container."""
# Standard Docker/Podman indicators
if os.path.exists("/.dockerenv"):
return True
# Podman uses /run/.containerenv
if os.path.exists("/run/.containerenv"):
return True
# Check cgroup for container runtime evidence (works for both Docker & Podman)
try:
with open("/proc/1/cgroup", "r") as f:
cgroup = f.read()
if "docker" in cgroup or "podman" in cgroup or "/lxc/" in cgroup:
return True
except OSError:
pass
return False
def get_container_exec_info() -> Optional[dict]: def get_container_exec_info() -> Optional[dict]:
"""Read container mode metadata from HERMES_HOME/.container-mode. """Read container mode metadata from HERMES_HOME/.container-mode.
@ -181,7 +162,8 @@ def get_container_exec_info() -> Optional[dict]:
if os.environ.get("HERMES_DEV") == "1": if os.environ.get("HERMES_DEV") == "1":
return None return None
if _is_inside_container(): from hermes_constants import is_container
if is_container():
return None return None
container_mode_file = get_hermes_home() / ".container-mode" container_mode_file = get_hermes_home() / ".container-mode"
@ -355,6 +337,10 @@ DEFAULT_CONFIG = {
# threshold before escalating to a full timeout. The warning fires # threshold before escalating to a full timeout. The warning fires
# once per run and does not interrupt the agent. 0 = disable warning. # once per run and does not interrupt the agent. 0 = disable warning.
"gateway_timeout_warning": 900, "gateway_timeout_warning": 900,
# Periodic "still working" notification interval (seconds).
# Sends a status message every N seconds so the user knows the
# agent hasn't died during long tasks. 0 = disable notifications.
"gateway_notify_interval": 600,
}, },
"terminal": { "terminal": {
@ -428,9 +414,7 @@ DEFAULT_CONFIG = {
"threshold": 0.50, # compress when context usage exceeds this ratio "threshold": 0.50, # compress when context usage exceeds this ratio
"target_ratio": 0.20, # fraction of threshold to preserve as recent tail "target_ratio": 0.20, # fraction of threshold to preserve as recent tail
"protect_last_n": 20, # minimum recent messages to keep uncompressed "protect_last_n": 20, # minimum recent messages to keep uncompressed
"summary_model": "", # empty = use main configured model
"summary_provider": "auto",
"summary_base_url": None,
}, },
"smart_model_routing": { "smart_model_routing": {
"enabled": False, "enabled": False,
@ -716,7 +700,7 @@ DEFAULT_CONFIG = {
}, },
# Config schema version - bump this when adding new required fields # Config schema version - bump this when adding new required fields
"_config_version": 16, "_config_version": 17,
} }
# ============================================================================= # =============================================================================
@ -832,6 +816,14 @@ OPTIONAL_ENV_VARS = {
"category": "provider", "category": "provider",
"advanced": True, "advanced": True,
}, },
"KIMI_CN_API_KEY": {
"description": "Kimi / Moonshot China API key",
"prompt": "Kimi (China) API key",
"url": "https://platform.moonshot.cn/",
"password": True,
"category": "provider",
"advanced": True,
},
"MINIMAX_API_KEY": { "MINIMAX_API_KEY": {
"description": "MiniMax API key (international)", "description": "MiniMax API key (international)",
"prompt": "MiniMax API key", "prompt": "MiniMax API key",
@ -1560,6 +1552,136 @@ def get_missing_skill_config_vars() -> List[Dict[str, Any]]:
return missing return missing
def _normalize_custom_provider_entry(
entry: Any,
*,
provider_key: str = "",
) -> Optional[Dict[str, Any]]:
"""Return a runtime-compatible custom provider entry or ``None``."""
if not isinstance(entry, dict):
return None
base_url = ""
for url_key in ("api", "url", "base_url"):
raw_url = entry.get(url_key)
if isinstance(raw_url, str) and raw_url.strip():
base_url = raw_url.strip()
break
if not base_url:
return None
name = ""
raw_name = entry.get("name")
if isinstance(raw_name, str) and raw_name.strip():
name = raw_name.strip()
elif provider_key.strip():
name = provider_key.strip()
if not name:
return None
normalized: Dict[str, Any] = {
"name": name,
"base_url": base_url,
}
provider_key = provider_key.strip()
if provider_key:
normalized["provider_key"] = provider_key
api_key = entry.get("api_key")
if isinstance(api_key, str) and api_key.strip():
normalized["api_key"] = api_key.strip()
key_env = entry.get("key_env")
if isinstance(key_env, str) and key_env.strip():
normalized["key_env"] = key_env.strip()
api_mode = entry.get("api_mode") or entry.get("transport")
if isinstance(api_mode, str) and api_mode.strip():
normalized["api_mode"] = api_mode.strip()
model_name = entry.get("model") or entry.get("default_model")
if isinstance(model_name, str) and model_name.strip():
normalized["model"] = model_name.strip()
models = entry.get("models")
if isinstance(models, dict) and models:
normalized["models"] = models
context_length = entry.get("context_length")
if isinstance(context_length, int) and context_length > 0:
normalized["context_length"] = context_length
rate_limit_delay = entry.get("rate_limit_delay")
if isinstance(rate_limit_delay, (int, float)) and rate_limit_delay >= 0:
normalized["rate_limit_delay"] = rate_limit_delay
return normalized
def providers_dict_to_custom_providers(providers_dict: Any) -> List[Dict[str, Any]]:
    """Normalize ``providers`` config entries into the legacy custom-provider shape.

    Non-dict input yields an empty list; entries that fail normalization
    are dropped.
    """
    if not isinstance(providers_dict, dict):
        return []
    candidates = (
        _normalize_custom_provider_entry(entry, provider_key=str(key))
        for key, entry in providers_dict.items()
    )
    return [item for item in candidates if item is not None]
def get_compatible_custom_providers(
    config: Optional[Dict[str, Any]] = None,
) -> List[Dict[str, Any]]:
    """Return a deduplicated custom-provider view across legacy and v12+ config.

    ``custom_providers`` remains the on-disk legacy format, while ``providers``
    is the newer keyed schema. Runtime and picker flows still need a single
    list-shaped view, but that compatibility layer is never materialised
    back into config.yaml because it would duplicate entries in UIs.

    Args:
        config: Pre-loaded config mapping; loaded from disk when None.
    """
    cfg = load_config() if config is None else config

    merged: List[Dict[str, Any]] = []
    known_keys: set = set()
    known_pairs: set = set()

    def _add(entry: Optional[Dict[str, Any]]) -> None:
        # Dedup on provider_key first, then on the (name, base_url) pair.
        if entry is None:
            return
        pkey = str(entry.get("provider_key", "") or "").strip().lower()
        name = str(entry.get("name", "") or "").strip().lower()
        url = str(entry.get("base_url", "") or "").strip().rstrip("/").lower()
        if pkey and pkey in known_keys:
            return
        if name and url and (name, url) in known_pairs:
            return
        merged.append(entry)
        if pkey:
            known_keys.add(pkey)
        if name and url:
            known_pairs.add((name, url))

    legacy = cfg.get("custom_providers")
    if legacy is not None:
        # A malformed legacy section disables the compatibility view
        # entirely (matches historical behavior).
        if not isinstance(legacy, list):
            return []
        for item in legacy:
            _add(_normalize_custom_provider_entry(item))

    for item in providers_dict_to_custom_providers(cfg.get("providers")):
        _add(item)

    return merged
def check_config_version() -> Tuple[int, int]: def check_config_version() -> Tuple[int, int]:
""" """
Check config version. Check config version.
@ -1877,8 +1999,8 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A
if migrated_count > 0: if migrated_count > 0:
config["providers"] = providers_dict config["providers"] = providers_dict
# Remove the old list # Remove the old list — runtime reads via get_compatible_custom_providers()
del config["custom_providers"] config.pop("custom_providers", None)
save_config(config) save_config(config)
if not quiet: if not quiet:
print(f" ✓ Migrated {migrated_count} custom provider(s) to providers: section") print(f" ✓ Migrated {migrated_count} custom provider(s) to providers: section")
@ -1989,6 +2111,43 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A
print(f" ✓ Migrated tool_progress_overrides → display.platforms: {migrated}") print(f" ✓ Migrated tool_progress_overrides → display.platforms: {migrated}")
results["config_added"].append("display.platforms (migrated from tool_progress_overrides)") results["config_added"].append("display.platforms (migrated from tool_progress_overrides)")
# ── Version 16 → 17: remove legacy compression.summary_* keys ──
if current_ver < 17:
config = read_raw_config()
comp = config.get("compression", {})
if isinstance(comp, dict):
s_model = comp.pop("summary_model", None)
s_provider = comp.pop("summary_provider", None)
s_base_url = comp.pop("summary_base_url", None)
migrated_keys = []
# Migrate non-empty, non-default values to auxiliary.compression
if s_model and str(s_model).strip():
aux = config.setdefault("auxiliary", {})
aux_comp = aux.setdefault("compression", {})
if not aux_comp.get("model"):
aux_comp["model"] = str(s_model).strip()
migrated_keys.append(f"model={s_model}")
if s_provider and str(s_provider).strip() not in ("", "auto"):
aux = config.setdefault("auxiliary", {})
aux_comp = aux.setdefault("compression", {})
if not aux_comp.get("provider") or aux_comp.get("provider") == "auto":
aux_comp["provider"] = str(s_provider).strip()
migrated_keys.append(f"provider={s_provider}")
if s_base_url and str(s_base_url).strip():
aux = config.setdefault("auxiliary", {})
aux_comp = aux.setdefault("compression", {})
if not aux_comp.get("base_url"):
aux_comp["base_url"] = str(s_base_url).strip()
migrated_keys.append(f"base_url={s_base_url}")
if migrated_keys or s_model is not None or s_provider is not None or s_base_url is not None:
config["compression"] = comp
save_config(config)
if not quiet:
if migrated_keys:
print(f" ✓ Migrated compression.summary_* → auxiliary.compression: {', '.join(migrated_keys)}")
else:
print(" ✓ Removed unused compression.summary_* keys")
if current_ver < latest_ver and not quiet: if current_ver < latest_ver and not quiet:
print(f"Config version: {current_ver}{latest_ver}") print(f"Config version: {current_ver}{latest_ver}")
@ -2301,6 +2460,7 @@ _FALLBACK_COMMENT = """
# nous (OAuth — hermes auth) — Nous Portal # nous (OAuth — hermes auth) — Nous Portal
# zai (ZAI_API_KEY) — Z.AI / GLM # zai (ZAI_API_KEY) — Z.AI / GLM
# kimi-coding (KIMI_API_KEY) — Kimi / Moonshot # kimi-coding (KIMI_API_KEY) — Kimi / Moonshot
# kimi-coding-cn (KIMI_CN_API_KEY) — Kimi / Moonshot (China)
# minimax (MINIMAX_API_KEY) — MiniMax # minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China) # minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
# #
@ -2344,6 +2504,7 @@ _COMMENTED_SECTIONS = """
# nous (OAuth — hermes auth) — Nous Portal # nous (OAuth — hermes auth) — Nous Portal
# zai (ZAI_API_KEY) — Z.AI / GLM # zai (ZAI_API_KEY) — Z.AI / GLM
# kimi-coding (KIMI_API_KEY) — Kimi / Moonshot # kimi-coding (KIMI_API_KEY) — Kimi / Moonshot
# kimi-coding-cn (KIMI_CN_API_KEY) — Kimi / Moonshot (China)
# minimax (MINIMAX_API_KEY) — MiniMax # minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China) # minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
# #
@ -2398,7 +2559,13 @@ def save_config(config: Dict[str, Any]):
def load_env() -> Dict[str, str]: def load_env() -> Dict[str, str]:
"""Load environment variables from ~/.hermes/.env.""" """Load environment variables from ~/.hermes/.env.
Sanitizes lines before parsing so that corrupted files (e.g.
concatenated KEY=VALUE pairs on a single line) are handled
gracefully instead of producing mangled values such as duplicated
bot tokens. See #8908.
"""
env_path = get_env_path() env_path = get_env_path()
env_vars = {} env_vars = {}
@ -2407,7 +2574,11 @@ def load_env() -> Dict[str, str]:
# fail on UTF-8 .env files. Use explicit UTF-8 only on Windows. # fail on UTF-8 .env files. Use explicit UTF-8 only on Windows.
open_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {} open_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {}
with open(env_path, **open_kw) as f: with open(env_path, **open_kw) as f:
for line in f: raw_lines = f.readlines()
# Sanitize before parsing: split concatenated lines & drop stale
# placeholders so corrupted .env files don't produce invalid tokens.
lines = _sanitize_env_lines(raw_lines)
for line in lines:
line = line.strip() line = line.strip()
if line and not line.startswith('#') and '=' in line: if line and not line.startswith('#') and '=' in line:
key, _, value = line.partition('=') key, _, value = line.partition('=')
@ -2417,7 +2588,7 @@ def load_env() -> Dict[str, str]:
def _sanitize_env_lines(lines: list) -> list: def _sanitize_env_lines(lines: list) -> list:
"""Fix corrupted .env lines before writing. """Fix corrupted .env lines before reading or writing.
Handles two known corruption patterns: Handles two known corruption patterns:
1. Concatenated KEY=VALUE pairs on a single line (missing newline between 1. Concatenated KEY=VALUE pairs on a single line (missing newline between
@ -2650,6 +2821,28 @@ def save_env_value_secure(key: str, value: str) -> Dict[str, Any]:
def reload_env() -> int:
    """Re-read ~/.hermes/.env into os.environ. Returns count of vars updated.

    Adds or updates variables whose value changed, and removes variables
    that were deleted from the .env file — but only those known to Hermes
    (OPTIONAL_ENV_VARS and _EXTRA_ENV_KEYS) so unrelated environment is
    never clobbered.
    """
    env_vars = load_env()
    changed = 0

    # Apply additions and value changes.
    for name, value in env_vars.items():
        if os.environ.get(name) != value:
            os.environ[name] = value
            changed += 1

    # Drop known Hermes vars that disappeared from .env.
    managed = set(OPTIONAL_ENV_VARS.keys()) | _EXTRA_ENV_KEYS
    for name in managed - set(env_vars):
        if name in os.environ:
            del os.environ[name]
            changed += 1

    return changed
def get_env_value(key: str) -> Optional[str]: def get_env_value(key: str) -> Optional[str]:
"""Get a value from ~/.hermes/.env or environment.""" """Get a value from ~/.hermes/.env or environment."""
# Check environment first # Check environment first
@ -2772,10 +2965,11 @@ def show_config():
print(f" Threshold: {compression.get('threshold', 0.50) * 100:.0f}%") print(f" Threshold: {compression.get('threshold', 0.50) * 100:.0f}%")
print(f" Target ratio: {compression.get('target_ratio', 0.20) * 100:.0f}% of threshold preserved") print(f" Target ratio: {compression.get('target_ratio', 0.20) * 100:.0f}% of threshold preserved")
print(f" Protect last: {compression.get('protect_last_n', 20)} messages") print(f" Protect last: {compression.get('protect_last_n', 20)} messages")
_sm = compression.get('summary_model', '') or '(main model)' _aux_comp = config.get('auxiliary', {}).get('compression', {})
_sm = _aux_comp.get('model', '') or '(auto)'
print(f" Model: {_sm}") print(f" Model: {_sm}")
comp_provider = compression.get('summary_provider', 'auto') comp_provider = _aux_comp.get('provider', 'auto')
if comp_provider != 'auto': if comp_provider and comp_provider != 'auto':
print(f" Provider: {comp_provider}") print(f" Provider: {comp_provider}")
# Auxiliary models # Auxiliary models

View file

@ -117,14 +117,30 @@ def _gh_cli_candidates() -> list[str]:
def _try_gh_cli_token() -> Optional[str]: def _try_gh_cli_token() -> Optional[str]:
"""Return a token from ``gh auth token`` when the GitHub CLI is available.""" """Return a token from ``gh auth token`` when the GitHub CLI is available.
When COPILOT_GH_HOST is set, passes ``--hostname`` so gh returns the
correct host's token. Also strips GITHUB_TOKEN / GH_TOKEN from the
subprocess environment so ``gh`` reads from its own credential store
(hosts.yml) instead of just echoing the env var back.
"""
hostname = os.getenv("COPILOT_GH_HOST", "").strip()
# Build a clean env so gh doesn't short-circuit on GITHUB_TOKEN / GH_TOKEN
clean_env = {k: v for k, v in os.environ.items()
if k not in ("GITHUB_TOKEN", "GH_TOKEN")}
for gh_path in _gh_cli_candidates(): for gh_path in _gh_cli_candidates():
cmd = [gh_path, "auth", "token"]
if hostname:
cmd += ["--hostname", hostname]
try: try:
result = subprocess.run( result = subprocess.run(
[gh_path, "auth", "token"], cmd,
capture_output=True, capture_output=True,
text=True, text=True,
timeout=5, timeout=5,
env=clean_env,
) )
except (FileNotFoundError, subprocess.TimeoutExpired) as exc: except (FileNotFoundError, subprocess.TimeoutExpired) as exc:
logger.debug("gh CLI token lookup failed (%s): %s", gh_path, exc) logger.debug("gh CLI token lookup failed (%s): %s", gh_path, exc)

336
hermes_cli/debug.py Normal file
View file

@ -0,0 +1,336 @@
"""``hermes debug`` — debug tools for Hermes Agent.
Currently supports:
hermes debug share Upload debug report (system info + logs) to a
paste service and print a shareable URL.
"""
import io
import sys
import urllib.error
import urllib.parse
import urllib.request
from pathlib import Path
from typing import Optional
from hermes_constants import get_hermes_home
# ---------------------------------------------------------------------------
# Paste services — try paste.rs first, dpaste.com as fallback.
# ---------------------------------------------------------------------------
# Endpoint for paste.rs — primary upload target (accepts a plain POST body).
_PASTE_RS_URL = "https://paste.rs/"
# Endpoint for dpaste.com — fallback target (multipart form POST).
_DPASTE_COM_URL = "https://dpaste.com/api/"
# Maximum bytes to read from a single log file for upload.
# paste.rs caps at ~1 MB; we stay under that with headroom.
_MAX_LOG_BYTES = 512_000
def _upload_paste_rs(content: str) -> str:
    """POST *content* to paste.rs and return the new paste's URL.

    paste.rs takes the raw body of a plain-text POST and answers with
    the paste URL as its response body.  Raises ValueError when the
    response does not look like a URL.
    """
    headers = {
        "Content-Type": "text/plain; charset=utf-8",
        "User-Agent": "hermes-agent/debug-share",
    }
    request = urllib.request.Request(
        _PASTE_RS_URL,
        data=content.encode("utf-8"),
        method="POST",
        headers=headers,
    )
    with urllib.request.urlopen(request, timeout=30) as response:
        paste_url = response.read().decode("utf-8").strip()
    if not paste_url.startswith("http"):
        raise ValueError(f"Unexpected response from paste.rs: {paste_url[:200]}")
    return paste_url
def _upload_dpaste_com(content: str, expiry_days: int = 7) -> str:
    """POST *content* to dpaste.com (multipart form) and return the URL.

    Raises ValueError when the response body does not look like a URL.
    """
    boundary = "----HermesDebugBoundary9f3c"

    def _encode_field(name: str, value: str) -> str:
        # One multipart/form-data section carrying a single text field.
        return (
            f"--{boundary}\r\n"
            f'Content-Disposition: form-data; name="{name}"\r\n'
            f"\r\n"
            f"{value}\r\n"
        )

    parts = [
        _encode_field("content", content),
        _encode_field("syntax", "text"),
        _encode_field("expiry_days", str(expiry_days)),
        f"--{boundary}--\r\n",
    ]
    request = urllib.request.Request(
        _DPASTE_COM_URL,
        data="".join(parts).encode("utf-8"),
        method="POST",
        headers={
            "Content-Type": f"multipart/form-data; boundary={boundary}",
            "User-Agent": "hermes-agent/debug-share",
        },
    )
    with urllib.request.urlopen(request, timeout=30) as response:
        paste_url = response.read().decode("utf-8").strip()
    if not paste_url.startswith("http"):
        raise ValueError(f"Unexpected response from dpaste.com: {paste_url[:200]}")
    return paste_url
def upload_to_pastebin(content: str, expiry_days: int = 7) -> str:
    """Upload *content*, trying paste.rs first and dpaste.com second.

    Returns the URL from the first service that succeeds.  Raises
    RuntimeError listing every per-service error when both fail.
    """
    errors: list[str] = []
    # Ordered fallback chain: paste.rs (simple, fast), then dpaste.com
    # (supports an explicit expiry).
    attempts = (
        ("paste.rs", lambda: _upload_paste_rs(content)),
        ("dpaste.com", lambda: _upload_dpaste_com(content, expiry_days=expiry_days)),
    )
    for service, uploader in attempts:
        try:
            return uploader()
        except Exception as exc:
            errors.append(f"{service}: {exc}")
    raise RuntimeError(
        "Failed to upload to any paste service:\n " + "\n ".join(errors)
    )
# ---------------------------------------------------------------------------
# Log file reading
# ---------------------------------------------------------------------------
def _resolve_log_path(log_name: str) -> Optional[Path]:
    """Find the on-disk log file for *log_name*.

    Looks *log_name* up in ``hermes_cli.logs.LOG_FILES``, preferring the
    primary file and falling back to the most recent rotation
    (``<filename>.1``) when the primary is missing or empty.

    Returns the path of a non-empty log file, or None when *log_name*
    is unknown or no non-empty file exists.
    """
    from hermes_cli.logs import LOG_FILES

    filename = LOG_FILES.get(log_name)
    if not filename:
        return None

    log_dir = get_hermes_home() / "logs"
    primary = log_dir / filename
    if primary.exists() and primary.stat().st_size > 0:
        return primary

    # BUG FIX: the rotated name must derive from the looked-up filename.
    # The previous f-string contained a literal placeholder and could
    # never match a real rotation file.
    rotated = log_dir / f"{filename}.1"
    if rotated.exists() and rotated.stat().st_size > 0:
        return rotated
    return None
def _read_log_tail(log_name: str, num_lines: int) -> str:
    """Return the last *num_lines* of the named log, or a placeholder.

    Placeholders are "(file not found)" when no log file resolves and
    "(error reading: ...)" when the read itself fails.
    """
    from hermes_cli.logs import _read_last_n_lines

    path = _resolve_log_path(log_name)
    if path is None:
        return "(file not found)"
    try:
        tail = "".join(_read_last_n_lines(path, num_lines))
        return tail.rstrip("\n")
    except Exception as exc:
        return f"(error reading: {exc})"
def _read_full_log(log_name: str, max_bytes: int = _MAX_LOG_BYTES) -> Optional[str]:
    """Return a log file's content for standalone upload.

    Reads at most *max_bytes* from the end of the file, dropping the
    partial first line when truncating.  Returns None when the file is
    missing, empty, or unreadable (best-effort).
    """
    path = _resolve_log_path(log_name)
    if path is None:
        return None
    try:
        total = path.stat().st_size
        if total == 0:
            return None
        if total <= max_bytes:
            # Small enough — return the whole file verbatim.
            return path.read_text(encoding="utf-8", errors="replace")
        with open(path, "rb") as handle:
            handle.seek(total - max_bytes)
            handle.readline()  # discard the partial line at the seek point
            tail = handle.read().decode("utf-8", errors="replace")
        return f"[... truncated — showing last ~{max_bytes // 1024}KB ...]\n{tail}"
    except Exception:
        return None
# ---------------------------------------------------------------------------
# Debug report collection
# ---------------------------------------------------------------------------
def _capture_dump() -> str:
    """Run ``hermes dump`` in-process and return its stdout as a string.

    Temporarily swaps ``sys.stdout`` for a StringIO buffer; a SystemExit
    raised by ``run_dump`` is swallowed so capture always completes.
    """
    from hermes_cli.dump import run_dump

    class _FakeArgs:
        # Minimal stand-in for the argparse namespace run_dump expects.
        show_keys = False

    buffer = io.StringIO()
    saved_stdout = sys.stdout
    sys.stdout = buffer
    try:
        run_dump(_FakeArgs())
    except SystemExit:
        pass
    finally:
        sys.stdout = saved_stdout
    return buffer.getvalue()
def collect_debug_report(*, log_lines: int = 200, dump_text: str = "") -> str:
    """Assemble the summary debug report: system dump plus log tails.

    Parameters
    ----------
    log_lines
        Number of recent lines to include per log file.
    dump_text
        Pre-captured dump output.  If empty, ``hermes dump`` is run
        internally.

    Returns the report as a plain-text string ready for upload.
    """
    if not dump_text:
        dump_text = _capture_dump()

    # errors/gateway tails are capped at 100 lines regardless of log_lines.
    errors_lines = min(log_lines, 100)
    sections = [
        dump_text,
        "\n\n",
        f"--- agent.log (last {log_lines} lines) ---\n",
        _read_log_tail("agent", log_lines),
        "\n\n",
        f"--- errors.log (last {errors_lines} lines) ---\n",
        _read_log_tail("errors", errors_lines),
        "\n\n",
        f"--- gateway.log (last {errors_lines} lines) ---\n",
        _read_log_tail("gateway", errors_lines),
        "\n",
    ]
    return "".join(sections)
# ---------------------------------------------------------------------------
# CLI entry points
# ---------------------------------------------------------------------------
def run_debug_share(args):
    """Collect debug report + full logs, upload each, print URLs.

    Reads ``lines``, ``expire``, and ``local`` from *args* (argparse
    namespace), falling back to 200 / 7 / False.  With ``--local`` the
    report and full logs are printed instead of uploaded.  Exits with
    status 1 when the summary-report upload fails; full-log upload
    failures are reported but non-fatal.
    """
    log_lines = getattr(args, "lines", 200)
    expiry = getattr(args, "expire", 7)
    local_only = getattr(args, "local", False)
    print("Collecting debug report...")
    # Capture dump once — prepended to every paste for context.
    dump_text = _capture_dump()
    report = collect_debug_report(log_lines=log_lines, dump_text=dump_text)
    # Full logs are optional: None when the file is missing or empty.
    agent_log = _read_full_log("agent")
    gateway_log = _read_full_log("gateway")
    # Prepend dump header to each full log so every paste is self-contained.
    if agent_log:
        agent_log = dump_text + "\n\n--- full agent.log ---\n" + agent_log
    if gateway_log:
        gateway_log = dump_text + "\n\n--- full gateway.log ---\n" + gateway_log
    if local_only:
        # --local: dump everything to stdout, upload nothing.
        print(report)
        if agent_log:
            print(f"\n\n{'=' * 60}")
            print("FULL agent.log")
            print(f"{'=' * 60}\n")
            print(agent_log)
        if gateway_log:
            print(f"\n\n{'=' * 60}")
            print("FULL gateway.log")
            print(f"{'=' * 60}\n")
            print(gateway_log)
        return
    print("Uploading...")
    urls: dict[str, str] = {}
    failures: list[str] = []
    # 1. Summary report (required) — on failure, fall back to printing
    #    the report so the user can still share it manually.
    try:
        urls["Report"] = upload_to_pastebin(report, expiry_days=expiry)
    except RuntimeError as exc:
        print(f"\nUpload failed: {exc}", file=sys.stderr)
        print("\nFull report printed below — copy-paste it manually:\n")
        print(report)
        sys.exit(1)
    # 2. Full agent.log (optional)
    if agent_log:
        try:
            urls["agent.log"] = upload_to_pastebin(agent_log, expiry_days=expiry)
        except Exception as exc:
            failures.append(f"agent.log: {exc}")
    # 3. Full gateway.log (optional)
    if gateway_log:
        try:
            urls["gateway.log"] = upload_to_pastebin(gateway_log, expiry_days=expiry)
        except Exception as exc:
            failures.append(f"gateway.log: {exc}")
    # Print results — urls is non-empty here ("Report" succeeded above).
    label_width = max(len(k) for k in urls)
    print(f"\nDebug report uploaded:")
    for label, url in urls.items():
        print(f" {label:<{label_width}} {url}")
    if failures:
        print(f"\n (failed to upload: {', '.join(failures)})")
    print(f"\nShare these links with the Hermes team for support.")
def run_debug(args):
    """Dispatch ``hermes debug`` subcommands; unknown/absent → usage help."""
    if getattr(args, "debug_command", None) == "share":
        run_debug_share(args)
        return
    # Default: show help text for the debug command group.
    usage_lines = (
        "Usage: hermes debug share [--lines N] [--expire N] [--local]",
        "",
        "Commands:",
        " share Upload debug report to a paste service and print URL",
        "",
        "Options:",
        " --lines N Number of log lines to include (default: 200)",
        " --expire N Paste expiry in days (default: 7)",
        " --local Print report locally instead of uploading",
    )
    for line in usage_lines:
        print(line)

View file

@ -721,6 +721,7 @@ def run_doctor(args):
_apikey_providers = [ _apikey_providers = [
("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True), ("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True), ("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True), ("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True), ("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True),
("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True), ("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True),

View file

@ -44,6 +44,16 @@ def _redact(value: str) -> str:
def _gateway_status() -> str: def _gateway_status() -> str:
"""Return a short gateway status string.""" """Return a short gateway status string."""
if sys.platform.startswith("linux"): if sys.platform.startswith("linux"):
from hermes_constants import is_container
if is_container():
try:
from hermes_cli.gateway import find_gateway_pids
pids = find_gateway_pids()
if pids:
return f"running (docker, pid {pids[0]})"
return "stopped (docker)"
except Exception:
return "stopped (docker)"
try: try:
from hermes_cli.gateway import get_service_name from hermes_cli.gateway import get_service_name
svc = get_service_name() svc = get_service_name()

View file

@ -15,6 +15,51 @@ def _load_dotenv_with_fallback(path: Path, *, override: bool) -> None:
load_dotenv(dotenv_path=path, override=override, encoding="latin-1") load_dotenv(dotenv_path=path, override=override, encoding="latin-1")
def _sanitize_env_file_if_needed(path: Path) -> None:
    """Pre-sanitize a .env file before python-dotenv reads it.

    python-dotenv does not handle corrupted lines where multiple
    KEY=VALUE pairs are concatenated on a single line (missing newline).
    This produces mangled values e.g. a bot token duplicated 8×
    (see #8908).

    We delegate to ``hermes_cli.config._sanitize_env_lines`` which
    already knows all valid Hermes env-var names and can split
    concatenated lines correctly.

    The rewrite is atomic (temp file + ``os.replace``) and best-effort:
    any failure is swallowed so gateway startup is never blocked.
    """
    if not path.exists():
        return
    try:
        from hermes_cli.config import _sanitize_env_lines
    except ImportError:
        return  # early bootstrap — config module not available yet
    # errors="replace" so a mis-encoded byte can't abort sanitization.
    read_kw = {"encoding": "utf-8", "errors": "replace"}
    try:
        with open(path, **read_kw) as f:
            original = f.readlines()
        sanitized = _sanitize_env_lines(original)
        if sanitized != original:
            import tempfile
            # Write to a sibling temp file, fsync, then atomically swap
            # into place so a crash can't leave a half-written .env.
            fd, tmp = tempfile.mkstemp(
                dir=str(path.parent), suffix=".tmp", prefix=".env_"
            )
            try:
                with os.fdopen(fd, "w", encoding="utf-8") as f:
                    f.writelines(sanitized)
                    f.flush()
                    os.fsync(f.fileno())
                os.replace(tmp, path)
            except BaseException:
                # Clean up the temp file on any failure, then re-raise
                # into the outer best-effort handler.
                try:
                    os.unlink(tmp)
                except OSError:
                    pass
                raise
    except Exception:
        pass  # best-effort — don't block gateway startup
def load_hermes_dotenv( def load_hermes_dotenv(
*, *,
hermes_home: str | os.PathLike | None = None, hermes_home: str | os.PathLike | None = None,
@ -34,6 +79,10 @@ def load_hermes_dotenv(
user_env = home_path / ".env" user_env = home_path / ".env"
project_env_path = Path(project_env) if project_env else None project_env_path = Path(project_env) if project_env else None
# Fix corrupted .env files before python-dotenv parses them (#8908).
if user_env.exists():
_sanitize_env_file_if_needed(user_env)
if user_env.exists(): if user_env.exists():
_load_dotenv_with_fallback(user_env, override=True) _load_dotenv_with_fallback(user_env, override=True)
loaded.append(user_env) loaded.append(user_env)

View file

@ -331,7 +331,7 @@ def is_linux() -> bool:
return sys.platform.startswith('linux') return sys.platform.startswith('linux')
from hermes_constants import is_termux, is_wsl from hermes_constants import is_container, is_termux, is_wsl
def _wsl_systemd_operational() -> bool: def _wsl_systemd_operational() -> bool:
@ -353,7 +353,9 @@ def _wsl_systemd_operational() -> bool:
def supports_systemd_services() -> bool: def supports_systemd_services() -> bool:
if not is_linux() or is_termux(): if not is_linux() or is_termux() or is_container():
return False
if shutil.which("systemctl") is None:
return False return False
if is_wsl(): if is_wsl():
return _wsl_systemd_operational() return _wsl_systemd_operational()
@ -483,6 +485,21 @@ def _journalctl_cmd(system: bool = False) -> list[str]:
return ["journalctl"] if system else ["journalctl", "--user"] return ["journalctl"] if system else ["journalctl", "--user"]
def _run_systemctl(args: list[str], *, system: bool = False, **kwargs) -> subprocess.CompletedProcess:
    """Run a systemctl command, raising RuntimeError if systemctl is missing.

    Defense-in-depth: callers are gated by ``supports_systemd_services()``,
    but this ensures any future caller that bypasses the gate still gets a
    clear error instead of a raw ``FileNotFoundError`` traceback.
    """
    command = _systemctl_cmd(system) + args
    try:
        return subprocess.run(command, **kwargs)
    except FileNotFoundError:
        raise RuntimeError(
            "systemctl is not available on this system"
        ) from None
def _service_scope_label(system: bool = False) -> str: def _service_scope_label(system: bool = False) -> str:
return "system" if system else "user" return "system" if system else "user"
@ -751,14 +768,22 @@ def _remap_path_for_user(path: str, target_home_dir: str) -> str:
/root/.hermes/hermes-agent -> /home/alice/.hermes/hermes-agent /root/.hermes/hermes-agent -> /home/alice/.hermes/hermes-agent
/opt/hermes -> /opt/hermes (kept as-is) /opt/hermes -> /opt/hermes (kept as-is)
Note: this function intentionally does NOT resolve symlinks. A venv's
``bin/python`` is typically a symlink to the base interpreter (e.g. a
uv-managed CPython at ``~/.local/share/uv/python/.../python3.11``);
resolving that symlink swaps the unit's ``ExecStart`` to a bare Python
that has none of the venv's site-packages, so the service crashes on
the first ``import``. Keep the symlinked path so the venv activates
its own environment. Lexical expansion only via ``expanduser``.
""" """
current_home = Path.home().resolve() current_home = Path.home()
resolved = Path(path).resolve() p = Path(path).expanduser()
try: try:
relative = resolved.relative_to(current_home) relative = p.relative_to(current_home)
return str(Path(target_home_dir) / relative) return str(Path(target_home_dir) / relative)
except ValueError: except ValueError:
return str(resolved) return str(p)
def _hermes_home_for_target_user(target_home_dir: str) -> str: def _hermes_home_for_target_user(target_home_dir: str) -> str:
@ -929,7 +954,7 @@ def refresh_systemd_unit_if_needed(system: bool = False) -> bool:
expected_user = _read_systemd_user_from_unit(unit_path) if system else None expected_user = _read_systemd_user_from_unit(unit_path) if system else None
unit_path.write_text(generate_systemd_unit(system=system, run_as_user=expected_user), encoding="utf-8") unit_path.write_text(generate_systemd_unit(system=system, run_as_user=expected_user), encoding="utf-8")
subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30)
print(f"↻ Updated gateway {_service_scope_label(system)} service definition to match the current Hermes install") print(f"↻ Updated gateway {_service_scope_label(system)} service definition to match the current Hermes install")
return True return True
@ -1025,7 +1050,7 @@ def systemd_install(force: bool = False, system: bool = False, run_as_user: str
if not systemd_unit_is_current(system=system): if not systemd_unit_is_current(system=system):
print(f"↻ Repairing outdated {_service_scope_label(system)} systemd service at: {unit_path}") print(f"↻ Repairing outdated {_service_scope_label(system)} systemd service at: {unit_path}")
refresh_systemd_unit_if_needed(system=system) refresh_systemd_unit_if_needed(system=system)
subprocess.run(_systemctl_cmd(system) + ["enable", get_service_name()], check=True, timeout=30) _run_systemctl(["enable", get_service_name()], system=system, check=True, timeout=30)
print(f"{_service_scope_label(system).capitalize()} service definition updated") print(f"{_service_scope_label(system).capitalize()} service definition updated")
return return
print(f"Service already installed at: {unit_path}") print(f"Service already installed at: {unit_path}")
@ -1036,8 +1061,8 @@ def systemd_install(force: bool = False, system: bool = False, run_as_user: str
print(f"Installing {_service_scope_label(system)} systemd service to: {unit_path}") print(f"Installing {_service_scope_label(system)} systemd service to: {unit_path}")
unit_path.write_text(generate_systemd_unit(system=system, run_as_user=run_as_user), encoding="utf-8") unit_path.write_text(generate_systemd_unit(system=system, run_as_user=run_as_user), encoding="utf-8")
subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30)
subprocess.run(_systemctl_cmd(system) + ["enable", get_service_name()], check=True, timeout=30) _run_systemctl(["enable", get_service_name()], system=system, check=True, timeout=30)
print() print()
print(f"{_service_scope_label(system).capitalize()} service installed and enabled!") print(f"{_service_scope_label(system).capitalize()} service installed and enabled!")
@ -1063,15 +1088,15 @@ def systemd_uninstall(system: bool = False):
if system: if system:
_require_root_for_system_service("uninstall") _require_root_for_system_service("uninstall")
subprocess.run(_systemctl_cmd(system) + ["stop", get_service_name()], check=False, timeout=90) _run_systemctl(["stop", get_service_name()], system=system, check=False, timeout=90)
subprocess.run(_systemctl_cmd(system) + ["disable", get_service_name()], check=False, timeout=30) _run_systemctl(["disable", get_service_name()], system=system, check=False, timeout=30)
unit_path = get_systemd_unit_path(system=system) unit_path = get_systemd_unit_path(system=system)
if unit_path.exists(): if unit_path.exists():
unit_path.unlink() unit_path.unlink()
print(f"✓ Removed {unit_path}") print(f"✓ Removed {unit_path}")
subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30)
print(f"{_service_scope_label(system).capitalize()} service uninstalled") print(f"{_service_scope_label(system).capitalize()} service uninstalled")
@ -1080,7 +1105,7 @@ def systemd_start(system: bool = False):
if system: if system:
_require_root_for_system_service("start") _require_root_for_system_service("start")
refresh_systemd_unit_if_needed(system=system) refresh_systemd_unit_if_needed(system=system)
subprocess.run(_systemctl_cmd(system) + ["start", get_service_name()], check=True, timeout=30) _run_systemctl(["start", get_service_name()], system=system, check=True, timeout=30)
print(f"{_service_scope_label(system).capitalize()} service started") print(f"{_service_scope_label(system).capitalize()} service started")
@ -1089,7 +1114,7 @@ def systemd_stop(system: bool = False):
system = _select_systemd_scope(system) system = _select_systemd_scope(system)
if system: if system:
_require_root_for_system_service("stop") _require_root_for_system_service("stop")
subprocess.run(_systemctl_cmd(system) + ["stop", get_service_name()], check=True, timeout=90) _run_systemctl(["stop", get_service_name()], system=system, check=True, timeout=90)
print(f"{_service_scope_label(system).capitalize()} service stopped") print(f"{_service_scope_label(system).capitalize()} service stopped")
@ -1105,7 +1130,7 @@ def systemd_restart(system: bool = False):
if pid is not None and _request_gateway_self_restart(pid): if pid is not None and _request_gateway_self_restart(pid):
print(f"{_service_scope_label(system).capitalize()} service restart requested") print(f"{_service_scope_label(system).capitalize()} service restart requested")
return return
subprocess.run(_systemctl_cmd(system) + ["reload-or-restart", get_service_name()], check=True, timeout=90) _run_systemctl(["reload-or-restart", get_service_name()], system=system, check=True, timeout=90)
print(f"{_service_scope_label(system).capitalize()} service restarted") print(f"{_service_scope_label(system).capitalize()} service restarted")
@ -1129,14 +1154,16 @@ def systemd_status(deep: bool = False, system: bool = False):
print(f" Run: {'sudo ' if system else ''}hermes gateway restart{scope_flag} # auto-refreshes the unit") print(f" Run: {'sudo ' if system else ''}hermes gateway restart{scope_flag} # auto-refreshes the unit")
print() print()
subprocess.run( _run_systemctl(
_systemctl_cmd(system) + ["status", get_service_name(), "--no-pager"], ["status", get_service_name(), "--no-pager"],
system=system,
capture_output=False, capture_output=False,
timeout=10, timeout=10,
) )
result = subprocess.run( result = _run_systemctl(
_systemctl_cmd(system) + ["is-active", get_service_name()], ["is-active", get_service_name()],
system=system,
capture_output=True, capture_output=True,
text=True, text=True,
timeout=10, timeout=10,
@ -2100,12 +2127,6 @@ def _setup_dingtalk():
_setup_standard_platform(dingtalk_platform) _setup_standard_platform(dingtalk_platform)
def _setup_feishu():
"""Configure Feishu / Lark via the standard platform setup."""
feishu_platform = next(p for p in _PLATFORMS if p["key"] == "feishu")
_setup_standard_platform(feishu_platform)
def _setup_wecom(): def _setup_wecom():
"""Configure WeCom (Enterprise WeChat) via the standard platform setup.""" """Configure WeCom (Enterprise WeChat) via the standard platform setup."""
wecom_platform = next(p for p in _PLATFORMS if p["key"] == "wecom") wecom_platform = next(p for p in _PLATFORMS if p["key"] == "wecom")
@ -2129,24 +2150,24 @@ def _is_service_running() -> bool:
if user_unit_exists: if user_unit_exists:
try: try:
result = subprocess.run( result = _run_systemctl(
_systemctl_cmd(False) + ["is-active", get_service_name()], ["is-active", get_service_name()],
capture_output=True, text=True, timeout=10, system=False, capture_output=True, text=True, timeout=10,
) )
if result.stdout.strip() == "active": if result.stdout.strip() == "active":
return True return True
except subprocess.TimeoutExpired: except (RuntimeError, subprocess.TimeoutExpired):
pass pass
if system_unit_exists: if system_unit_exists:
try: try:
result = subprocess.run( result = _run_systemctl(
_systemctl_cmd(True) + ["is-active", get_service_name()], ["is-active", get_service_name()],
capture_output=True, text=True, timeout=10, system=True, capture_output=True, text=True, timeout=10,
) )
if result.stdout.strip() == "active": if result.stdout.strip() == "active":
return True return True
except subprocess.TimeoutExpired: except (RuntimeError, subprocess.TimeoutExpired):
pass pass
return False return False
@ -2290,6 +2311,178 @@ def _setup_weixin():
print_info(f" User ID: {user_id}") print_info(f" User ID: {user_id}")
def _setup_feishu():
    """Interactive setup for Feishu / Lark — scan-to-create or manual credentials.

    Walks the user through: credential acquisition (QR scan-to-create or
    manual App ID/Secret), connection mode, DM authorization policy,
    group-chat policy, and an optional home channel.  All results are
    persisted via ``save_env_value``.
    """
    print()
    print(color(" ─── 🪽 Feishu / Lark Setup ───", Colors.CYAN))
    # Offer early exit when credentials already exist.
    existing_app_id = get_env_value("FEISHU_APP_ID")
    existing_secret = get_env_value("FEISHU_APP_SECRET")
    if existing_app_id and existing_secret:
        print()
        print_success("Feishu / Lark is already configured.")
        if not prompt_yes_no(" Reconfigure Feishu / Lark?", False):
            return
    # ── Choose setup method ──
    print()
    method_choices = [
        "Scan QR code to create a new bot automatically (recommended)",
        "Enter existing App ID and App Secret manually",
    ]
    method_idx = prompt_choice(" How would you like to set up Feishu / Lark?", method_choices, 0)
    credentials = None
    used_qr = False
    if method_idx == 0:
        # ── QR scan-to-create ──
        # Import failures and QR errors fall through to manual input.
        try:
            from gateway.platforms.feishu import qr_register
        except Exception as exc:
            print_error(f" Feishu / Lark onboard import failed: {exc}")
            qr_register = None
        if qr_register is not None:
            try:
                credentials = qr_register()
            except KeyboardInterrupt:
                print()
                print_warning(" Feishu / Lark setup cancelled.")
                return
            except Exception as exc:
                print_warning(f" QR registration failed: {exc}")
            if credentials:
                used_qr = True
        if not credentials:
            print_info(" QR setup did not complete. Continuing with manual input.")
    # ── Manual credential input ──
    if not credentials:
        print()
        print_info(" Go to https://open.feishu.cn/ (or https://open.larksuite.com/ for Lark)")
        print_info(" Create an app, enable the Bot capability, and copy the credentials.")
        print()
        app_id = prompt(" App ID", password=False)
        if not app_id:
            print_warning(" Skipped — Feishu / Lark won't work without an App ID.")
            return
        app_secret = prompt(" App Secret", password=True)
        if not app_secret:
            print_warning(" Skipped — Feishu / Lark won't work without an App Secret.")
            return
        domain_choices = ["feishu (China)", "lark (International)"]
        domain_idx = prompt_choice(" Domain", domain_choices, 0)
        domain = "lark" if domain_idx == 1 else "feishu"
        # Try to probe the bot with manual credentials
        # (verification failure is non-fatal — credentials are saved anyway).
        bot_name = None
        try:
            from gateway.platforms.feishu import probe_bot
            bot_info = probe_bot(app_id, app_secret, domain)
            if bot_info:
                bot_name = bot_info.get("bot_name")
                print_success(f" Credentials verified — bot: {bot_name or 'unnamed'}")
            else:
                print_warning(" Could not verify bot connection. Credentials saved anyway.")
        except Exception as exc:
            print_warning(f" Credential verification skipped: {exc}")
        credentials = {
            "app_id": app_id,
            "app_secret": app_secret,
            "domain": domain,
            "open_id": None,
            "bot_name": bot_name,
        }
    # ── Save core credentials ──
    app_id = credentials["app_id"]
    app_secret = credentials["app_secret"]
    domain = credentials.get("domain", "feishu")
    open_id = credentials.get("open_id")
    bot_name = credentials.get("bot_name")
    save_env_value("FEISHU_APP_ID", app_id)
    save_env_value("FEISHU_APP_SECRET", app_secret)
    save_env_value("FEISHU_DOMAIN", domain)
    # Bot identity is resolved at runtime via _hydrate_bot_identity().
    # ── Connection mode ──
    # QR-created bots always use websocket; manual setups get a choice.
    if used_qr:
        connection_mode = "websocket"
    else:
        print()
        mode_choices = [
            "WebSocket (recommended — no public URL needed)",
            "Webhook (requires a reachable HTTP endpoint)",
        ]
        mode_idx = prompt_choice(" Connection mode", mode_choices, 0)
        connection_mode = "webhook" if mode_idx == 1 else "websocket"
        if connection_mode == "webhook":
            print_info(" Webhook defaults: 127.0.0.1:8765/feishu/webhook")
            print_info(" Override with FEISHU_WEBHOOK_HOST / FEISHU_WEBHOOK_PORT / FEISHU_WEBHOOK_PATH")
            print_info(" For signature verification, set FEISHU_ENCRYPT_KEY and FEISHU_VERIFICATION_TOKEN")
    save_env_value("FEISHU_CONNECTION_MODE", connection_mode)
    if bot_name:
        print()
        print_success(f" Bot created: {bot_name}")
    # ── DM security policy ──
    print()
    access_choices = [
        "Use DM pairing approval (recommended)",
        "Allow all direct messages",
        "Only allow listed user IDs",
    ]
    access_idx = prompt_choice(" How should direct messages be authorized?", access_choices, 0)
    if access_idx == 0:
        save_env_value("FEISHU_ALLOW_ALL_USERS", "false")
        save_env_value("FEISHU_ALLOWED_USERS", "")
        print_success(" DM pairing enabled.")
        print_info(" Unknown users can request access; approve with `hermes pairing approve`.")
    elif access_idx == 1:
        save_env_value("FEISHU_ALLOW_ALL_USERS", "true")
        save_env_value("FEISHU_ALLOWED_USERS", "")
        print_warning(" Open DM access enabled for Feishu / Lark.")
    else:
        save_env_value("FEISHU_ALLOW_ALL_USERS", "false")
        # Pre-fill the allowlist with the QR-registered user's open_id when known.
        default_allow = open_id or ""
        allowlist = prompt(" Allowed user IDs (comma-separated)", default_allow, password=False).replace(" ", "")
        save_env_value("FEISHU_ALLOWED_USERS", allowlist)
        print_success(" Allowlist saved.")
    # ── Group policy ──
    print()
    group_choices = [
        "Respond only when @mentioned in groups (recommended)",
        "Disable group chats",
    ]
    group_idx = prompt_choice(" How should group chats be handled?", group_choices, 0)
    if group_idx == 0:
        save_env_value("FEISHU_GROUP_POLICY", "open")
        print_info(" Group chats enabled (bot must be @mentioned).")
    else:
        save_env_value("FEISHU_GROUP_POLICY", "disabled")
        print_info(" Group chats disabled.")
    # ── Home channel ──
    print()
    home_channel = prompt(" Home chat ID (optional, for cron/notifications)", password=False)
    if home_channel:
        save_env_value("FEISHU_HOME_CHANNEL", home_channel)
        print_success(f" Home channel set to {home_channel}")
    print()
    print_success("🪽 Feishu / Lark configured!")
    print_info(f" App ID: {app_id}")
    print_info(f" Domain: {domain}")
    if bot_name:
        print_info(f" Bot: {bot_name}")
def _setup_signal(): def _setup_signal():
"""Interactive setup for Signal messenger.""" """Interactive setup for Signal messenger."""
import shutil import shutil
@ -2467,6 +2660,8 @@ def gateway_setup():
_setup_signal() _setup_signal()
elif platform["key"] == "weixin": elif platform["key"] == "weixin":
_setup_weixin() _setup_weixin()
elif platform["key"] == "feishu":
_setup_feishu()
else: else:
_setup_standard_platform(platform) _setup_standard_platform(platform)
@ -2606,6 +2801,15 @@ def gateway_command(args):
print(" tmux new -s hermes 'hermes gateway run' # persistent via tmux") print(" tmux new -s hermes 'hermes gateway run' # persistent via tmux")
print(" nohup hermes gateway run > ~/.hermes/logs/gateway.log 2>&1 & # background") print(" nohup hermes gateway run > ~/.hermes/logs/gateway.log 2>&1 & # background")
sys.exit(1) sys.exit(1)
elif is_container():
print("Service installation is not needed inside a Docker container.")
print("The container runtime is your service manager — use Docker restart policies instead:")
print()
print(" docker run --restart unless-stopped ... # auto-restart on crash/reboot")
print(" docker restart <container> # manual restart")
print()
print("To run the gateway: hermes gateway run")
sys.exit(0)
else: else:
print("Service installation not supported on this platform.") print("Service installation not supported on this platform.")
print("Run manually: hermes gateway run") print("Run manually: hermes gateway run")
@ -2624,6 +2828,13 @@ def gateway_command(args):
systemd_uninstall(system=system) systemd_uninstall(system=system)
elif is_macos(): elif is_macos():
launchd_uninstall() launchd_uninstall()
elif is_container():
print("Service uninstall is not applicable inside a Docker container.")
print("To stop the gateway, stop or remove the container:")
print()
print(" docker stop <container>")
print(" docker rm <container>")
sys.exit(0)
else: else:
print("Not supported on this platform.") print("Not supported on this platform.")
sys.exit(1) sys.exit(1)
@ -2648,6 +2859,15 @@ def gateway_command(args):
print() print()
print("To enable systemd: add systemd=true to /etc/wsl.conf and run 'wsl --shutdown' from PowerShell.") print("To enable systemd: add systemd=true to /etc/wsl.conf and run 'wsl --shutdown' from PowerShell.")
sys.exit(1) sys.exit(1)
elif is_container():
print("Service start is not applicable inside a Docker container.")
print("The gateway runs as the container's main process.")
print()
print(" docker start <container> # start a stopped container")
print(" docker restart <container> # restart a running container")
print()
print("Or run the gateway directly: hermes gateway run")
sys.exit(0)
else: else:
print("Not supported on this platform.") print("Not supported on this platform.")
sys.exit(1) sys.exit(1)

View file

@ -1213,7 +1213,7 @@ def select_provider_and_model(args=None):
from hermes_cli.auth import ( from hermes_cli.auth import (
resolve_provider, AuthError, format_auth_error, resolve_provider, AuthError, format_auth_error,
) )
from hermes_cli.config import load_config, get_env_value from hermes_cli.config import get_compatible_custom_providers, load_config, get_env_value
config = load_config() config = load_config()
current_model = config.get("model") current_model = config.get("model")
@ -1248,28 +1248,9 @@ def select_provider_and_model(args=None):
if active == "openrouter" and get_env_value("OPENAI_BASE_URL"): if active == "openrouter" and get_env_value("OPENAI_BASE_URL"):
active = "custom" active = "custom"
provider_labels = { from hermes_cli.models import CANONICAL_PROVIDERS, _PROVIDER_LABELS
"openrouter": "OpenRouter",
"nous": "Nous Portal", provider_labels = dict(_PROVIDER_LABELS) # derive from canonical list
"openai-codex": "OpenAI Codex",
"qwen-oauth": "Qwen OAuth",
"copilot-acp": "GitHub Copilot ACP",
"copilot": "GitHub Copilot",
"anthropic": "Anthropic",
"gemini": "Google AI Studio",
"zai": "Z.AI / GLM",
"kimi-coding": "Kimi / Moonshot",
"minimax": "MiniMax",
"minimax-cn": "MiniMax (China)",
"opencode-zen": "OpenCode Zen",
"opencode-go": "OpenCode Go",
"ai-gateway": "AI Gateway",
"kilocode": "Kilo Code",
"alibaba": "Alibaba Cloud (DashScope)",
"huggingface": "Hugging Face",
"xiaomi": "Xiaomi MiMo",
"custom": "Custom endpoint",
}
active_label = provider_labels.get(active, active) if active else "none" active_label = provider_labels.get(active, active) if active else "none"
print() print()
@ -1278,37 +1259,13 @@ def select_provider_and_model(args=None):
print() print()
# Step 1: Provider selection — top providers shown first, rest behind "More..." # Step 1: Provider selection — top providers shown first, rest behind "More..."
top_providers = [ # Derived from CANONICAL_PROVIDERS (single source of truth)
("nous", "Nous Portal (Nous Research subscription)"), top_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "top"]
("openrouter", "OpenRouter (100+ models, pay-per-use)"), extended_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "extended"]
("anthropic", "Anthropic (Claude models — API key or Claude Code)"),
("openai-codex", "OpenAI Codex"),
("qwen-oauth", "Qwen OAuth (reuses local Qwen CLI login)"),
("copilot", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),
("huggingface", "Hugging Face Inference Providers (20+ open models)"),
]
extended_providers = [
("copilot-acp", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
("gemini", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
("zai", "Z.AI / GLM (Zhipu AI direct API)"),
("kimi-coding", "Kimi / Moonshot (Moonshot AI direct API)"),
("minimax", "MiniMax (global direct API)"),
("minimax-cn", "MiniMax China (domestic direct API)"),
("kilocode", "Kilo Code (Kilo Gateway API)"),
("opencode-zen", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
("opencode-go", "OpenCode Go (open models, $10/month subscription)"),
("ai-gateway", "AI Gateway (Vercel — 200+ models, pay-per-use)"),
("alibaba", "Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"),
("xiaomi", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
]
def _named_custom_provider_map(cfg) -> dict[str, dict[str, str]]: def _named_custom_provider_map(cfg) -> dict[str, dict[str, str]]:
custom_providers_cfg = cfg.get("custom_providers") or []
custom_provider_map = {} custom_provider_map = {}
if not isinstance(custom_providers_cfg, list): for entry in get_compatible_custom_providers(cfg):
return custom_provider_map
for entry in custom_providers_cfg:
if not isinstance(entry, dict): if not isinstance(entry, dict):
continue continue
name = (entry.get("name") or "").strip() name = (entry.get("name") or "").strip()
@ -1316,12 +1273,20 @@ def select_provider_and_model(args=None):
if not name or not base_url: if not name or not base_url:
continue continue
key = "custom:" + name.lower().replace(" ", "-") key = "custom:" + name.lower().replace(" ", "-")
provider_key = (entry.get("provider_key") or "").strip()
if provider_key:
try:
resolve_provider(provider_key)
except AuthError:
key = provider_key
custom_provider_map[key] = { custom_provider_map[key] = {
"name": name, "name": name,
"base_url": base_url, "base_url": base_url,
"api_key": entry.get("api_key", ""), "api_key": entry.get("api_key", ""),
"key_env": entry.get("key_env", ""),
"model": entry.get("model", ""), "model": entry.get("model", ""),
"api_mode": entry.get("api_mode", ""), "api_mode": entry.get("api_mode", ""),
"provider_key": provider_key,
} }
return custom_provider_map return custom_provider_map
@ -1371,7 +1336,8 @@ def select_provider_and_model(args=None):
if selected_provider == "more": if selected_provider == "more":
ext_ordered = list(extended_providers) ext_ordered = list(extended_providers)
ext_ordered.append(("custom", "Custom endpoint (enter URL manually)")) ext_ordered.append(("custom", "Custom endpoint (enter URL manually)"))
if _custom_provider_map: _has_saved_custom_list = isinstance(config.get("custom_providers"), list) and bool(config.get("custom_providers"))
if _has_saved_custom_list:
ext_ordered.append(("remove-custom", "Remove a saved custom provider")) ext_ordered.append(("remove-custom", "Remove a saved custom provider"))
ext_ordered.append(("cancel", "Cancel")) ext_ordered.append(("cancel", "Cancel"))
@ -1398,7 +1364,7 @@ def select_provider_and_model(args=None):
_model_flow_copilot(config, current_model) _model_flow_copilot(config, current_model)
elif selected_provider == "custom": elif selected_provider == "custom":
_model_flow_custom(config) _model_flow_custom(config)
elif selected_provider.startswith("custom:"): elif selected_provider.startswith("custom:") or selected_provider in _custom_provider_map:
provider_info = _named_custom_provider_map(load_config()).get(selected_provider) provider_info = _named_custom_provider_map(load_config()).get(selected_provider)
if provider_info is None: if provider_info is None:
print( print(
@ -1413,7 +1379,7 @@ def select_provider_and_model(args=None):
_model_flow_anthropic(config, current_model) _model_flow_anthropic(config, current_model)
elif selected_provider == "kimi-coding": elif selected_provider == "kimi-coding":
_model_flow_kimi(config, current_model) _model_flow_kimi(config, current_model)
elif selected_provider in ("gemini", "zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface", "xiaomi"): elif selected_provider in ("gemini", "deepseek", "xai", "zai", "kimi-coding-cn", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface", "xiaomi"):
_model_flow_api_key_provider(config, selected_provider, current_model) _model_flow_api_key_provider(config, selected_provider, current_model)
# ── Post-switch cleanup: clear stale OPENAI_BASE_URL ────────────── # ── Post-switch cleanup: clear stale OPENAI_BASE_URL ──────────────
@ -2083,7 +2049,9 @@ def _model_flow_named_custom(config, provider_info):
name = provider_info["name"] name = provider_info["name"]
base_url = provider_info["base_url"] base_url = provider_info["base_url"]
api_key = provider_info.get("api_key", "") api_key = provider_info.get("api_key", "")
key_env = provider_info.get("key_env", "")
saved_model = provider_info.get("model", "") saved_model = provider_info.get("model", "")
provider_key = (provider_info.get("provider_key") or "").strip()
print(f" Provider: {name}") print(f" Provider: {name}")
print(f" URL: {base_url}") print(f" URL: {base_url}")
@ -2166,6 +2134,11 @@ def _model_flow_named_custom(config, provider_info):
if not isinstance(model, dict): if not isinstance(model, dict):
model = {"default": model} if model else {} model = {"default": model} if model else {}
cfg["model"] = model cfg["model"] = model
if provider_key:
model["provider"] = provider_key
model.pop("base_url", None)
model.pop("api_key", None)
else:
model["provider"] = "custom" model["provider"] = "custom"
model["base_url"] = base_url model["base_url"] = base_url
if api_key: if api_key:
@ -2179,6 +2152,21 @@ def _model_flow_named_custom(config, provider_info):
save_config(cfg) save_config(cfg)
deactivate_provider() deactivate_provider()
# Persist the selected model back to whichever schema owns this endpoint.
if provider_key:
cfg = load_config()
providers_cfg = cfg.get("providers")
if isinstance(providers_cfg, dict):
provider_entry = providers_cfg.get(provider_key)
if isinstance(provider_entry, dict):
provider_entry["default_model"] = model_name
if api_key and not str(provider_entry.get("api_key", "") or "").strip():
provider_entry["api_key"] = api_key
if key_env and not str(provider_entry.get("key_env", "") or "").strip():
provider_entry["key_env"] = key_env
cfg["providers"] = providers_cfg
save_config(cfg)
else:
# Save model name to the custom_providers entry for next time # Save model name to the custom_providers entry for next time
_save_custom_provider(base_url, api_key, model_name) _save_custom_provider(base_url, api_key, model_name)
@ -3048,6 +3036,12 @@ def cmd_dump(args):
run_dump(args) run_dump(args)
def cmd_debug(args):
"""Debug tools (share report, etc.)."""
from hermes_cli.debug import run_debug
run_debug(args)
def cmd_config(args): def cmd_config(args):
"""Configuration management.""" """Configuration management."""
from hermes_cli.config import config_command from hermes_cli.config import config_command
@ -3056,6 +3050,10 @@ def cmd_config(args):
def cmd_backup(args): def cmd_backup(args):
"""Back up Hermes home directory to a zip file.""" """Back up Hermes home directory to a zip file."""
if getattr(args, "quick", False):
from hermes_cli.backup import run_quick_backup
run_quick_backup(args)
else:
from hermes_cli.backup import run_backup from hermes_cli.backup import run_backup
run_backup(args) run_backup(args)
@ -3184,6 +3182,44 @@ def _gateway_prompt(prompt_text: str, default: str = "", timeout: float = 300.0)
return default return default
def _build_web_ui(web_dir: Path, *, fatal: bool = False) -> bool:
"""Build the web UI frontend if npm is available.
Args:
web_dir: Path to the ``web/`` source directory.
fatal: If True, print error guidance and return False on failure
instead of a soft warning (used by ``hermes web``).
Returns True if the build succeeded or was skipped (no package.json).
"""
if not (web_dir / "package.json").exists():
return True
import shutil
npm = shutil.which("npm")
if not npm:
if fatal:
print("Web UI frontend not built and npm is not available.")
print("Install Node.js, then run: cd web && npm install && npm run build")
return not fatal
print("→ Building web UI...")
r1 = subprocess.run([npm, "install", "--silent"], cwd=web_dir, capture_output=True)
if r1.returncode != 0:
print(f" {'' if fatal else ''} Web UI npm install failed"
+ ("" if fatal else " (hermes web will not be available)"))
if fatal:
print(" Run manually: cd web && npm install && npm run build")
return False
r2 = subprocess.run([npm, "run", "build"], cwd=web_dir, capture_output=True)
if r2.returncode != 0:
print(f" {'' if fatal else ''} Web UI build failed"
+ ("" if fatal else " (hermes web will not be available)"))
if fatal:
print(" Run manually: cd web && npm install && npm run build")
return False
print(" ✓ Web UI built")
return True
def _update_via_zip(args): def _update_via_zip(args):
"""Update Hermes Agent by downloading a ZIP archive. """Update Hermes Agent by downloading a ZIP archive.
@ -3280,6 +3316,7 @@ def _update_via_zip(args):
_install_python_dependencies_with_optional_fallback(pip_cmd) _install_python_dependencies_with_optional_fallback(pip_cmd)
_update_node_dependencies() _update_node_dependencies()
_build_web_ui(PROJECT_ROOT / "web")
# Sync skills # Sync skills
try: try:
@ -4055,6 +4092,7 @@ def cmd_update(args):
_install_python_dependencies_with_optional_fallback(pip_cmd) _install_python_dependencies_with_optional_fallback(pip_cmd)
_update_node_dependencies() _update_node_dependencies()
_build_web_ui(PROJECT_ROOT / "web")
print() print()
print("✓ Code updated!") print("✓ Code updated!")
@ -4337,7 +4375,7 @@ def _coalesce_session_name_args(argv: list) -> list:
"chat", "model", "gateway", "setup", "whatsapp", "login", "logout", "auth", "chat", "model", "gateway", "setup", "whatsapp", "login", "logout", "auth",
"status", "cron", "doctor", "config", "pairing", "skills", "tools", "status", "cron", "doctor", "config", "pairing", "skills", "tools",
"mcp", "sessions", "insights", "version", "update", "uninstall", "mcp", "sessions", "insights", "version", "update", "uninstall",
"profile", "profile", "dashboard",
} }
_SESSION_FLAGS = {"-c", "--continue", "-r", "--resume"} _SESSION_FLAGS = {"-c", "--continue", "-r", "--resume"}
@ -4487,18 +4525,24 @@ def cmd_profile(args):
print(f' Add to your shell config (~/.bashrc or ~/.zshrc):') print(f' Add to your shell config (~/.bashrc or ~/.zshrc):')
print(f' export PATH="$HOME/.local/bin:$PATH"') print(f' export PATH="$HOME/.local/bin:$PATH"')
# Profile dir for display
try:
profile_dir_display = "~/" + str(profile_dir.relative_to(Path.home()))
except ValueError:
profile_dir_display = str(profile_dir)
# Next steps # Next steps
print(f"\nNext steps:") print(f"\nNext steps:")
print(f" {name} setup Configure API keys and model") print(f" {name} setup Configure API keys and model")
print(f" {name} chat Start chatting") print(f" {name} chat Start chatting")
print(f" {name} gateway start Start the messaging gateway") print(f" {name} gateway start Start the messaging gateway")
if clone or clone_all: if clone or clone_all:
try:
profile_dir_display = "~/" + str(profile_dir.relative_to(Path.home()))
except ValueError:
profile_dir_display = str(profile_dir)
print(f"\n Edit {profile_dir_display}/.env for different API keys") print(f"\n Edit {profile_dir_display}/.env for different API keys")
print(f" Edit {profile_dir_display}/SOUL.md for different personality") print(f" Edit {profile_dir_display}/SOUL.md for different personality")
else:
print(f"\n ⚠ This profile has no API keys yet. Run '{name} setup' first,")
print(f" or it will inherit keys from your shell environment.")
print(f" Edit {profile_dir_display}/SOUL.md to customize personality")
print() print()
except (ValueError, FileExistsError, FileNotFoundError) as e: except (ValueError, FileExistsError, FileNotFoundError) as e:
@ -4609,6 +4653,27 @@ def cmd_profile(args):
sys.exit(1) sys.exit(1)
def cmd_dashboard(args):
"""Start the web UI server."""
try:
import fastapi # noqa: F401
import uvicorn # noqa: F401
except ImportError:
print("Web UI dependencies not installed.")
print("Install them with: pip install hermes-agent[web]")
sys.exit(1)
if not _build_web_ui(PROJECT_ROOT / "web", fatal=True):
sys.exit(1)
from hermes_cli.web_server import start_server
start_server(
host=args.host,
port=args.port,
open_browser=not args.no_open,
)
def cmd_completion(args): def cmd_completion(args):
"""Print shell completion script.""" """Print shell completion script."""
from hermes_cli.profiles import generate_bash_completion, generate_zsh_completion from hermes_cli.profiles import generate_bash_completion, generate_zsh_completion
@ -4674,6 +4739,7 @@ Examples:
hermes logs -f Follow agent.log in real time hermes logs -f Follow agent.log in real time
hermes logs errors View errors.log hermes logs errors View errors.log
hermes logs --since 1h Lines from the last hour hermes logs --since 1h Lines from the last hour
hermes debug share Upload debug report for support
hermes update Update to latest version hermes update Update to latest version
For more help on a command: For more help on a command:
@ -4773,7 +4839,7 @@ For more help on a command:
) )
chat_parser.add_argument( chat_parser.add_argument(
"--provider", "--provider",
choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "xiaomi"], choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "kimi-coding-cn", "minimax", "minimax-cn", "kilocode", "xiaomi"],
default=None, default=None,
help="Inference provider (default: auto)" help="Inference provider (default: auto)"
) )
@ -5229,6 +5295,43 @@ For more help on a command:
) )
dump_parser.set_defaults(func=cmd_dump) dump_parser.set_defaults(func=cmd_dump)
# =========================================================================
# debug command
# =========================================================================
debug_parser = subparsers.add_parser(
"debug",
help="Debug tools — upload logs and system info for support",
description="Debug utilities for Hermes Agent. Use 'hermes debug share' to "
"upload a debug report (system info + recent logs) to a paste "
"service and get a shareable URL.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""\
Examples:
hermes debug share Upload debug report and print URL
hermes debug share --lines 500 Include more log lines
hermes debug share --expire 30 Keep paste for 30 days
hermes debug share --local Print report locally (no upload)
""",
)
debug_sub = debug_parser.add_subparsers(dest="debug_command")
share_parser = debug_sub.add_parser(
"share",
help="Upload debug report to a paste service and print a shareable URL",
)
share_parser.add_argument(
"--lines", type=int, default=200,
help="Number of log lines to include per log file (default: 200)",
)
share_parser.add_argument(
"--expire", type=int, default=7,
help="Paste expiry in days (default: 7)",
)
share_parser.add_argument(
"--local", action="store_true",
help="Print the report locally instead of uploading",
)
debug_parser.set_defaults(func=cmd_debug)
# ========================================================================= # =========================================================================
# backup command # backup command
# ========================================================================= # =========================================================================
@ -5236,12 +5339,22 @@ For more help on a command:
"backup", "backup",
help="Back up Hermes home directory to a zip file", help="Back up Hermes home directory to a zip file",
description="Create a zip archive of your entire Hermes configuration, " description="Create a zip archive of your entire Hermes configuration, "
"skills, sessions, and data (excludes the hermes-agent codebase)" "skills, sessions, and data (excludes the hermes-agent codebase). "
"Use --quick for a fast snapshot of just critical state files."
) )
backup_parser.add_argument( backup_parser.add_argument(
"-o", "--output", "-o", "--output",
help="Output path for the zip file (default: ~/hermes-backup-<timestamp>.zip)" help="Output path for the zip file (default: ~/hermes-backup-<timestamp>.zip)"
) )
backup_parser.add_argument(
"-q", "--quick",
action="store_true",
help="Quick snapshot: only critical state files (config, state.db, .env, auth, cron)"
)
backup_parser.add_argument(
"-l", "--label",
help="Label for the snapshot (only used with --quick)"
)
backup_parser.set_defaults(func=cmd_backup) backup_parser.set_defaults(func=cmd_backup)
# ========================================================================= # =========================================================================
@ -6082,6 +6195,19 @@ For more help on a command:
) )
completion_parser.set_defaults(func=cmd_completion) completion_parser.set_defaults(func=cmd_completion)
# =========================================================================
# dashboard command
# =========================================================================
dashboard_parser = subparsers.add_parser(
"dashboard",
help="Start the web UI dashboard",
description="Launch the Hermes Agent web dashboard for managing config, API keys, and sessions",
)
dashboard_parser.add_argument("--port", type=int, default=9119, help="Port (default 9119)")
dashboard_parser.add_argument("--host", default="127.0.0.1", help="Host (default 127.0.0.1)")
dashboard_parser.add_argument("--no-open", action="store_true", help="Don't open browser automatically")
dashboard_parser.set_defaults(func=cmd_dashboard)
# ========================================================================= # =========================================================================
# logs command # logs command
# ========================================================================= # =========================================================================

View file

@ -8,8 +8,9 @@ Different LLM providers expect model identifiers in different formats:
hyphens: ``claude-sonnet-4-6``. hyphens: ``claude-sonnet-4-6``.
- **Copilot** expects bare names *with* dots preserved: - **Copilot** expects bare names *with* dots preserved:
``claude-sonnet-4.6``. ``claude-sonnet-4.6``.
- **OpenCode Zen** follows the same dot-to-hyphen convention as - **OpenCode Zen** preserves dots for GPT/GLM/Gemini/Kimi/MiniMax-style
Anthropic: ``claude-sonnet-4-6``. model IDs, but Claude still uses hyphenated native names like
``claude-sonnet-4-6``.
- **OpenCode Go** preserves dots in model names: ``minimax-m2.7``. - **OpenCode Go** preserves dots in model names: ``minimax-m2.7``.
- **DeepSeek** only accepts two model identifiers: - **DeepSeek** only accepts two model identifiers:
``deepseek-chat`` and ``deepseek-reasoner``. ``deepseek-chat`` and ``deepseek-reasoner``.
@ -67,7 +68,6 @@ _AGGREGATOR_PROVIDERS: frozenset[str] = frozenset({
# Providers that want bare names with dots replaced by hyphens. # Providers that want bare names with dots replaced by hyphens.
_DOT_TO_HYPHEN_PROVIDERS: frozenset[str] = frozenset({ _DOT_TO_HYPHEN_PROVIDERS: frozenset[str] = frozenset({
"anthropic", "anthropic",
"opencode-zen",
}) })
# Providers that want bare names with dots preserved. # Providers that want bare names with dots preserved.
@ -88,6 +88,7 @@ _AUTHORITATIVE_NATIVE_PROVIDERS: frozenset[str] = frozenset({
_MATCHING_PREFIX_STRIP_PROVIDERS: frozenset[str] = frozenset({ _MATCHING_PREFIX_STRIP_PROVIDERS: frozenset[str] = frozenset({
"zai", "zai",
"kimi-coding", "kimi-coding",
"kimi-coding-cn",
"minimax", "minimax",
"minimax-cn", "minimax-cn",
"alibaba", "alibaba",
@ -329,6 +330,9 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str:
>>> normalize_model_for_provider("claude-sonnet-4.6", "opencode-zen") >>> normalize_model_for_provider("claude-sonnet-4.6", "opencode-zen")
'claude-sonnet-4-6' 'claude-sonnet-4-6'
>>> normalize_model_for_provider("minimax-m2.5-free", "opencode-zen")
'minimax-m2.5-free'
>>> normalize_model_for_provider("deepseek-v3", "deepseek") >>> normalize_model_for_provider("deepseek-v3", "deepseek")
'deepseek-chat' 'deepseek-chat'
@ -351,7 +355,16 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str:
if provider in _AGGREGATOR_PROVIDERS: if provider in _AGGREGATOR_PROVIDERS:
return _prepend_vendor(name) return _prepend_vendor(name)
# --- Anthropic / OpenCode: strip matching provider prefix, dots -> hyphens --- # --- OpenCode Zen: Claude stays hyphenated; other models keep dots ---
if provider == "opencode-zen":
bare = _strip_matching_provider_prefix(name, provider)
if "/" in bare:
return bare
if bare.lower().startswith("claude-"):
return _dots_to_hyphens(bare)
return bare
# --- Anthropic: strip matching provider prefix, dots -> hyphens ---
if provider in _DOT_TO_HYPHEN_PROVIDERS: if provider in _DOT_TO_HYPHEN_PROVIDERS:
bare = _strip_matching_provider_prefix(name, provider) bare = _strip_matching_provider_prefix(name, provider)
if "/" in bare: if "/" in bare:

View file

@ -21,6 +21,7 @@ OpenRouter variant suffixes (``:free``, ``:extended``, ``:fast``).
from __future__ import annotations from __future__ import annotations
import logging import logging
import re
from dataclasses import dataclass from dataclasses import dataclass
from typing import List, NamedTuple, Optional from typing import List, NamedTuple, Optional
@ -57,10 +58,36 @@ _HERMES_MODEL_WARNING = (
"(Claude, GPT, Gemini, DeepSeek, etc.)." "(Claude, GPT, Gemini, DeepSeek, etc.)."
) )
# Match only the real Nous Research Hermes 3 / Hermes 4 chat families.
# The previous substring check (`"hermes" in name.lower()`) false-positived on
# unrelated local Modelfiles like ``hermes-brain:qwen3-14b-ctx16k`` that just
# happen to carry "hermes" in their tag but are fully tool-capable.
#
# Positive examples the regex must match:
# NousResearch/Hermes-3-Llama-3.1-70B, hermes-4-405b, openrouter/hermes3:70b
# Negative examples it must NOT match:
# hermes-brain:qwen3-14b-ctx16k, qwen3:14b, claude-opus-4-6
_NOUS_HERMES_NON_AGENTIC_RE = re.compile(
r"(?:^|[/:])hermes[-_ ]?[34](?:[-_.:]|$)",
re.IGNORECASE,
)
def is_nous_hermes_non_agentic(model_name: str) -> bool:
"""Return True if *model_name* is a real Nous Hermes 3/4 chat model.
Used to decide whether to surface the non-agentic warning at startup.
Callers in :mod:`cli.py` and here should go through this single helper
so the two sites don't drift.
"""
if not model_name:
return False
return bool(_NOUS_HERMES_NON_AGENTIC_RE.search(model_name))
def _check_hermes_model_warning(model_name: str) -> str: def _check_hermes_model_warning(model_name: str) -> str:
"""Return a warning string if *model_name* looks like a Hermes LLM model.""" """Return a warning string if *model_name* is a Nous Hermes 3/4 chat model."""
if "hermes" in model_name.lower(): if is_nous_hermes_non_agentic(model_name):
return _HERMES_MODEL_WARNING return _HERMES_MODEL_WARNING
return "" return ""
@ -908,6 +935,65 @@ def list_authenticated_providers(
seen_slugs.add(pid) seen_slugs.add(pid)
seen_slugs.add(hermes_slug) seen_slugs.add(hermes_slug)
# --- 2b. Cross-check canonical provider list ---
# Catches providers that are in CANONICAL_PROVIDERS but weren't found
# in PROVIDER_TO_MODELS_DEV or HERMES_OVERLAYS (keeps /model in sync
# with `hermes model`).
try:
from hermes_cli.models import CANONICAL_PROVIDERS as _canon_provs
except ImportError:
_canon_provs = []
for _cp in _canon_provs:
if _cp.slug in seen_slugs:
continue
# Check credentials via PROVIDER_REGISTRY (auth.py)
_cp_config = _auth_registry.get(_cp.slug)
_cp_has_creds = False
if _cp_config and _cp_config.api_key_env_vars:
_cp_has_creds = any(os.environ.get(ev) for ev in _cp_config.api_key_env_vars)
# Also check auth store and credential pool
if not _cp_has_creds:
try:
from hermes_cli.auth import _load_auth_store
_cp_store = _load_auth_store()
_cp_providers_store = _cp_store.get("providers", {})
_cp_pool_store = _cp_store.get("credential_pool", {})
if _cp_store and (
_cp.slug in _cp_providers_store
or _cp.slug in _cp_pool_store
):
_cp_has_creds = True
except Exception:
pass
if not _cp_has_creds:
try:
from agent.credential_pool import load_pool
_cp_pool = load_pool(_cp.slug)
if _cp_pool.has_credentials():
_cp_has_creds = True
except Exception:
pass
if not _cp_has_creds:
continue
_cp_model_ids = curated.get(_cp.slug, [])
_cp_total = len(_cp_model_ids)
_cp_top = _cp_model_ids[:max_models]
results.append({
"slug": _cp.slug,
"name": _cp.label,
"is_current": _cp.slug == current_provider,
"is_user_defined": False,
"models": _cp_top,
"total_models": _cp_total,
"source": "canonical",
})
seen_slugs.add(_cp.slug)
# --- 3. User-defined endpoints from config --- # --- 3. User-defined endpoints from config ---
if user_providers and isinstance(user_providers, dict): if user_providers and isinstance(user_providers, dict):
for ep_name, ep_cfg in user_providers.items(): for ep_name, ep_cfg in user_providers.items():
@ -917,9 +1003,16 @@ def list_authenticated_providers(
api_url = ep_cfg.get("api", "") or ep_cfg.get("url", "") or "" api_url = ep_cfg.get("api", "") or ep_cfg.get("url", "") or ""
default_model = ep_cfg.get("default_model", "") default_model = ep_cfg.get("default_model", "")
# Build models list from both default_model and full models array
models_list = [] models_list = []
if default_model: if default_model:
models_list.append(default_model) models_list.append(default_model)
# Also include the full models list from config
cfg_models = ep_cfg.get("models", [])
if isinstance(cfg_models, list):
for m in cfg_models:
if m and m not in models_list:
models_list.append(m)
# Try to probe /v1/models if URL is set (but don't block on it) # Try to probe /v1/models if URL is set (but don't block on it)
# For now just show what we know from config # For now just show what we know from config

View file

@ -12,7 +12,7 @@ import os
import urllib.request import urllib.request
import urllib.error import urllib.error
from difflib import get_close_matches from difflib import get_close_matches
from typing import Any, Optional from typing import Any, NamedTuple, Optional
COPILOT_BASE_URL = "https://api.githubcopilot.com" COPILOT_BASE_URL = "https://api.githubcopilot.com"
COPILOT_MODELS_URL = f"{COPILOT_BASE_URL}/models" COPILOT_MODELS_URL = f"{COPILOT_BASE_URL}/models"
@ -70,13 +70,13 @@ def _codex_curated_models() -> list[str]:
_PROVIDER_MODELS: dict[str, list[str]] = { _PROVIDER_MODELS: dict[str, list[str]] = {
"nous": [ "nous": [
"xiaomi/mimo-v2-pro",
"anthropic/claude-opus-4.6", "anthropic/claude-opus-4.6",
"anthropic/claude-sonnet-4.6", "anthropic/claude-sonnet-4.6",
"anthropic/claude-sonnet-4.5", "anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4.5", "anthropic/claude-haiku-4.5",
"openai/gpt-5.4", "openai/gpt-5.4",
"openai/gpt-5.4-mini", "openai/gpt-5.4-mini",
"xiaomi/mimo-v2-pro",
"openai/gpt-5.3-codex", "openai/gpt-5.3-codex",
"google/gemini-3-pro-preview", "google/gemini-3-pro-preview",
"google/gemini-3-flash-preview", "google/gemini-3-flash-preview",
@ -130,6 +130,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"gemma-4-26b-it", "gemma-4-26b-it",
], ],
"zai": [ "zai": [
"glm-5.1",
"glm-5", "glm-5",
"glm-5-turbo", "glm-5-turbo",
"glm-4.7", "glm-4.7",
@ -157,6 +158,12 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"kimi-k2-turbo-preview", "kimi-k2-turbo-preview",
"kimi-k2-0905-preview", "kimi-k2-0905-preview",
], ],
"kimi-coding-cn": [
"kimi-k2.5",
"kimi-k2-thinking",
"kimi-k2-turbo-preview",
"kimi-k2-0905-preview",
],
"moonshot": [ "moonshot": [
"kimi-k2.5", "kimi-k2.5",
"kimi-k2-thinking", "kimi-k2-thinking",
@ -478,29 +485,55 @@ def check_nous_free_tier() -> bool:
return False # default to paid on error — don't block users return False # default to paid on error — don't block users
_PROVIDER_LABELS = { # ---------------------------------------------------------------------------
"openrouter": "OpenRouter", # Canonical provider list — single source of truth for provider identity.
"openai-codex": "OpenAI Codex", # Every code path that lists, displays, or iterates providers derives from
"copilot-acp": "GitHub Copilot ACP", # this list: hermes model, /model, /provider, list_authenticated_providers.
"nous": "Nous Portal", #
"copilot": "GitHub Copilot", # Fields:
"gemini": "Google AI Studio", # slug — internal provider ID (used in config.yaml, --provider flag)
"zai": "Z.AI / GLM", # label — short display name
"kimi-coding": "Kimi / Moonshot", # tier — "top" (shown first) or "extended" (behind "More...")
"minimax": "MiniMax", # tui_desc — longer description for the `hermes model` interactive picker
"minimax-cn": "MiniMax (China)", # ---------------------------------------------------------------------------
"anthropic": "Anthropic",
"deepseek": "DeepSeek", class ProviderEntry(NamedTuple):
"opencode-zen": "OpenCode Zen", slug: str
"opencode-go": "OpenCode Go", label: str
"ai-gateway": "AI Gateway", tier: str # "top" or "extended"
"kilocode": "Kilo Code", tui_desc: str # detailed description for `hermes model` TUI
"alibaba": "Alibaba Cloud (DashScope)",
"qwen-oauth": "Qwen OAuth (Portal)",
"huggingface": "Hugging Face", CANONICAL_PROVIDERS: list[ProviderEntry] = [
"xiaomi": "Xiaomi MiMo", # -- Top tier (shown by default) --
"custom": "Custom endpoint", ProviderEntry("nous", "Nous Portal", "top", "Nous Portal (Nous Research subscription)"),
} ProviderEntry("openrouter", "OpenRouter", "top", "OpenRouter (100+ models, pay-per-use)"),
ProviderEntry("anthropic", "Anthropic", "top", "Anthropic (Claude models — API key or Claude Code)"),
ProviderEntry("openai-codex", "OpenAI Codex", "top", "OpenAI Codex"),
ProviderEntry("qwen-oauth", "Qwen OAuth (Portal)", "top", "Qwen OAuth (reuses local Qwen CLI login)"),
ProviderEntry("copilot", "GitHub Copilot", "top", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),
ProviderEntry("huggingface", "Hugging Face", "top", "Hugging Face Inference Providers (20+ open models)"),
# -- Extended tier (behind "More..." in hermes model) --
ProviderEntry("copilot-acp", "GitHub Copilot ACP", "extended", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
ProviderEntry("gemini", "Google AI Studio", "extended", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
ProviderEntry("deepseek", "DeepSeek", "extended", "DeepSeek (DeepSeek-V3, R1, coder — direct API)"),
ProviderEntry("xai", "xAI", "extended", "xAI (Grok models — direct API)"),
ProviderEntry("zai", "Z.AI / GLM", "extended", "Z.AI / GLM (Zhipu AI direct API)"),
ProviderEntry("kimi-coding", "Kimi / Moonshot", "extended", "Kimi / Moonshot (Moonshot AI direct API)"),
ProviderEntry("kimi-coding-cn", "Kimi / Moonshot (China)", "extended", "Kimi / Moonshot China (Moonshot CN direct API)"),
ProviderEntry("minimax", "MiniMax", "extended", "MiniMax (global direct API)"),
ProviderEntry("minimax-cn", "MiniMax (China)", "extended", "MiniMax China (domestic direct API)"),
ProviderEntry("kilocode", "Kilo Code", "extended", "Kilo Code (Kilo Gateway API)"),
ProviderEntry("opencode-zen", "OpenCode Zen", "extended", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
ProviderEntry("opencode-go", "OpenCode Go", "extended", "OpenCode Go (open models, $10/month subscription)"),
ProviderEntry("ai-gateway", "AI Gateway", "extended", "AI Gateway (Vercel — 200+ models, pay-per-use)"),
ProviderEntry("alibaba", "Alibaba Cloud (DashScope)","extended", "Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"),
ProviderEntry("xiaomi", "Xiaomi MiMo", "extended", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
]
# Derived dicts — used throughout the codebase
_PROVIDER_LABELS = {p.slug: p.label for p in CANONICAL_PROVIDERS}
_PROVIDER_LABELS["custom"] = "Custom endpoint" # special case: not a named provider
_PROVIDER_ALIASES = { _PROVIDER_ALIASES = {
"glm": "zai", "glm": "zai",
@ -518,6 +551,8 @@ _PROVIDER_ALIASES = {
"google-ai-studio": "gemini", "google-ai-studio": "gemini",
"kimi": "kimi-coding", "kimi": "kimi-coding",
"moonshot": "kimi-coding", "moonshot": "kimi-coding",
"kimi-cn": "kimi-coding-cn",
"moonshot-cn": "kimi-coding-cn",
"minimax-china": "minimax-cn", "minimax-china": "minimax-cn",
"minimax_cn": "minimax-cn", "minimax_cn": "minimax-cn",
"claude": "anthropic", "claude": "anthropic",
@ -543,6 +578,9 @@ _PROVIDER_ALIASES = {
"huggingface-hub": "huggingface", "huggingface-hub": "huggingface",
"mimo": "xiaomi", "mimo": "xiaomi",
"xiaomi-mimo": "xiaomi", "xiaomi-mimo": "xiaomi",
"grok": "xai",
"x-ai": "xai",
"x.ai": "xai",
} }
@ -835,23 +873,20 @@ def list_available_providers() -> list[dict[str, str]]:
Each dict has ``id``, ``label``, and ``aliases``. Each dict has ``id``, ``label``, and ``aliases``.
Checks which providers have valid credentials configured. Checks which providers have valid credentials configured.
Derives the provider list from :data:`CANONICAL_PROVIDERS` (single
source of truth shared with ``hermes model``, ``/model``, etc.).
""" """
# Canonical providers in display order # Derive display order from canonical list + custom
_PROVIDER_ORDER = [ provider_order = [p.slug for p in CANONICAL_PROVIDERS] + ["custom"]
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
"gemini", "huggingface",
"zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
"qwen-oauth", "xiaomi",
"opencode-zen", "opencode-go",
"ai-gateway", "deepseek", "custom",
]
# Build reverse alias map # Build reverse alias map
aliases_for: dict[str, list[str]] = {} aliases_for: dict[str, list[str]] = {}
for alias, canonical in _PROVIDER_ALIASES.items(): for alias, canonical in _PROVIDER_ALIASES.items():
aliases_for.setdefault(canonical, []).append(alias) aliases_for.setdefault(canonical, []).append(alias)
result = [] result = []
for pid in _PROVIDER_ORDER: for pid in provider_order:
label = _PROVIDER_LABELS.get(pid, pid) label = _PROVIDER_LABELS.get(pid, pid)
alias_list = aliases_for.get(pid, []) alias_list = aliases_for.get(pid, [])
# Check if this provider has credentials available # Check if this provider has credentials available

View file

@ -459,6 +459,16 @@ def create_profile(
dst.parent.mkdir(parents=True, exist_ok=True) dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(src, dst) shutil.copy2(src, dst)
# Seed a default SOUL.md so the user has a file to customize immediately.
# Skipped when the profile already has one (from --clone / --clone-all).
soul_path = profile_dir / "SOUL.md"
if not soul_path.exists():
try:
from hermes_cli.default_soul import DEFAULT_SOUL_MD
soul_path.write_text(DEFAULT_SOUL_MD, encoding="utf-8")
except Exception:
pass # best-effort — don't fail profile creation over this
return profile_dir return profile_dir

View file

@ -179,6 +179,7 @@ ALIASES: Dict[str, str] = {
# kimi-for-coding (models.dev ID) # kimi-for-coding (models.dev ID)
"kimi": "kimi-for-coding", "kimi": "kimi-for-coding",
"kimi-coding": "kimi-for-coding", "kimi-coding": "kimi-for-coding",
"kimi-coding-cn": "kimi-for-coding",
"moonshot": "kimi-for-coding", "moonshot": "kimi-for-coding",
# minimax-cn # minimax-cn

View file

@ -26,7 +26,7 @@ from hermes_cli.auth import (
resolve_external_process_provider_credentials, resolve_external_process_provider_credentials,
has_usable_secret, has_usable_secret,
) )
from hermes_cli.config import load_config from hermes_cli.config import get_compatible_custom_providers, load_config
from hermes_constants import OPENROUTER_BASE_URL from hermes_constants import OPENROUTER_BASE_URL
@ -275,8 +275,46 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An
return None return None
config = load_config() config = load_config()
# First check providers: dict (new-style user-defined providers)
providers = config.get("providers")
if isinstance(providers, dict):
for ep_name, entry in providers.items():
if not isinstance(entry, dict):
continue
# Match exact name or normalized name
name_norm = _normalize_custom_provider_name(ep_name)
# Resolve the API key from the env var name stored in key_env
key_env = str(entry.get("key_env", "") or "").strip()
resolved_api_key = os.getenv(key_env, "").strip() if key_env else ""
if requested_norm in {ep_name, name_norm, f"custom:{name_norm}"}:
# Found match by provider key
base_url = entry.get("api") or entry.get("url") or entry.get("base_url") or ""
if base_url:
return {
"name": entry.get("name", ep_name),
"base_url": base_url.strip(),
"api_key": resolved_api_key,
"model": entry.get("default_model", ""),
}
# Also check the 'name' field if present
display_name = entry.get("name", "")
if display_name:
display_norm = _normalize_custom_provider_name(display_name)
if requested_norm in {display_name, display_norm, f"custom:{display_norm}"}:
# Found match by display name
base_url = entry.get("api") or entry.get("url") or entry.get("base_url") or ""
if base_url:
return {
"name": display_name,
"base_url": base_url.strip(),
"api_key": resolved_api_key,
"model": entry.get("default_model", ""),
}
# Fall back to custom_providers: list (legacy format)
custom_providers = config.get("custom_providers") custom_providers = config.get("custom_providers")
if not isinstance(custom_providers, list):
if isinstance(custom_providers, dict): if isinstance(custom_providers, dict):
logger.warning( logger.warning(
"custom_providers in config.yaml is a dict, not a list. " "custom_providers in config.yaml is a dict, not a list. "
@ -285,6 +323,10 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An
) )
return None return None
custom_providers = get_compatible_custom_providers(config)
if not custom_providers:
return None
for entry in custom_providers: for entry in custom_providers:
if not isinstance(entry, dict): if not isinstance(entry, dict):
continue continue
@ -294,13 +336,21 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An
continue continue
name_norm = _normalize_custom_provider_name(name) name_norm = _normalize_custom_provider_name(name)
menu_key = f"custom:{name_norm}" menu_key = f"custom:{name_norm}"
if requested_norm not in {name_norm, menu_key}: provider_key = str(entry.get("provider_key", "") or "").strip()
provider_key_norm = _normalize_custom_provider_name(provider_key) if provider_key else ""
provider_menu_key = f"custom:{provider_key_norm}" if provider_key_norm else ""
if requested_norm not in {name_norm, menu_key, provider_key_norm, provider_menu_key}:
continue continue
result = { result = {
"name": name.strip(), "name": name.strip(),
"base_url": base_url.strip(), "base_url": base_url.strip(),
"api_key": str(entry.get("api_key", "") or "").strip(), "api_key": str(entry.get("api_key", "") or "").strip(),
} }
key_env = str(entry.get("key_env", "") or "").strip()
if key_env:
result["key_env"] = key_env
if provider_key:
result["provider_key"] = provider_key
api_mode = _parse_api_mode(entry.get("api_mode")) api_mode = _parse_api_mode(entry.get("api_mode"))
if api_mode: if api_mode:
result["api_mode"] = api_mode result["api_mode"] = api_mode
@ -342,6 +392,7 @@ def _resolve_named_custom_runtime(
api_key_candidates = [ api_key_candidates = [
(explicit_api_key or "").strip(), (explicit_api_key or "").strip(),
str(custom_provider.get("api_key", "") or "").strip(), str(custom_provider.get("api_key", "") or "").strip(),
os.getenv(str(custom_provider.get("key_env", "") or "").strip(), "").strip(),
os.getenv("OPENAI_API_KEY", "").strip(), os.getenv("OPENAI_API_KEY", "").strip(),
os.getenv("OPENROUTER_API_KEY", "").strip(), os.getenv("OPENROUTER_API_KEY", "").strip(),
] ]
@ -557,7 +608,7 @@ def _resolve_explicit_runtime(
base_url = explicit_base_url base_url = explicit_base_url
if not base_url: if not base_url:
if provider == "kimi-coding": if provider in ("kimi-coding", "kimi-coding-cn"):
creds = resolve_api_key_provider_credentials(provider) creds = resolve_api_key_provider_credentials(provider)
base_url = creds.get("base_url", "").rstrip("/") base_url = creds.get("base_url", "").rstrip("/")
else: else:

View file

@ -104,8 +104,9 @@ _DEFAULT_PROVIDER_MODELS = {
"gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite", "gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite",
"gemma-4-31b-it", "gemma-4-26b-it", "gemma-4-31b-it", "gemma-4-26b-it",
], ],
"zai": ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"], "zai": ["glm-5.1", "glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
"kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"], "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
"kimi-coding-cn": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
"minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"], "minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
"minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"], "minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
"ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"], "ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"],
@ -815,6 +816,7 @@ def setup_model_provider(config: dict, *, quick: bool = False):
"copilot-acp": "GitHub Copilot ACP", "copilot-acp": "GitHub Copilot ACP",
"zai": "Z.AI / GLM", "zai": "Z.AI / GLM",
"kimi-coding": "Kimi / Moonshot", "kimi-coding": "Kimi / Moonshot",
"kimi-coding-cn": "Kimi / Moonshot (China)",
"minimax": "MiniMax", "minimax": "MiniMax",
"minimax-cn": "MiniMax CN", "minimax-cn": "MiniMax CN",
"anthropic": "Anthropic", "anthropic": "Anthropic",
@ -2232,6 +2234,7 @@ def setup_gateway(config: dict):
from hermes_cli.gateway import ( from hermes_cli.gateway import (
_is_service_installed, _is_service_installed,
_is_service_running, _is_service_running,
supports_systemd_services,
has_conflicting_systemd_units, has_conflicting_systemd_units,
install_linux_gateway_from_setup, install_linux_gateway_from_setup,
print_systemd_scope_conflict_warning, print_systemd_scope_conflict_warning,
@ -2244,16 +2247,18 @@ def setup_gateway(config: dict):
service_installed = _is_service_installed() service_installed = _is_service_installed()
service_running = _is_service_running() service_running = _is_service_running()
supports_systemd = supports_systemd_services()
supports_service_manager = supports_systemd or _is_macos
print() print()
if _is_linux and has_conflicting_systemd_units(): if supports_systemd and has_conflicting_systemd_units():
print_systemd_scope_conflict_warning() print_systemd_scope_conflict_warning()
print() print()
if service_running: if service_running:
if prompt_yes_no(" Restart the gateway to pick up changes?", True): if prompt_yes_no(" Restart the gateway to pick up changes?", True):
try: try:
if _is_linux: if supports_systemd:
systemd_restart() systemd_restart()
elif _is_macos: elif _is_macos:
launchd_restart() launchd_restart()
@ -2262,14 +2267,14 @@ def setup_gateway(config: dict):
elif service_installed: elif service_installed:
if prompt_yes_no(" Start the gateway service?", True): if prompt_yes_no(" Start the gateway service?", True):
try: try:
if _is_linux: if supports_systemd:
systemd_start() systemd_start()
elif _is_macos: elif _is_macos:
launchd_start() launchd_start()
except Exception as e: except Exception as e:
print_error(f" Start failed: {e}") print_error(f" Start failed: {e}")
elif _is_linux or _is_macos: elif supports_service_manager:
svc_name = "systemd" if _is_linux else "launchd" svc_name = "systemd" if supports_systemd else "launchd"
if prompt_yes_no( if prompt_yes_no(
f" Install the gateway as a {svc_name} service? (runs in background, starts on boot)", f" Install the gateway as a {svc_name} service? (runs in background, starts on boot)",
True, True,
@ -2277,7 +2282,7 @@ def setup_gateway(config: dict):
try: try:
installed_scope = None installed_scope = None
did_install = False did_install = False
if _is_linux: if supports_systemd:
installed_scope, did_install = install_linux_gateway_from_setup(force=False) installed_scope, did_install = install_linux_gateway_from_setup(force=False)
else: else:
launchd_install(force=False) launchd_install(force=False)
@ -2285,7 +2290,7 @@ def setup_gateway(config: dict):
print() print()
if did_install and prompt_yes_no(" Start the service now?", True): if did_install and prompt_yes_no(" Start the service now?", True):
try: try:
if _is_linux: if supports_systemd:
systemd_start(system=installed_scope == "system") systemd_start(system=installed_scope == "system")
elif _is_macos: elif _is_macos:
launchd_start() launchd_start()
@ -2296,9 +2301,18 @@ def setup_gateway(config: dict):
print_info(" You can try manually: hermes gateway install") print_info(" You can try manually: hermes gateway install")
else: else:
print_info(" You can install later: hermes gateway install") print_info(" You can install later: hermes gateway install")
if _is_linux: if supports_systemd:
print_info(" Or as a boot-time service: sudo hermes gateway install --system") print_info(" Or as a boot-time service: sudo hermes gateway install --system")
print_info(" Or run in foreground: hermes gateway") print_info(" Or run in foreground: hermes gateway")
else:
from hermes_constants import is_container
if is_container():
print_info("Start the gateway to bring your bots online:")
print_info(" hermes gateway run # Run as container main process")
print_info("")
print_info("For automatic restarts, use a Docker restart policy:")
print_info(" docker run --restart unless-stopped ...")
print_info(" docker restart <container> # Manual restart")
else: else:
print_info("Start the gateway to bring your bots online:") print_info("Start the gateway to bring your bots online:")
print_info(" hermes gateway # Run in foreground") print_info(" hermes gateway # Run in foreground")

View file

@ -335,7 +335,23 @@ def do_install(identifier: str, category: str = "", force: bool = False,
meta, bundle, _matched_source = _resolve_source_meta_and_bundle(identifier, sources) meta, bundle, _matched_source = _resolve_source_meta_and_bundle(identifier, sources)
if not bundle: if not bundle:
c.print(f"[bold red]Error:[/] Could not fetch '{identifier}' from any source.\n") # Check if any source hit GitHub API rate limit
rate_limited = any(
getattr(src, "is_rate_limited", False)
or getattr(getattr(src, "github", None), "is_rate_limited", False)
for src in sources
)
c.print(f"[bold red]Error:[/] Could not fetch '{identifier}' from any source.")
if rate_limited:
c.print(
"[yellow]Hint:[/] GitHub API rate limit exhausted "
"(unauthenticated: 60 requests/hour).\n"
"Set [bold]GITHUB_TOKEN[/] in your .env or install the "
"[bold]gh[/] CLI and run [bold]gh auth login[/] "
"to raise the limit to 5,000/hr.\n"
)
else:
c.print()
return return
# Auto-detect category for official skills (e.g. "official/autonomous-ai-agents/blackbox") # Auto-detect category for official skills (e.g. "official/autonomous-ai-agents/blackbox")

View file

@ -346,6 +346,18 @@ def show_status(args):
print(" Note: Android may stop background jobs when Termux is suspended") print(" Note: Android may stop background jobs when Termux is suspended")
elif sys.platform.startswith('linux'): elif sys.platform.startswith('linux'):
from hermes_constants import is_container
if is_container():
# Docker/Podman: no systemd — check for running gateway processes
try:
from hermes_cli.gateway import find_gateway_pids
gateway_pids = find_gateway_pids()
is_active = len(gateway_pids) > 0
except Exception:
is_active = False
print(f" Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
print(" Manager: docker (foreground)")
else:
try: try:
from hermes_cli.gateway import get_service_name from hermes_cli.gateway import get_service_name
_gw_svc = get_service_name() _gw_svc = get_service_name()

1839
hermes_cli/web_server.py Normal file

File diff suppressed because it is too large Load diff

View file

@ -190,6 +190,37 @@ def is_wsl() -> bool:
return _wsl_detected return _wsl_detected
_container_detected: bool | None = None
def is_container() -> bool:
"""Return True when running inside a Docker/Podman container.
Checks ``/.dockerenv`` (Docker), ``/run/.containerenv`` (Podman),
and ``/proc/1/cgroup`` for container runtime markers. Result is
cached for the process lifetime. Import-safe no heavy deps.
"""
global _container_detected
if _container_detected is not None:
return _container_detected
if os.path.exists("/.dockerenv"):
_container_detected = True
return True
if os.path.exists("/run/.containerenv"):
_container_detected = True
return True
try:
with open("/proc/1/cgroup", "r") as f:
cgroup = f.read()
if "docker" in cgroup or "podman" in cgroup or "/lxc/" in cgroup:
_container_detected = True
return True
except OSError:
pass
_container_detected = False
return False
# ─── Well-Known Paths ───────────────────────────────────────────────────────── # ─── Well-Known Paths ─────────────────────────────────────────────────────────

View file

@ -1995,7 +1995,9 @@ class Migrator:
if compaction.get("timeout"): if compaction.get("timeout"):
pass # No direct mapping pass # No direct mapping
if compaction.get("model"): if compaction.get("model"):
compression["summary_model"] = compaction["model"] aux = hermes_cfg.setdefault("auxiliary", {})
aux_comp = aux.setdefault("compression", {})
aux_comp["model"] = compaction["model"]
hermes_cfg["compression"] = compression hermes_cfg["compression"] = compression
changes = True changes = True

1784
package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -19,6 +19,9 @@
"agent-browser": "^0.13.0", "agent-browser": "^0.13.0",
"@askjo/camoufox-browser": "^1.0.0" "@askjo/camoufox-browser": "^1.0.0"
}, },
"overrides": {
"lodash": "4.18.1"
},
"engines": { "engines": {
"node": ">=18.0.0" "node": ">=18.0.0"
} }

View file

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "hermes-agent" name = "hermes-agent"
version = "0.8.0" version = "0.9.0"
description = "The self-improving AI agent — creates skills from experience, improves them during use, and runs anywhere" description = "The self-improving AI agent — creates skills from experience, improves them during use, and runs anywhere"
readme = "README.md" readme = "README.md"
requires-python = ">=3.11" requires-python = ">=3.11"
@ -76,6 +76,7 @@ termux = [
] ]
dingtalk = ["dingtalk-stream>=0.1.0,<1"] dingtalk = ["dingtalk-stream>=0.1.0,<1"]
feishu = ["lark-oapi>=1.5.3,<2"] feishu = ["lark-oapi>=1.5.3,<2"]
web = ["fastapi>=0.104.0,<1", "uvicorn[standard]>=0.24.0,<1"]
rl = [ rl = [
"atroposlib @ git+https://github.com/NousResearch/atropos.git", "atroposlib @ git+https://github.com/NousResearch/atropos.git",
"tinker @ git+https://github.com/thinking-machines-lab/tinker.git", "tinker @ git+https://github.com/thinking-machines-lab/tinker.git",
@ -107,6 +108,7 @@ all = [
"hermes-agent[dingtalk]", "hermes-agent[dingtalk]",
"hermes-agent[feishu]", "hermes-agent[feishu]",
"hermes-agent[mistral]", "hermes-agent[mistral]",
"hermes-agent[web]",
] ]
[project.scripts] [project.scripts]
@ -117,6 +119,9 @@ hermes-acp = "acp_adapter.entry:main"
[tool.setuptools] [tool.setuptools]
py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli", "hermes_constants", "hermes_state", "hermes_time", "hermes_logging", "rl_cli", "utils"] py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli", "hermes_constants", "hermes_state", "hermes_time", "hermes_logging", "rl_cli", "utils"]
[tool.setuptools.package-data]
hermes_cli = ["web_dist/**/*"]
[tool.setuptools.packages.find] [tool.setuptools.packages.find]
include = ["agent", "tools", "tools.*", "hermes_cli", "gateway", "gateway.*", "tui_gateway", "tui_gateway.*", "cron", "acp_adapter", "plugins", "plugins.*"] include = ["agent", "tools", "tools.*", "hermes_cli", "gateway", "gateway.*", "tui_gateway", "tui_gateway.*", "cron", "acp_adapter", "plugins", "plugins.*"]

View file

@ -460,6 +460,40 @@ def _sanitize_messages_non_ascii(messages: list) -> bool:
return found return found
def _sanitize_tools_non_ascii(tools: list) -> bool:
"""Strip non-ASCII characters from tool payloads in-place."""
return _sanitize_structure_non_ascii(tools)
def _sanitize_structure_non_ascii(payload: Any) -> bool:
"""Strip non-ASCII characters from nested dict/list payloads in-place."""
found = False
def _walk(node):
nonlocal found
if isinstance(node, dict):
for key, value in node.items():
if isinstance(value, str):
sanitized = _strip_non_ascii(value)
if sanitized != value:
node[key] = sanitized
found = True
elif isinstance(value, (dict, list)):
_walk(value)
elif isinstance(node, list):
for idx, value in enumerate(node):
if isinstance(value, str):
sanitized = _strip_non_ascii(value)
if sanitized != value:
node[idx] = sanitized
found = True
elif isinstance(value, (dict, list)):
_walk(value)
_walk(payload)
return found
@ -675,9 +709,17 @@ class AIAgent:
# on /v1/chat/completions by both OpenAI and OpenRouter. Also # on /v1/chat/completions by both OpenAI and OpenRouter. Also
# auto-upgrade for direct OpenAI URLs (api.openai.com) since all # auto-upgrade for direct OpenAI URLs (api.openai.com) since all
# newer tool-calling models prefer Responses there. # newer tool-calling models prefer Responses there.
if self.api_mode == "chat_completions" and ( # ACP runtimes are excluded: CopilotACPClient handles its own
# routing and does not implement the Responses API surface.
if (
self.api_mode == "chat_completions"
and self.provider != "copilot-acp"
and not str(self.base_url or "").lower().startswith("acp://copilot")
and not str(self.base_url or "").lower().startswith("acp+tcp://")
and (
self._is_direct_openai_url() self._is_direct_openai_url()
or self._model_requires_responses_api(self.model) or self._model_requires_responses_api(self.model)
)
): ):
self.api_mode = "codex_responses" self.api_mode = "codex_responses"
@ -737,6 +779,7 @@ class AIAgent:
self.service_tier = service_tier self.service_tier = service_tier
self.request_overrides = dict(request_overrides or {}) self.request_overrides = dict(request_overrides or {})
self.prefill_messages = prefill_messages or [] # Prefilled conversation turns self.prefill_messages = prefill_messages or [] # Prefilled conversation turns
self._force_ascii_payload = False
# Anthropic prompt caching: auto-enabled for Claude models via OpenRouter. # Anthropic prompt caching: auto-enabled for Claude models via OpenRouter.
# Reduces input costs by ~75% on multi-turn conversations by caching the # Reduces input costs by ~75% on multi-turn conversations by caching the
@ -1212,7 +1255,6 @@ class AIAgent:
_compression_cfg = {} _compression_cfg = {}
compression_threshold = float(_compression_cfg.get("threshold", 0.50)) compression_threshold = float(_compression_cfg.get("threshold", 0.50))
compression_enabled = str(_compression_cfg.get("enabled", True)).lower() in ("true", "1", "yes") compression_enabled = str(_compression_cfg.get("enabled", True)).lower() in ("true", "1", "yes")
compression_summary_model = _compression_cfg.get("summary_model") or None
compression_target_ratio = float(_compression_cfg.get("target_ratio", 0.20)) compression_target_ratio = float(_compression_cfg.get("target_ratio", 0.20))
compression_protect_last = int(_compression_cfg.get("protect_last_n", 20)) compression_protect_last = int(_compression_cfg.get("protect_last_n", 20))
@ -1233,8 +1275,13 @@ class AIAgent:
# Check custom_providers per-model context_length # Check custom_providers per-model context_length
if _config_context_length is None: if _config_context_length is None:
try:
from hermes_cli.config import get_compatible_custom_providers
_custom_providers = get_compatible_custom_providers(_agent_cfg)
except Exception:
_custom_providers = _agent_cfg.get("custom_providers") _custom_providers = _agent_cfg.get("custom_providers")
if isinstance(_custom_providers, list): if not isinstance(_custom_providers, list):
_custom_providers = []
for _cp_entry in _custom_providers: for _cp_entry in _custom_providers:
if not isinstance(_cp_entry, dict): if not isinstance(_cp_entry, dict):
continue continue
@ -1292,6 +1339,22 @@ class AIAgent:
if _selected_engine is not None: if _selected_engine is not None:
self.context_compressor = _selected_engine self.context_compressor = _selected_engine
# Resolve context_length for plugin engines — mirrors switch_model() path
from agent.model_metadata import get_model_context_length
_plugin_ctx_len = get_model_context_length(
self.model,
base_url=self.base_url,
api_key=getattr(self, "api_key", ""),
config_context_length=_config_context_length,
provider=self.provider,
)
self.context_compressor.update_model(
model=self.model,
context_length=_plugin_ctx_len,
base_url=self.base_url,
api_key=getattr(self, "api_key", ""),
provider=self.provider,
)
if not self.quiet_mode: if not self.quiet_mode:
logger.info("Using context engine: %s", _selected_engine.name) logger.info("Using context engine: %s", _selected_engine.name)
else: else:
@ -1301,7 +1364,7 @@ class AIAgent:
protect_first_n=3, protect_first_n=3,
protect_last_n=compression_protect_last, protect_last_n=compression_protect_last,
summary_target_ratio=compression_target_ratio, summary_target_ratio=compression_target_ratio,
summary_model_override=compression_summary_model, summary_model_override=None,
quiet_mode=self.quiet_mode, quiet_mode=self.quiet_mode,
base_url=self.base_url, base_url=self.base_url,
api_key=getattr(self, "api_key", ""), api_key=getattr(self, "api_key", ""),
@ -1748,10 +1811,25 @@ class AIAgent:
aux_base_url = str(getattr(client, "base_url", "")) aux_base_url = str(getattr(client, "base_url", ""))
aux_api_key = str(getattr(client, "api_key", "")) aux_api_key = str(getattr(client, "api_key", ""))
# Read user-configured context_length for the compression model.
# Custom endpoints often don't support /models API queries so
# get_model_context_length() falls through to the 128K default,
# ignoring the explicit config value. Pass it as the highest-
# priority hint so the configured value is always respected.
_aux_cfg = (self.config or {}).get("auxiliary", {}).get("compression", {})
_aux_context_config = _aux_cfg.get("context_length") if isinstance(_aux_cfg, dict) else None
if _aux_context_config is not None:
try:
_aux_context_config = int(_aux_context_config)
except (TypeError, ValueError):
_aux_context_config = None
aux_context = get_model_context_length( aux_context = get_model_context_length(
aux_model, aux_model,
base_url=aux_base_url, base_url=aux_base_url,
api_key=aux_api_key, api_key=aux_api_key,
config_context_length=_aux_context_config,
) )
threshold = self.context_compressor.threshold_tokens threshold = self.context_compressor.threshold_tokens
@ -1872,12 +1950,13 @@ class AIAgent:
if not content: if not content:
return "" return ""
# Strip all reasoning tag variants: <think>, <thinking>, <THINKING>, # Strip all reasoning tag variants: <think>, <thinking>, <THINKING>,
# <reasoning>, <REASONING_SCRATCHPAD> # <reasoning>, <REASONING_SCRATCHPAD>, <thought> (Gemma 4)
content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL) content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL)
content = re.sub(r'<thinking>.*?</thinking>', '', content, flags=re.DOTALL | re.IGNORECASE) content = re.sub(r'<thinking>.*?</thinking>', '', content, flags=re.DOTALL | re.IGNORECASE)
content = re.sub(r'<reasoning>.*?</reasoning>', '', content, flags=re.DOTALL) content = re.sub(r'<reasoning>.*?</reasoning>', '', content, flags=re.DOTALL)
content = re.sub(r'<REASONING_SCRATCHPAD>.*?</REASONING_SCRATCHPAD>', '', content, flags=re.DOTALL) content = re.sub(r'<REASONING_SCRATCHPAD>.*?</REASONING_SCRATCHPAD>', '', content, flags=re.DOTALL)
content = re.sub(r'</?(?:think|thinking|reasoning|REASONING_SCRATCHPAD)>\s*', '', content, flags=re.IGNORECASE) content = re.sub(r'<thought>.*?</thought>', '', content, flags=re.DOTALL | re.IGNORECASE)
content = re.sub(r'</?(?:think|thinking|reasoning|thought|REASONING_SCRATCHPAD)>\s*', '', content, flags=re.IGNORECASE)
return content return content
def _looks_like_codex_intermediate_ack( def _looks_like_codex_intermediate_ack(
@ -2002,6 +2081,7 @@ class AIAgent:
inline_patterns = ( inline_patterns = (
r"<think>(.*?)</think>", r"<think>(.*?)</think>",
r"<thinking>(.*?)</thinking>", r"<thinking>(.*?)</thinking>",
r"<thought>(.*?)</thought>",
r"<reasoning>(.*?)</reasoning>", r"<reasoning>(.*?)</reasoning>",
r"<REASONING_SCRATCHPAD>(.*?)</REASONING_SCRATCHPAD>", r"<REASONING_SCRATCHPAD>(.*?)</REASONING_SCRATCHPAD>",
) )
@ -4262,6 +4342,7 @@ class AIAgent:
try: try:
with active_client.responses.stream(**api_kwargs) as stream: with active_client.responses.stream(**api_kwargs) as stream:
for event in stream: for event in stream:
self._touch_activity("receiving stream response")
if self._interrupt_requested: if self._interrupt_requested:
break break
event_type = getattr(event, "type", "") event_type = getattr(event, "type", "")
@ -4386,6 +4467,7 @@ class AIAgent:
collected_text_deltas: list = [] collected_text_deltas: list = []
try: try:
for event in stream_or_response: for event in stream_or_response:
self._touch_activity("receiving stream response")
event_type = getattr(event, "type", None) event_type = getattr(event, "type", None)
if not event_type and isinstance(event, dict): if not event_type and isinstance(event, dict):
event_type = event.get("type") event_type = event.get("type")
@ -4688,6 +4770,11 @@ class AIAgent:
Each worker thread gets its own OpenAI client instance. Interrupts only Each worker thread gets its own OpenAI client instance. Interrupts only
close that worker-local client, so retries and other requests never close that worker-local client, so retries and other requests never
inherit a closed transport. inherit a closed transport.
Includes a stale-call detector: if no response arrives within the
configured timeout, the connection is killed and an error raised so
the main retry loop can try again with backoff / credential rotation /
provider fallback.
""" """
result = {"response": None, "error": None} result = {"response": None, "error": None}
request_client_holder = {"client": None} request_client_holder = {"client": None}
@ -4713,10 +4800,86 @@ class AIAgent:
if request_client is not None: if request_client is not None:
self._close_request_openai_client(request_client, reason="request_complete") self._close_request_openai_client(request_client, reason="request_complete")
# ── Stale-call timeout (mirrors streaming stale detector) ────────
# Non-streaming calls return nothing until the full response is
# ready. Without this, a hung provider can block for the full
# httpx timeout (default 1800s) with zero feedback. The stale
# detector kills the connection early so the main retry loop can
# apply richer recovery (credential rotation, provider fallback).
_stale_base = float(os.getenv("HERMES_API_CALL_STALE_TIMEOUT", 300.0))
_base_url = getattr(self, "_base_url", None) or ""
if _stale_base == 300.0 and _base_url and is_local_endpoint(_base_url):
_stale_timeout = float("inf")
else:
_est_tokens = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
if _est_tokens > 100_000:
_stale_timeout = max(_stale_base, 600.0)
elif _est_tokens > 50_000:
_stale_timeout = max(_stale_base, 450.0)
else:
_stale_timeout = _stale_base
_call_start = time.time()
self._touch_activity("waiting for non-streaming API response")
t = threading.Thread(target=_call, daemon=True) t = threading.Thread(target=_call, daemon=True)
t.start() t.start()
_poll_count = 0
while t.is_alive(): while t.is_alive():
t.join(timeout=0.3) t.join(timeout=0.3)
_poll_count += 1
# Touch activity every ~30s so the gateway's inactivity
# monitor knows we're alive while waiting for the response.
if _poll_count % 100 == 0: # 100 × 0.3s = 30s
_elapsed = time.time() - _call_start
self._touch_activity(
f"waiting for non-streaming response ({int(_elapsed)}s elapsed)"
)
# Stale-call detector: kill the connection if no response
# arrives within the configured timeout.
_elapsed = time.time() - _call_start
if _elapsed > _stale_timeout:
_est_ctx = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4
logger.warning(
"Non-streaming API call stale for %.0fs (threshold %.0fs). "
"model=%s context=~%s tokens. Killing connection.",
_elapsed, _stale_timeout,
api_kwargs.get("model", "unknown"), f"{_est_ctx:,}",
)
self._emit_status(
f"⚠️ No response from provider for {int(_elapsed)}s "
f"(non-streaming, model: {api_kwargs.get('model', 'unknown')}). "
f"Aborting call."
)
try:
if self.api_mode == "anthropic_messages":
from agent.anthropic_adapter import build_anthropic_client
self._anthropic_client.close()
self._anthropic_client = build_anthropic_client(
self._anthropic_api_key,
getattr(self, "_anthropic_base_url", None),
)
else:
rc = request_client_holder.get("client")
if rc is not None:
self._close_request_openai_client(rc, reason="stale_call_kill")
except Exception:
pass
self._touch_activity(
f"stale non-streaming call killed after {int(_elapsed)}s"
)
# Wait briefly for the thread to notice the closed connection.
t.join(timeout=2.0)
if result["error"] is None and result["response"] is None:
result["error"] = TimeoutError(
f"Non-streaming API call timed out after {int(_elapsed)}s "
f"with no response (threshold: {int(_stale_timeout)}s)"
)
break
if self._interrupt_requested: if self._interrupt_requested:
# Force-close the in-flight worker-local HTTP connection to stop # Force-close the in-flight worker-local HTTP connection to stop
# token generation without poisoning the shared client used to # token generation without poisoning the shared client used to
@ -4937,11 +5100,8 @@ class AIAgent:
role = "assistant" role = "assistant"
reasoning_parts: list = [] reasoning_parts: list = []
usage_obj = None usage_obj = None
_first_chunk_seen = False
for chunk in stream: for chunk in stream:
last_chunk_time["t"] = time.time() last_chunk_time["t"] = time.time()
if not _first_chunk_seen:
_first_chunk_seen = True
self._touch_activity("receiving stream response") self._touch_activity("receiving stream response")
if self._interrupt_requested: if self._interrupt_requested:
@ -5118,6 +5278,7 @@ class AIAgent:
# actively arriving (the chat_completions path # actively arriving (the chat_completions path
# already does this at the top of its chunk loop). # already does this at the top of its chunk loop).
last_chunk_time["t"] = time.time() last_chunk_time["t"] = time.time()
self._touch_activity("receiving stream response")
if self._interrupt_requested: if self._interrupt_requested:
break break
@ -5231,6 +5392,10 @@ class AIAgent:
f"({type(e).__name__}). Reconnecting… " f"({type(e).__name__}). Reconnecting… "
f"(attempt {_stream_attempt + 2}/{_max_stream_retries + 1})" f"(attempt {_stream_attempt + 2}/{_max_stream_retries + 1})"
) )
self._touch_activity(
f"stream retry {_stream_attempt + 2}/{_max_stream_retries + 1} "
f"after {type(e).__name__}"
)
# Close the stale request client before retry # Close the stale request client before retry
stale = request_client_holder.get("client") stale = request_client_holder.get("client")
if stale is not None: if stale is not None:
@ -5255,8 +5420,7 @@ class AIAgent:
"try again in a moment." "try again in a moment."
) )
logger.warning( logger.warning(
"Streaming exhausted %s retries on transient error, " "Streaming exhausted %s retries on transient error: %s",
"falling back to non-streaming: %s",
_max_stream_retries + 1, _max_stream_retries + 1,
e, e,
) )
@ -5267,25 +5431,24 @@ class AIAgent:
and "not supported" in _err_lower and "not supported" in _err_lower
) )
if _is_stream_unsupported: if _is_stream_unsupported:
self._disable_streaming = True
self._safe_print( self._safe_print(
"\n⚠ Streaming is not supported for this " "\n⚠ Streaming is not supported for this "
"model/provider. Falling back to non-streaming.\n" "model/provider. Switching to non-streaming.\n"
" To avoid this delay, set display.streaming: false " " To avoid this delay, set display.streaming: false "
"in config.yaml\n" "in config.yaml\n"
) )
logger.info( logger.info(
"Streaming failed before delivery, falling back to non-streaming: %s", "Streaming failed before delivery: %s",
e, e,
) )
try: # Propagate the error to the main retry loop instead of
# Reset stale timer — the non-streaming fallback # falling back to non-streaming inline. The main loop has
# uses its own client; prevent the stale detector # richer recovery: credential rotation, provider fallback,
# from firing on stale timestamps from failed streams. # backoff, and — for "stream not supported" — will switch
last_chunk_time["t"] = time.time() # to non-streaming on the next attempt via _disable_streaming.
result["response"] = self._interruptible_api_call(api_kwargs) result["error"] = e
except Exception as fallback_err:
result["error"] = fallback_err
return return
finally: finally:
request_client = request_client_holder.get("client") request_client = request_client_holder.get("client")
@ -5351,6 +5514,9 @@ class AIAgent:
# Reset the timer so we don't kill repeatedly while # Reset the timer so we don't kill repeatedly while
# the inner thread processes the closure. # the inner thread processes the closure.
last_chunk_time["t"] = time.time() last_chunk_time["t"] = time.time()
self._touch_activity(
f"stale stream detected after {int(_stale_elapsed)}s, reconnecting"
)
if self._interrupt_requested: if self._interrupt_requested:
try: try:
@ -5376,13 +5542,22 @@ class AIAgent:
# a new API call, creating a duplicate message. Return a # a new API call, creating a duplicate message. Return a
# partial "stop" response instead so the outer loop treats this # partial "stop" response instead so the outer loop treats this
# turn as complete (no retry, no fallback). # turn as complete (no retry, no fallback).
# Recover whatever content was already streamed to the user.
# _current_streamed_assistant_text accumulates text fired
# through _fire_stream_delta, so it has exactly what the
# user saw before the connection died.
_partial_text = (
getattr(self, "_current_streamed_assistant_text", "") or ""
).strip() or None
logger.warning( logger.warning(
"Partial stream delivered before error; returning stub " "Partial stream delivered before error; returning stub "
"response to prevent duplicate messages: %s", "response with %s chars of recovered content to prevent "
"duplicate messages: %s",
len(_partial_text or ""),
result["error"], result["error"],
) )
_stub_msg = SimpleNamespace( _stub_msg = SimpleNamespace(
role="assistant", content=None, tool_calls=None, role="assistant", content=_partial_text, tool_calls=None,
reasoning_content=None, reasoning_content=None,
) )
return SimpleNamespace( return SimpleNamespace(
@ -5841,11 +6016,12 @@ class AIAgent:
"""True when using an anthropic-compatible endpoint that preserves dots in model names. """True when using an anthropic-compatible endpoint that preserves dots in model names.
Alibaba/DashScope keeps dots (e.g. qwen3.5-plus). Alibaba/DashScope keeps dots (e.g. qwen3.5-plus).
MiniMax keeps dots (e.g. MiniMax-M2.7). MiniMax keeps dots (e.g. MiniMax-M2.7).
OpenCode Go keeps dots (e.g. minimax-m2.7).""" OpenCode Go/Zen keeps dots for non-Claude models (e.g. minimax-m2.5-free).
if (getattr(self, "provider", "") or "").lower() in {"alibaba", "minimax", "minimax-cn", "opencode-go"}: ZAI/Zhipu keeps dots (e.g. glm-4.7, glm-5.1)."""
if (getattr(self, "provider", "") or "").lower() in {"alibaba", "minimax", "minimax-cn", "opencode-go", "opencode-zen", "zai"}:
return True return True
base = (getattr(self, "base_url", "") or "").lower() base = (getattr(self, "base_url", "") or "").lower()
return "dashscope" in base or "aliyuncs" in base or "minimax" in base or "opencode.ai/zen/go" in base return "dashscope" in base or "aliyuncs" in base or "minimax" in base or "opencode.ai/zen/" in base or "bigmodel.cn" in base
def _is_qwen_portal(self) -> bool: def _is_qwen_portal(self) -> bool:
"""Return True when the base URL targets Qwen Portal.""" """Return True when the base URL targets Qwen Portal."""
@ -8078,6 +8254,8 @@ class AIAgent:
try: try:
self._reset_stream_delivery_tracking() self._reset_stream_delivery_tracking()
api_kwargs = self._build_api_kwargs(api_messages) api_kwargs = self._build_api_kwargs(api_messages)
if self._force_ascii_payload:
_sanitize_structure_non_ascii(api_kwargs)
if self.api_mode == "codex_responses": if self.api_mode == "codex_responses":
api_kwargs = self._preflight_codex_api_kwargs(api_kwargs, allow_stream=False) api_kwargs = self._preflight_codex_api_kwargs(api_kwargs, allow_stream=False)
@ -8125,7 +8303,12 @@ class AIAgent:
self.thinking_callback("") self.thinking_callback("")
_use_streaming = True _use_streaming = True
if not self._has_stream_consumers(): # Provider signaled "stream not supported" on a previous
# attempt — switch to non-streaming for the rest of this
# session instead of re-failing every retry.
if getattr(self, "_disable_streaming", False):
_use_streaming = False
elif not self._has_stream_consumers():
# No display/TTS consumer. Still prefer streaming for # No display/TTS consumer. Still prefer streaming for
# health checking, but skip for Mock clients in tests # health checking, but skip for Mock clients in tests
# (mocks return SimpleNamespace, not stream iterators). # (mocks return SimpleNamespace, not stream iterators).
@ -8225,7 +8408,8 @@ class AIAgent:
if self.thinking_callback: if self.thinking_callback:
self.thinking_callback("") self.thinking_callback("")
# This is often rate limiting or provider returning malformed response # Invalid response — could be rate limiting, provider timeout,
# upstream server error, or malformed response.
retry_count += 1 retry_count += 1
# Eager fallback: empty/malformed responses are a common # Eager fallback: empty/malformed responses are a common
@ -8261,11 +8445,44 @@ class AIAgent:
if self.verbose_logging: if self.verbose_logging:
logging.debug(f"Response attributes for invalid response: {resp_attrs}") logging.debug(f"Response attributes for invalid response: {resp_attrs}")
# Extract error code from response for contextual diagnostics
_resp_error_code = None
if response and hasattr(response, 'error') and response.error:
_code_raw = getattr(response.error, 'code', None)
if _code_raw is None and isinstance(response.error, dict):
_code_raw = response.error.get('code')
if _code_raw is not None:
try:
_resp_error_code = int(_code_raw)
except (TypeError, ValueError):
pass
# Build a human-readable failure hint from the error code
# and response time, instead of always assuming rate limiting.
if _resp_error_code == 524:
_failure_hint = f"upstream provider timed out (Cloudflare 524, {api_duration:.0f}s)"
elif _resp_error_code == 504:
_failure_hint = f"upstream gateway timeout (504, {api_duration:.0f}s)"
elif _resp_error_code == 429:
_failure_hint = f"rate limited by upstream provider (429)"
elif _resp_error_code in (500, 502):
_failure_hint = f"upstream server error ({_resp_error_code}, {api_duration:.0f}s)"
elif _resp_error_code in (503, 529):
_failure_hint = f"upstream provider overloaded ({_resp_error_code})"
elif _resp_error_code is not None:
_failure_hint = f"upstream error (code {_resp_error_code}, {api_duration:.0f}s)"
elif api_duration < 10:
_failure_hint = f"fast response ({api_duration:.1f}s) — likely rate limited"
elif api_duration > 60:
_failure_hint = f"slow response ({api_duration:.0f}s) — likely upstream timeout"
else:
_failure_hint = f"response time {api_duration:.1f}s"
self._vprint(f"{self.log_prefix}⚠️ Invalid API response (attempt {retry_count}/{max_retries}): {', '.join(error_details)}", force=True) self._vprint(f"{self.log_prefix}⚠️ Invalid API response (attempt {retry_count}/{max_retries}): {', '.join(error_details)}", force=True)
self._vprint(f"{self.log_prefix} 🏢 Provider: {provider_name}", force=True) self._vprint(f"{self.log_prefix} 🏢 Provider: {provider_name}", force=True)
cleaned_provider_error = self._clean_error_message(error_msg) cleaned_provider_error = self._clean_error_message(error_msg)
self._vprint(f"{self.log_prefix} 📝 Provider message: {cleaned_provider_error}", force=True) self._vprint(f"{self.log_prefix} 📝 Provider message: {cleaned_provider_error}", force=True)
self._vprint(f"{self.log_prefix} ⏱️ Response time: {api_duration:.2f}s (fast response often indicates rate limiting)", force=True) self._vprint(f"{self.log_prefix} ⏱️ {_failure_hint}", force=True)
if retry_count >= max_retries: if retry_count >= max_retries:
# Try fallback before giving up # Try fallback before giving up
@ -8282,31 +8499,39 @@ class AIAgent:
"messages": messages, "messages": messages,
"completed": False, "completed": False,
"api_calls": api_call_count, "api_calls": api_call_count,
"error": "Invalid API response shape. Likely rate limited or malformed provider response.", "error": f"Invalid API response after {max_retries} retries: {_failure_hint}",
"failed": True # Mark as failure for filtering "failed": True # Mark as failure for filtering
} }
# Longer backoff for rate limiting (likely cause of None choices) # Backoff before retry — jittered exponential: 5s base, 120s cap
# Jittered exponential: 5s base, 120s cap + random jitter
wait_time = jittered_backoff(retry_count, base_delay=5.0, max_delay=120.0) wait_time = jittered_backoff(retry_count, base_delay=5.0, max_delay=120.0)
self._vprint(f"{self.log_prefix}⏳ Retrying in {wait_time:.1f}s (extended backoff)...", force=True) self._vprint(f"{self.log_prefix}⏳ Retrying in {wait_time:.1f}s ({_failure_hint})...", force=True)
logging.warning(f"Invalid API response (retry {retry_count}/{max_retries}): {', '.join(error_details)} | Provider: {provider_name}") logging.warning(f"Invalid API response (retry {retry_count}/{max_retries}): {', '.join(error_details)} | Provider: {provider_name}")
# Sleep in small increments to stay responsive to interrupts # Sleep in small increments to stay responsive to interrupts
sleep_end = time.time() + wait_time sleep_end = time.time() + wait_time
_backoff_touch_counter = 0
while time.time() < sleep_end: while time.time() < sleep_end:
if self._interrupt_requested: if self._interrupt_requested:
self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True) self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True)
self._persist_session(messages, conversation_history) self._persist_session(messages, conversation_history)
self.clear_interrupt() self.clear_interrupt()
return { return {
"final_response": f"Operation interrupted: retrying API call after rate limit (retry {retry_count}/{max_retries}).", "final_response": f"Operation interrupted during retry ({_failure_hint}, attempt {retry_count}/{max_retries}).",
"messages": messages, "messages": messages,
"api_calls": api_call_count, "api_calls": api_call_count,
"completed": False, "completed": False,
"interrupted": True, "interrupted": True,
} }
time.sleep(0.2) time.sleep(0.2)
# Touch activity every ~30s so the gateway's inactivity
# monitor knows we're alive during backoff waits.
_backoff_touch_counter += 1
if _backoff_touch_counter % 150 == 0: # 150 × 0.2s = 30s
self._touch_activity(
f"retry backoff ({retry_count}/{max_retries}), "
f"{int(sleep_end - time.time())}s remaining"
)
continue # Retry the API call continue # Retry the API call
# Check finish_reason before proceeding # Check finish_reason before proceeding
@ -8661,18 +8886,56 @@ class AIAgent:
) )
continue continue
if _is_ascii_codec: if _is_ascii_codec:
self._force_ascii_payload = True
# ASCII codec: the system encoding can't handle # ASCII codec: the system encoding can't handle
# non-ASCII characters at all. Sanitize all # non-ASCII characters at all. Sanitize all
# non-ASCII content from messages and retry. # non-ASCII content from messages/tool schemas and retry.
if _sanitize_messages_non_ascii(messages): _messages_sanitized = _sanitize_messages_non_ascii(messages)
_prefill_sanitized = False
if isinstance(getattr(self, "prefill_messages", None), list):
_prefill_sanitized = _sanitize_messages_non_ascii(self.prefill_messages)
_tools_sanitized = False
if isinstance(getattr(self, "tools", None), list):
_tools_sanitized = _sanitize_tools_non_ascii(self.tools)
_system_sanitized = False
if isinstance(active_system_prompt, str):
_sanitized_system = _strip_non_ascii(active_system_prompt)
if _sanitized_system != active_system_prompt:
active_system_prompt = _sanitized_system
self._cached_system_prompt = _sanitized_system
_system_sanitized = True
if isinstance(getattr(self, "ephemeral_system_prompt", None), str):
_sanitized_ephemeral = _strip_non_ascii(self.ephemeral_system_prompt)
if _sanitized_ephemeral != self.ephemeral_system_prompt:
self.ephemeral_system_prompt = _sanitized_ephemeral
_system_sanitized = True
_headers_sanitized = False
_default_headers = (
self._client_kwargs.get("default_headers")
if isinstance(getattr(self, "_client_kwargs", None), dict)
else None
)
if isinstance(_default_headers, dict):
_headers_sanitized = _sanitize_structure_non_ascii(_default_headers)
if (
_messages_sanitized
or _prefill_sanitized
or _tools_sanitized
or _system_sanitized
or _headers_sanitized
):
self._unicode_sanitization_passes += 1 self._unicode_sanitization_passes += 1
self._vprint( self._vprint(
f"{self.log_prefix}⚠️ System encoding is ASCII — stripped non-ASCII characters from messages. Retrying...", f"{self.log_prefix}⚠️ System encoding is ASCII — stripped non-ASCII characters from request payload. Retrying...",
force=True, force=True,
) )
continue continue
# Nothing to sanitize in messages — might be in system # Nothing to sanitize in any payload component.
# prompt or prefill. Fall through to normal error path. # Fall through to normal error path.
status_code = getattr(api_error, "status_code", None) status_code = getattr(api_error, "status_code", None)
error_context = self._extract_api_error_context(api_error) error_context = self._extract_api_error_context(api_error)
@ -8779,6 +9042,9 @@ class AIAgent:
retry_count += 1 retry_count += 1
elapsed_time = time.time() - api_start_time elapsed_time = time.time() - api_start_time
self._touch_activity(
f"API error recovery (attempt {retry_count}/{max_retries})"
)
error_type = type(api_error).__name__ error_type = type(api_error).__name__
error_msg = str(api_error).lower() error_msg = str(api_error).lower()
@ -9305,6 +9571,7 @@ class AIAgent:
# Sleep in small increments so we can respond to interrupts quickly # Sleep in small increments so we can respond to interrupts quickly
# instead of blocking the entire wait_time in one sleep() call # instead of blocking the entire wait_time in one sleep() call
sleep_end = time.time() + wait_time sleep_end = time.time() + wait_time
_backoff_touch_counter = 0
while time.time() < sleep_end: while time.time() < sleep_end:
if self._interrupt_requested: if self._interrupt_requested:
self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True) self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True)
@ -9318,6 +9585,14 @@ class AIAgent:
"interrupted": True, "interrupted": True,
} }
time.sleep(0.2) # Check interrupt every 200ms time.sleep(0.2) # Check interrupt every 200ms
# Touch activity every ~30s so the gateway's inactivity
# monitor knows we're alive during backoff waits.
_backoff_touch_counter += 1
if _backoff_touch_counter % 150 == 0: # 150 × 0.2s = 30s
self._touch_activity(
f"error retry backoff ({retry_count}/{max_retries}), "
f"{int(sleep_end - time.time())}s remaining"
)
# If the API call was interrupted, skip response processing # If the API call was interrupted, skip response processing
if interrupted: if interrupted:
@ -9703,12 +9978,25 @@ class AIAgent:
# Pop thinking-only prefill message(s) before appending # Pop thinking-only prefill message(s) before appending
# (tool-call path — same rationale as the final-response path). # (tool-call path — same rationale as the final-response path).
_had_prefill = False
while ( while (
messages messages
and isinstance(messages[-1], dict) and isinstance(messages[-1], dict)
and messages[-1].get("_thinking_prefill") and messages[-1].get("_thinking_prefill")
): ):
messages.pop() messages.pop()
_had_prefill = True
# Reset prefill counter when tool calls follow a prefill
# recovery. Without this, the counter accumulates across
# the whole conversation — a model that intermittently
# empties (empty → prefill → tools → empty → prefill →
# tools) burns both prefill attempts and the third empty
# gets zero recovery. Resetting here treats each tool-
# call success as a fresh start.
if _had_prefill:
self._thinking_prefill_retries = 0
self._empty_content_retries = 0
messages.append(assistant_msg) messages.append(assistant_msg)
self._emit_interim_assistant_message(assistant_msg) self._emit_interim_assistant_message(assistant_msg)
@ -9827,6 +10115,30 @@ class AIAgent:
# Check if response only has think block with no actual content after it # Check if response only has think block with no actual content after it
if not self._has_content_after_think_block(final_response): if not self._has_content_after_think_block(final_response):
# ── Partial stream recovery ─────────────────────
# If content was already streamed to the user before
# the connection died, use it as the final response
# instead of falling through to prior-turn fallback
# or wasting API calls on retries.
_partial_streamed = (
getattr(self, "_current_streamed_assistant_text", "") or ""
)
if self._has_content_after_think_block(_partial_streamed):
_turn_exit_reason = "partial_stream_recovery"
_recovered = self._strip_think_blocks(_partial_streamed).strip()
logger.info(
"Partial stream content delivered (%d chars) "
"— using as final response",
len(_recovered),
)
self._emit_status(
"↻ Stream interrupted — using delivered content "
"as final response"
)
final_response = _recovered
self._response_was_previewed = True
break
# If the previous turn already delivered real content alongside # If the previous turn already delivered real content alongside
# tool calls (e.g. "You're welcome!" + memory save), the model # tool calls (e.g. "You're welcome!" + memory save), the model
# has nothing more to say. Use the earlier content immediately # has nothing more to say. Use the earlier content immediately
@ -9884,16 +10196,23 @@ class AIAgent:
self._save_session_log(messages) self._save_session_log(messages)
continue continue
# ── Empty response retry (no reasoning) ────── # ── Empty response retry ──────────────────────
# Model returned nothing — no content, no # Model returned nothing usable. Retry up to 3
# structured reasoning, no tool calls. Common # times before attempting fallback. This covers
# with open models (transient provider issues, # both truly empty responses (no content, no
# rate limits, sampling flukes). Retry up to 3 # reasoning) AND reasoning-only responses after
# times before attempting fallback. Skip when # prefill exhaustion — models like mimo-v2-pro
# content has inline <think> tags (model chose # always populate reasoning fields via OpenRouter,
# to reason, just no visible text). # so the old `not _has_structured` guard blocked
_truly_empty = not final_response.strip() # retries for every reasoning model after prefill.
if _truly_empty and not _has_structured and self._empty_content_retries < 3: _truly_empty = not self._strip_think_blocks(
final_response
).strip()
_prefill_exhausted = (
_has_structured
and self._thinking_prefill_retries >= 2
)
if _truly_empty and (not _has_structured or _prefill_exhausted) and self._empty_content_retries < 3:
self._empty_content_retries += 1 self._empty_content_retries += 1
logger.warning( logger.warning(
"Empty response (no content or reasoning) — " "Empty response (no content or reasoning) — "
@ -10087,17 +10406,11 @@ class AIAgent:
if final_response is None and ( if final_response is None and (
api_call_count >= self.max_iterations api_call_count >= self.max_iterations
or self.iteration_budget.remaining <= 0 or self.iteration_budget.remaining <= 0
) and not self._budget_exhausted_injected: ):
# Budget exhausted but we haven't tried asking the model to # Budget exhausted — ask the model for a summary via one extra
# summarise yet. Inject a user message and give it one grace # API call with tools stripped. _handle_max_iterations injects a
# API call to produce a text response. # user message and makes a single toolless request.
self._budget_exhausted_injected = True _turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})"
self._budget_grace_call = True
_grace_msg = (
"Your tool budget ran out. Please give me the information "
"or actions you've completed so far."
)
messages.append({"role": "user", "content": _grace_msg})
self._emit_status( self._emit_status(
f"⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) " f"⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
"— asking model to summarise" "— asking model to summarise"
@ -10107,14 +10420,6 @@ class AIAgent:
f"\n⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) " f"\n⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
"— requesting summary..." "— requesting summary..."
) )
if final_response is None and (
api_call_count >= self.max_iterations
or self.iteration_budget.remaining <= 0
) and not self._budget_grace_call:
_turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})"
if self.iteration_budget.remaining <= 0 and not self.quiet_mode:
print(f"\n⚠️ Iteration budget exhausted ({self.iteration_budget.used}/{self.iteration_budget.max_total} iterations used)")
final_response = self._handle_max_iterations(messages, api_call_count) final_response = self._handle_max_iterations(messages, api_call_count)
# Determine if conversation completed successfully # Determine if conversation completed successfully

View file

@ -0,0 +1,325 @@
#!/usr/bin/env python3
"""Build the Hermes Skills Index — a centralized JSON catalog of all skills.
This script crawls every skill source (skills.sh, GitHub taps, official,
clawhub, lobehub, claude-marketplace) and writes a JSON index with resolved
GitHub paths. The index is served as a static file on the docs site so that
`hermes skills search/install` can use it without hitting the GitHub API.
Usage:
# Local (uses gh CLI or GITHUB_TOKEN for auth)
python scripts/build_skills_index.py
# CI (set GITHUB_TOKEN as secret)
GITHUB_TOKEN=ghp_... python scripts/build_skills_index.py
Output: website/static/api/skills-index.json
"""
import json
import os
import sys
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timezone
# Allow importing from repo root — this script lives in scripts/, so the
# repo root is two dirname() hops up from this file's absolute path.
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, REPO_ROOT)
# Ensure HERMES_HOME is set (needed by tools/skills_hub.py imports).
# setdefault keeps any value the caller already exported.
os.environ.setdefault("HERMES_HOME", os.path.join(os.path.expanduser("~"), ".hermes"))
from tools.skills_hub import (
GitHubAuth,
GitHubSource,
SkillsShSource,
OptionalSkillSource,
WellKnownSkillSource,
ClawHubSource,
ClaudeMarketplaceSource,
LobeHubSource,
SkillMeta,
)
import httpx
# Destination for the generated index — a static JSON artifact under the
# docs site tree (served as /api/skills-index.json).
OUTPUT_PATH = os.path.join(REPO_ROOT, "website", "static", "api", "skills-index.json")
# Schema version of the emitted index; presumably bumped when the JSON
# layout changes so consumers can detect a mismatch — TODO confirm usage.
INDEX_VERSION = 1
def _meta_to_dict(meta: SkillMeta) -> dict:
    """Serialize a SkillMeta into a plain, JSON-compatible dict.

    Attributes that may be falsy/None on the meta object (repo, path,
    tags, extra) are normalized to empty defaults so every record in the
    emitted index has a uniform shape.
    """
    record: dict = {}
    # Straight copies — these fields are emitted exactly as stored.
    for field in ("name", "description", "source", "identifier", "trust_level"):
        record[field] = getattr(meta, field)
    # Normalized copies — falsy values collapse to empty defaults.
    record["repo"] = meta.repo if meta.repo else ""
    record["path"] = meta.path if meta.path else ""
    record["tags"] = meta.tags if meta.tags else []
    record["extra"] = meta.extra if meta.extra else {}
    return record
def crawl_source(source, source_name: str, limit: int) -> list:
    """Crawl one skill source and return its results as serializable dicts.

    Progress is printed to stdout; any crawl failure is reported to
    stderr and swallowed (returning []) so one broken source cannot
    abort the whole index build.
    """
    print(f" Crawling {source_name}...", flush=True)
    started_at = time.time()
    try:
        found = source.search("", limit=limit)
    except Exception as e:
        print(f" Error crawling {source_name}: {e}", file=sys.stderr)
        return []
    entries = []
    for meta in found:
        entries.append(_meta_to_dict(meta))
    elapsed = time.time() - started_at
    print(f" {source_name}: {len(entries)} skills ({elapsed:.1f}s)", flush=True)
    return entries
def crawl_skills_sh(source: SkillsShSource) -> list:
    """Crawl skills.sh via a fan-out over popular search queries.

    skills.sh exposes no "list everything" endpoint, so broad coverage
    comes from many overlapping searches. Results are deduplicated by
    skill identifier, keeping the first hit for each. A failed query is
    logged and skipped rather than aborting the crawl.
    """
    print(" Crawling skills.sh (popular queries)...", flush=True)
    started = time.time()
    queries = [
        "",  # featured
        "react", "python", "web", "api", "database", "docker",
        "testing", "scraping", "design", "typescript", "git",
        "aws", "security", "data", "ml", "ai", "devops",
        "frontend", "backend", "mobile", "cli", "documentation",
        "kubernetes", "terraform", "rust", "go", "java",
    ]
    unique: dict[str, dict] = {}
    for term in queries:
        try:
            for meta in source.search(term, limit=50):
                entry = _meta_to_dict(meta)
                unique.setdefault(entry["identifier"], entry)
        except Exception as exc:
            print(f" Warning: skills.sh search '{term}' failed: {exc}",
                  file=sys.stderr)
    elapsed = time.time() - started
    print(f" skills.sh: {len(unique)} unique skills ({elapsed:.1f}s)",
          flush=True)
    return list(unique.values())
def _fetch_repo_tree(repo: str, auth: GitHubAuth) -> list:
    """Fetch a repo's recursive git tree from the GitHub API.

    Two calls per repo: one to learn the default branch, one for the
    recursive tree listing. Returns the list of tree entries, or [] on
    any failure — non-200 responses, network errors, or a truncated
    tree (too large for reliable path resolution).
    """
    headers = auth.get_headers()
    try:
        repo_resp = httpx.get(
            f"https://api.github.com/repos/{repo}",
            headers=headers, timeout=15, follow_redirects=True,
        )
        if repo_resp.status_code != 200:
            return []
        default_branch = repo_resp.json().get("default_branch", "main")

        tree_resp = httpx.get(
            f"https://api.github.com/repos/{repo}/git/trees/{default_branch}",
            params={"recursive": "1"},
            headers=headers, timeout=30, follow_redirects=True,
        )
        if tree_resp.status_code != 200:
            return []
        payload = tree_resp.json()
        # A truncated tree would make path lookups unreliable — bail out.
        if payload.get("truncated"):
            return []
        return payload.get("tree", [])
    except Exception:
        return []
def batch_resolve_paths(skills: list, auth: GitHubAuth) -> list:
    """Resolve GitHub paths for skills.sh entries using batch tree lookups.

    Instead of resolving each skill individually (N×M API calls), we:
    1. Group skills by repo
    2. Fetch one tree per repo (2 API calls per repo)
    3. Find all SKILL.md files in the tree
    4. Match skills to their resolved paths

    Matching entries are mutated in place (a "resolved_github_id" key is
    added); the same `skills` list is returned. Entries from other
    sources pass through untouched.
    """
    # Filter to skills.sh entries that need resolution
    skills_sh = [s for s in skills if s["source"] in ("skills.sh", "skills-sh")]
    if not skills_sh:
        return skills
    print(f" Resolving paths for {len(skills_sh)} skills.sh entries...",
          flush=True)
    start = time.time()
    # Group by repo; entries with no repo recorded cannot be resolved
    # and are silently skipped here.
    by_repo: dict[str, list] = defaultdict(list)
    for s in skills_sh:
        repo = s.get("repo", "")
        if repo:
            by_repo[repo].append(s)
    print(f" {len(by_repo)} unique repos to scan", flush=True)
    resolved_count = 0
    # Fetch trees in parallel (up to 6 concurrent)
    def _resolve_repo(repo: str, entries: list) -> int:
        # Worker: fetch one repo's tree and resolve all of its entries.
        # Returns the number of entries it managed to resolve.
        tree = _fetch_repo_tree(repo, auth)
        if not tree:
            return 0
        # Find all SKILL.md paths in this repo
        skill_paths = {}  # skill_dir_name (lowercased) -> "repo/dir" path
        for item in tree:
            if item.get("type") != "blob":
                continue
            path = item.get("path", "")
            if path.endswith("/SKILL.md"):
                skill_dir = path[: -len("/SKILL.md")]
                dir_name = skill_dir.split("/")[-1]
                skill_paths[dir_name.lower()] = f"{repo}/{skill_dir}"
                # Also check SKILL.md frontmatter name if we can match by path
                # For now, just index by directory name
            elif path == "SKILL.md":
                # Root-level SKILL.md
                # NOTE(review): the "_root_" sentinel is never produced by the
                # exact-match candidates below, so root-level skills only
                # resolve if the fuzzy substring pass happens to hit — confirm
                # whether root repos should match unconditionally.
                skill_paths["_root_"] = f"{repo}"
        count = 0
        for entry in entries:
            # Try to match the skill's name/path to a tree entry
            skill_name = entry.get("name", "").lower()
            skill_path = entry.get("path", "").lower()
            identifier = entry.get("identifier", "")
            # Extract the skill token from the identifier
            # e.g. "skills-sh/d4vinci/scrapling/scrapling-official" -> "scrapling-official"
            parts = identifier.replace("skills-sh/", "").replace("skills.sh/", "")
            skill_token = parts.split("/")[-1].lower() if "/" in parts else ""
            # Try matching in order of likelihood: identifier token first,
            # then display name, then recorded path.
            for candidate in [skill_token, skill_name, skill_path]:
                if not candidate:
                    continue
                matched = skill_paths.get(candidate)
                if matched:
                    entry["resolved_github_id"] = matched
                    count += 1
                    break
            else:
                # Try fuzzy: skill_token with common transformations
                # (hyphen-insensitive equality, then substring either way).
                # First tree entry that matches wins — dict insertion order.
                for tree_name, tree_path in skill_paths.items():
                    if (skill_token and (
                        tree_name.replace("-", "") == skill_token.replace("-", "")
                        or skill_token in tree_name
                        or tree_name in skill_token
                    )):
                        entry["resolved_github_id"] = tree_path
                        count += 1
                        break
        return count
    with ThreadPoolExecutor(max_workers=6) as pool:
        futures = {
            pool.submit(_resolve_repo, repo, entries): repo
            for repo, entries in by_repo.items()
        }
        for future in as_completed(futures):
            try:
                resolved_count += future.result()
            except Exception as e:
                repo = futures[future]
                print(f" Warning: {repo}: {e}", file=sys.stderr)
    elapsed = time.time() - start
    print(f" Resolved {resolved_count}/{len(skills_sh)} paths ({elapsed:.1f}s)",
          flush=True)
    return skills
def main():
    """Build the full skills index and write it to OUTPUT_PATH.

    Pipeline: crawl all sources (skills.sh via its multi-query crawl,
    everything else in a thread pool), batch-resolve GitHub paths,
    deduplicate by identifier (first occurrence wins), sort by source
    priority then name, and dump a compact JSON index. Prints a
    per-source summary at the end.
    """
    print("Building Hermes Skills Index...", flush=True)
    build_start = time.time()

    auth = GitHubAuth()
    print(f"GitHub auth: {auth.auth_method()}")
    if auth.auth_method() == "anonymous":
        print("WARNING: No GitHub authentication — rate limit is 60/hr. "
              "Set GITHUB_TOKEN for better results.", file=sys.stderr)

    skills_sh_source = SkillsShSource(auth=auth)
    sources = {
        "official": OptionalSkillSource(),
        "well-known": WellKnownSkillSource(),
        "github": GitHubSource(auth=auth),
        "clawhub": ClawHubSource(),
        "claude-marketplace": ClaudeMarketplaceSource(auth=auth),
        "lobehub": LobeHubSource(),
    }

    # skills.sh needs its dedicated multi-query crawl; the rest share
    # a uniform crawl and run concurrently.
    collected: list[dict] = list(crawl_skills_sh(skills_sh_source))
    with ThreadPoolExecutor(max_workers=4) as pool:
        pending = {
            pool.submit(crawl_source, source_obj, name, 500): name
            for name, source_obj in sources.items()
        }
        for done in as_completed(pending):
            try:
                collected.extend(done.result())
            except Exception as e:
                print(f" Error: {e}", file=sys.stderr)

    # Batch resolve GitHub paths for skills.sh entries
    collected = batch_resolve_paths(collected, auth)

    # Deduplicate by identifier — first occurrence wins.
    unique: dict[str, dict] = {}
    for skill in collected:
        unique.setdefault(skill["identifier"], skill)
    deduped = list(unique.values())

    # Stable sort: source priority first, then name.
    source_order = {"official": 0, "skills-sh": 1, "skills.sh": 1,
                    "github": 2, "well-known": 3, "clawhub": 4,
                    "claude-marketplace": 5, "lobehub": 6}
    deduped.sort(key=lambda s: (source_order.get(s["source"], 99), s["name"]))

    index = {
        "version": INDEX_VERSION,
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "skill_count": len(deduped),
        "skills": deduped,
    }
    os.makedirs(os.path.dirname(OUTPUT_PATH), exist_ok=True)
    with open(OUTPUT_PATH, "w") as f:
        json.dump(index, f, separators=(",", ":"), ensure_ascii=False)

    elapsed = time.time() - build_start
    file_size = os.path.getsize(OUTPUT_PATH)
    print(f"\nDone! {len(deduped)} skills indexed in {elapsed:.0f}s")
    print(f"Output: {OUTPUT_PATH} ({file_size / 1024:.0f} KB)")

    # Per-source breakdown, largest sources first.
    from collections import Counter
    by_source = Counter(s["source"] for s in deduped)
    for src, count in sorted(by_source.items(), key=lambda x: -x[1]):
        resolved = sum(1 for s in deduped
                       if s["source"] == src and s.get("resolved_github_id"))
        extra = f" ({resolved} resolved)" if resolved else ""
        print(f" {src}: {count}{extra}")


if __name__ == "__main__":
    main()

View file

@ -15,9 +15,9 @@
} }
}, },
"node_modules/@borewit/text-codec": { "node_modules/@borewit/text-codec": {
"version": "0.2.1", "version": "0.2.2",
"resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.1.tgz", "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.2.tgz",
"integrity": "sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==", "integrity": "sha512-DDaRehssg1aNrH4+2hnj1B7vnUGEjU6OIlyRdkMd0aUdIUvKXrJfXsy8LVtXAy7DRvYVluWbMspsRhz2lcW0mQ==",
"license": "MIT", "license": "MIT",
"funding": { "funding": {
"type": "github", "type": "github",
@ -1088,9 +1088,9 @@
} }
}, },
"node_modules/file-type": { "node_modules/file-type": {
"version": "21.3.0", "version": "21.3.4",
"resolved": "https://registry.npmjs.org/file-type/-/file-type-21.3.0.tgz", "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.3.4.tgz",
"integrity": "sha512-8kPJMIGz1Yt/aPEwOsrR97ZyZaD1Iqm8PClb1nYFclUCkBi0Ma5IsYNQzvSFS9ib51lWyIw5mIT9rWzI/xjpzA==", "integrity": "sha512-Ievi/yy8DS3ygGvT47PjSfdFoX+2isQueoYP1cntFW1JLYAuS4GD7NUPGg4zv2iZfV52uDyk5w5Z0TdpRS6Q1g==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@tokenizer/inflate": "^0.4.1", "@tokenizer/inflate": "^0.4.1",
@ -1456,9 +1456,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/music-metadata": { "node_modules/music-metadata": {
"version": "11.12.1", "version": "11.12.3",
"resolved": "https://registry.npmjs.org/music-metadata/-/music-metadata-11.12.1.tgz", "resolved": "https://registry.npmjs.org/music-metadata/-/music-metadata-11.12.3.tgz",
"integrity": "sha512-j++ltLxHDb5VCXET9FzQ8bnueiLHwQKgCO7vcbkRH/3F7fRjPkv6qncGEJ47yFhmemcYtgvsOAlcQ1dRBTkDjg==", "integrity": "sha512-n6hSTZkuD59qWgHh6IP5dtDlDZQXoxk/bcA85Jywg8Z1iFrlNgl2+GTFgjZyn52W5UgQpV42V4XqrQZZAMbZTQ==",
"funding": [ "funding": [
{ {
"type": "github", "type": "github",
@ -1471,11 +1471,11 @@
], ],
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@borewit/text-codec": "^0.2.1", "@borewit/text-codec": "^0.2.2",
"@tokenizer/token": "^0.3.0", "@tokenizer/token": "^0.3.0",
"content-type": "^1.0.5", "content-type": "^1.0.5",
"debug": "^4.4.3", "debug": "^4.4.3",
"file-type": "^21.3.0", "file-type": "^21.3.1",
"media-typer": "^1.1.0", "media-typer": "^1.1.0",
"strtok3": "^10.3.4", "strtok3": "^10.3.4",
"token-types": "^6.1.2", "token-types": "^6.1.2",
@ -1589,9 +1589,9 @@
} }
}, },
"node_modules/path-to-regexp": { "node_modules/path-to-regexp": {
"version": "0.1.12", "version": "0.1.13",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/pino": { "node_modules/pino": {
@ -2002,9 +2002,9 @@
} }
}, },
"node_modules/strtok3": { "node_modules/strtok3": {
"version": "10.3.4", "version": "10.3.5",
"resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.4.tgz", "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.5.tgz",
"integrity": "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg==", "integrity": "sha512-ki4hZQfh5rX0QDLLkOCj+h+CVNkqmp/CMf8v8kZpkNVK6jGQooMytqzLZYUVYIZcFZ6yDB70EfD8POcFXiF5oA==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@tokenizer/token": "^0.3.0" "@tokenizer/token": "^0.3.0"

View file

@ -19,7 +19,7 @@ What makes Hermes different:
- **Self-improving through skills** — Hermes learns from experience by saving reusable procedures as skills. When it solves a complex problem, discovers a workflow, or gets corrected, it can persist that knowledge as a skill document that loads into future sessions. Skills accumulate over time, making the agent better at your specific tasks and environment. - **Self-improving through skills** — Hermes learns from experience by saving reusable procedures as skills. When it solves a complex problem, discovers a workflow, or gets corrected, it can persist that knowledge as a skill document that loads into future sessions. Skills accumulate over time, making the agent better at your specific tasks and environment.
- **Persistent memory across sessions** — remembers who you are, your preferences, environment details, and lessons learned. Pluggable memory backends (built-in, Honcho, Mem0, and more) let you choose how memory works. - **Persistent memory across sessions** — remembers who you are, your preferences, environment details, and lessons learned. Pluggable memory backends (built-in, Honcho, Mem0, and more) let you choose how memory works.
- **Multi-platform gateway** — the same agent runs on Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, and 8+ other platforms with full tool access, not just chat. - **Multi-platform gateway** — the same agent runs on Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, and 10+ other platforms with full tool access, not just chat.
- **Provider-agnostic** — swap models and providers mid-workflow without changing anything else. Credential pools rotate across multiple API keys automatically. - **Provider-agnostic** — swap models and providers mid-workflow without changing anything else. Credential pools rotate across multiple API keys automatically.
- **Profiles** — run multiple independent Hermes instances with isolated configs, sessions, skills, and memory. - **Profiles** — run multiple independent Hermes instances with isolated configs, sessions, skills, and memory.
- **Extensible** — plugins, MCP servers, custom tools, webhook triggers, cron scheduling, and the full Python ecosystem. - **Extensible** — plugins, MCP servers, custom tools, webhook triggers, cron scheduling, and the full Python ecosystem.
@ -148,7 +148,7 @@ hermes gateway status Check status
hermes gateway setup Configure platforms hermes gateway setup Configure platforms
``` ```
Supported platforms: Telegram, Discord, Slack, WhatsApp, Signal, Email, SMS, Matrix, Mattermost, Home Assistant, DingTalk, Feishu, WeCom, API Server, Webhooks, Open WebUI. Supported platforms: Telegram, Discord, Slack, WhatsApp, Signal, Email, SMS, Matrix, Mattermost, Home Assistant, DingTalk, Feishu, WeCom, BlueBubbles (iMessage), Weixin (WeChat), API Server, Webhooks. Open WebUI connects via the API Server adapter.
Platform docs: https://hermes-agent.nousresearch.com/docs/user-guide/messaging/ Platform docs: https://hermes-agent.nousresearch.com/docs/user-guide/messaging/
@ -215,7 +215,7 @@ hermes insights [--days N] Usage analytics
hermes update Update to latest version hermes update Update to latest version
hermes pairing list/approve/revoke DM authorization hermes pairing list/approve/revoke DM authorization
hermes plugins list/install/remove Plugin management hermes plugins list/install/remove Plugin management
hermes honcho setup/status Honcho memory integration hermes honcho setup/status Honcho memory integration (requires honcho plugin)
hermes memory setup/status/off Memory provider config hermes memory setup/status/off Memory provider config
hermes completion bash|zsh Shell completions hermes completion bash|zsh Shell completions
hermes acp ACP server (IDE integration) hermes acp ACP server (IDE integration)
@ -269,6 +269,28 @@ Type these during an interactive chat session.
/plugins List plugins (CLI) /plugins List plugins (CLI)
``` ```
### Gateway
```
/approve Approve a pending command (gateway)
/deny Deny a pending command (gateway)
/restart Restart gateway (gateway)
/sethome Set current chat as home channel (gateway)
/update Update Hermes to latest (gateway)
/platforms (/gateway) Show platform connection status (gateway)
```
### Utility
```
/branch (/fork) Branch the current session
/btw Ephemeral side question (doesn't interrupt main task)
/fast Toggle priority/fast processing
/browser Open CDP browser connection
/history Show conversation history (CLI)
/save Save conversation to file (CLI)
/paste Attach clipboard image (CLI)
/image Attach local image file (CLI)
```
### Info ### Info
``` ```
/help Show commands /help Show commands
@ -311,11 +333,11 @@ Edit with `hermes config edit` or `hermes config set section.key value`.
| `terminal` | `backend` (local/docker/ssh/modal), `cwd`, `timeout` (180) | | `terminal` | `backend` (local/docker/ssh/modal), `cwd`, `timeout` (180) |
| `compression` | `enabled`, `threshold` (0.50), `target_ratio` (0.20) | | `compression` | `enabled`, `threshold` (0.50), `target_ratio` (0.20) |
| `display` | `skin`, `tool_progress`, `show_reasoning`, `show_cost` | | `display` | `skin`, `tool_progress`, `show_reasoning`, `show_cost` |
| `stt` | `enabled`, `provider` (local/groq/openai) | | `stt` | `enabled`, `provider` (local/groq/openai/mistral) |
| `tts` | `provider` (edge/elevenlabs/openai/kokoro/fish) | | `tts` | `provider` (edge/elevenlabs/openai/minimax/mistral/neutts) |
| `memory` | `memory_enabled`, `user_profile_enabled`, `provider` | | `memory` | `memory_enabled`, `user_profile_enabled`, `provider` |
| `security` | `tirith_enabled`, `website_blocklist` | | `security` | `tirith_enabled`, `website_blocklist` |
| `delegation` | `model`, `provider`, `max_iterations` (50) | | `delegation` | `model`, `provider`, `base_url`, `api_key`, `max_iterations` (50), `reasoning_effort` |
| `smart_model_routing` | `enabled`, `cheap_model` | | `smart_model_routing` | `enabled`, `cheap_model` |
| `checkpoints` | `enabled`, `max_snapshots` (50) | | `checkpoints` | `enabled`, `max_snapshots` (50) |
@ -323,7 +345,7 @@ Full config reference: https://hermes-agent.nousresearch.com/docs/user-guide/con
### Providers ### Providers
18 providers supported. Set via `hermes model` or `hermes setup`. 20+ providers supported. Set via `hermes model` or `hermes setup`.
| Provider | Auth | Key env var | | Provider | Auth | Key env var |
|----------|------|-------------| |----------|------|-------------|
@ -332,16 +354,23 @@ Full config reference: https://hermes-agent.nousresearch.com/docs/user-guide/con
| Nous Portal | OAuth | `hermes login --provider nous` | | Nous Portal | OAuth | `hermes login --provider nous` |
| OpenAI Codex | OAuth | `hermes login --provider openai-codex` | | OpenAI Codex | OAuth | `hermes login --provider openai-codex` |
| GitHub Copilot | Token | `COPILOT_GITHUB_TOKEN` | | GitHub Copilot | Token | `COPILOT_GITHUB_TOKEN` |
| Google Gemini | API key | `GOOGLE_API_KEY` or `GEMINI_API_KEY` |
| DeepSeek | API key | `DEEPSEEK_API_KEY` | | DeepSeek | API key | `DEEPSEEK_API_KEY` |
| xAI / Grok | API key | `XAI_API_KEY` |
| Hugging Face | Token | `HF_TOKEN` | | Hugging Face | Token | `HF_TOKEN` |
| Z.AI / GLM | API key | `GLM_API_KEY` | | Z.AI / GLM | API key | `GLM_API_KEY` |
| MiniMax | API key | `MINIMAX_API_KEY` | | MiniMax | API key | `MINIMAX_API_KEY` |
| MiniMax CN | API key | `MINIMAX_CN_API_KEY` |
| Kimi / Moonshot | API key | `KIMI_API_KEY` | | Kimi / Moonshot | API key | `KIMI_API_KEY` |
| Alibaba / DashScope | API key | `DASHSCOPE_API_KEY` | | Alibaba / DashScope | API key | `DASHSCOPE_API_KEY` |
| Xiaomi MiMo | API key | `XIAOMI_API_KEY` |
| Kilo Code | API key | `KILOCODE_API_KEY` | | Kilo Code | API key | `KILOCODE_API_KEY` |
| AI Gateway (Vercel) | API key | `AI_GATEWAY_API_KEY` |
| OpenCode Zen | API key | `OPENCODE_ZEN_API_KEY` |
| OpenCode Go | API key | `OPENCODE_GO_API_KEY` |
| Qwen OAuth | OAuth | `hermes login --provider qwen-oauth` |
| Custom endpoint | Config | `model.base_url` + `model.api_key` in config.yaml | | Custom endpoint | Config | `model.base_url` + `model.api_key` in config.yaml |
| GitHub Copilot ACP | External | `COPILOT_CLI_PATH` or Copilot CLI |
Plus: AI Gateway, OpenCode Zen, OpenCode Go, MiniMax CN, GitHub Copilot ACP.
Full provider docs: https://hermes-agent.nousresearch.com/docs/integrations/providers Full provider docs: https://hermes-agent.nousresearch.com/docs/integrations/providers
@ -365,6 +394,10 @@ Enable/disable via `hermes tools` (interactive) or `hermes tools enable/disable
| `delegation` | Subagent task delegation | | `delegation` | Subagent task delegation |
| `cronjob` | Scheduled task management | | `cronjob` | Scheduled task management |
| `clarify` | Ask user clarifying questions | | `clarify` | Ask user clarifying questions |
| `messaging` | Cross-platform message sending |
| `search` | Web search only (subset of `web`) |
| `todo` | In-session task planning and tracking |
| `rl` | Reinforcement learning tools (off by default) |
| `moa` | Mixture of Agents (off by default) | | `moa` | Mixture of Agents (off by default) |
| `homeassistant` | Smart home control (off by default) | | `homeassistant` | Smart home control (off by default) |
@ -382,12 +415,13 @@ Provider priority (auto-detected):
1. **Local faster-whisper** — free, no API key: `pip install faster-whisper` 1. **Local faster-whisper** — free, no API key: `pip install faster-whisper`
2. **Groq Whisper** — free tier: set `GROQ_API_KEY` 2. **Groq Whisper** — free tier: set `GROQ_API_KEY`
3. **OpenAI Whisper** — paid: set `VOICE_TOOLS_OPENAI_KEY` 3. **OpenAI Whisper** — paid: set `VOICE_TOOLS_OPENAI_KEY`
4. **Mistral Voxtral** — set `MISTRAL_API_KEY`
Config: Config:
```yaml ```yaml
stt: stt:
enabled: true enabled: true
provider: local # local, groq, openai provider: local # local, groq, openai, mistral
local: local:
model: base # tiny, base, small, medium, large-v3 model: base # tiny, base, small, medium, large-v3
``` ```
@ -399,8 +433,9 @@ stt:
| Edge TTS | None | Yes (default) | | Edge TTS | None | Yes (default) |
| ElevenLabs | `ELEVENLABS_API_KEY` | Free tier | | ElevenLabs | `ELEVENLABS_API_KEY` | Free tier |
| OpenAI | `VOICE_TOOLS_OPENAI_KEY` | Paid | | OpenAI | `VOICE_TOOLS_OPENAI_KEY` | Paid |
| Kokoro (local) | None | Free | | MiniMax | `MINIMAX_API_KEY` | Paid |
| Fish Audio | `FISH_AUDIO_API_KEY` | Free tier | | Mistral (Voxtral) | `MISTRAL_API_KEY` | Paid |
| NeuTTS (local) | None (`pip install neutts[all]` + `espeak-ng`) | Free |
Voice commands: `/voice on` (voice-to-voice), `/voice tts` (always voice), `/voice off`. Voice commands: `/voice on` (voice-to-voice), `/voice tts` (always voice), `/voice off`.
@ -492,7 +527,7 @@ terminal(command="tmux new-session -d -s resumed 'hermes --resume 20260225_14305
### Voice not working ### Voice not working
1. Check `stt.enabled: true` in config.yaml 1. Check `stt.enabled: true` in config.yaml
2. Verify provider: `pip install faster-whisper` or set API key 2. Verify provider: `pip install faster-whisper` or set API key
3. Restart gateway: `/restart` 3. In gateway: `/restart`. In CLI: exit and relaunch.
### Tool not available ### Tool not available
1. `hermes tools` — check if toolset is enabled for your platform 1. `hermes tools` — check if toolset is enabled for your platform
@ -503,10 +538,11 @@ terminal(command="tmux new-session -d -s resumed 'hermes --resume 20260225_14305
1. `hermes doctor` — check config and dependencies 1. `hermes doctor` — check config and dependencies
2. `hermes login` — re-authenticate OAuth providers 2. `hermes login` — re-authenticate OAuth providers
3. Check `.env` has the right API key 3. Check `.env` has the right API key
4. **Copilot 403**: `gh auth login` tokens do NOT work for Copilot API. You must use the Copilot-specific OAuth device code flow via `hermes model` → GitHub Copilot.
### Changes not taking effect ### Changes not taking effect
- **Tools/skills:** `/reset` starts a new session with updated toolset - **Tools/skills:** `/reset` starts a new session with updated toolset
- **Config changes:** `/restart` reloads gateway config - **Config changes:** In gateway: `/restart`. In CLI: exit and relaunch.
- **Code changes:** Restart the CLI or gateway process - **Code changes:** Restart the CLI or gateway process
### Skills not showing ### Skills not showing
@ -520,6 +556,23 @@ Check logs first:
grep -i "failed to send\|error" ~/.hermes/logs/gateway.log | tail -20 grep -i "failed to send\|error" ~/.hermes/logs/gateway.log | tail -20
``` ```
Common gateway problems:
- **Gateway dies on SSH logout**: Enable linger: `sudo loginctl enable-linger $USER`
- **Gateway dies on WSL2 close**: WSL2 requires `systemd=true` in `/etc/wsl.conf` for systemd services to work. Without it, gateway falls back to `nohup` (dies when session closes).
- **Gateway crash loop**: Reset the failed state: `systemctl --user reset-failed hermes-gateway`
### Platform-specific issues
- **Discord bot silent**: Must enable **Message Content Intent** in Bot → Privileged Gateway Intents.
- **Slack bot only works in DMs**: Must subscribe to `message.channels` event. Without it, the bot ignores public channels.
- **Windows HTTP 400 "No models provided"**: Config file encoding issue (BOM). Ensure `config.yaml` is saved as UTF-8 without BOM.
### Auxiliary models not working
If `auxiliary` tasks (vision, compression, session_search) fail silently, the `auto` provider can't find a backend. Either set `OPENROUTER_API_KEY` or `GOOGLE_API_KEY`, or explicitly configure each auxiliary task's provider:
```bash
hermes config set auxiliary.vision.provider <your_provider>
hermes config set auxiliary.vision.model <model_name>
```
--- ---
## Where to Find Things ## Where to Find Things
@ -557,7 +610,7 @@ hermes-agent/
├── toolsets.py # Toolset definitions ├── toolsets.py # Toolset definitions
├── cli.py # Interactive CLI (HermesCLI) ├── cli.py # Interactive CLI (HermesCLI)
├── hermes_state.py # SQLite session store ├── hermes_state.py # SQLite session store
├── agent/ # Prompt builder, compression, display, adapters ├── agent/ # Prompt builder, context compression, memory, model routing, credential pooling, skill dispatch
├── hermes_cli/ # CLI subcommands, config, setup, commands ├── hermes_cli/ # CLI subcommands, config, setup, commands
│ ├── commands.py # Slash command registry (CommandDef) │ ├── commands.py # Slash command registry (CommandDef)
│ ├── config.py # DEFAULT_CONFIG, env var definitions │ ├── config.py # DEFAULT_CONFIG, env var definitions
@ -626,7 +679,6 @@ run_conversation():
### Testing ### Testing
```bash ```bash
source venv/bin/activate # or .venv/bin/activate
python -m pytest tests/ -o 'addopts=' -q # Full suite python -m pytest tests/ -o 'addopts=' -q # Full suite
python -m pytest tests/tools/ -q # Specific area python -m pytest tests/tools/ -q # Specific area
``` ```

View file

@ -820,6 +820,24 @@ Every successful ML paper centers on what Neel Nanda calls "the narrative": a sh
**If you cannot state your contribution in one sentence, you don't yet have a paper.** **If you cannot state your contribution in one sentence, you don't yet have a paper.**
### The Sources Behind This Guidance
This skill synthesizes writing philosophy from researchers who have published extensively at top venues. The writing philosophy layer was originally compiled by [Orchestra Research](https://github.com/orchestra-research) as the `ml-paper-writing` skill.
| Source | Key Contribution | Link |
|--------|-----------------|------|
| **Neel Nanda** (Google DeepMind) | The Narrative Principle, What/Why/So What framework | [How to Write ML Papers](https://www.alignmentforum.org/posts/eJGptPbbFPZGLpjsp/highly-opinionated-advice-on-how-to-write-ml-papers) |
| **Sebastian Farquhar** (DeepMind) | 5-sentence abstract formula | [How to Write ML Papers](https://sebastianfarquhar.com/on-research/2024/11/04/how_to_write_ml_papers/) |
| **Gopen & Swan** | 7 principles of reader expectations | [Science of Scientific Writing](https://cseweb.ucsd.edu/~swanson/papers/science-of-writing.pdf) |
| **Zachary Lipton** | Word choice, eliminating hedging | [Heuristics for Scientific Writing](https://www.approximatelycorrect.com/2018/01/29/heuristics-technical-scientific-writing-machine-learning-perspective/) |
| **Jacob Steinhardt** (UC Berkeley) | Precision, consistent terminology | [Writing Tips](https://bounded-regret.ghost.io/) |
| **Ethan Perez** (Anthropic) | Micro-level clarity tips | [Easy Paper Writing Tips](https://ethanperez.net/easy-paper-writing-tips/) |
| **Andrej Karpathy** | Single contribution focus | Various lectures |
**For deeper dives into any of these, see:**
- [references/writing-guide.md](references/writing-guide.md) — Full explanations with examples
- [references/sources.md](references/sources.md) — Complete bibliography
### Time Allocation ### Time Allocation
Spend approximately **equal time** on each of: Spend approximately **equal time** on each of:

View file

@ -4,6 +4,12 @@ This document lists all authoritative sources used to build this skill, organize
--- ---
## Origin & Attribution
The writing philosophy, citation verification workflow, and conference reference materials in this skill were originally compiled by **[Orchestra Research](https://github.com/orchestra-research)** as the `ml-paper-writing` skill (January 2026), drawing on Neel Nanda's blog post and other researcher guides listed below. The skill was integrated into hermes-agent by teknium (January 2026), then expanded into the current `research-paper-writing` pipeline by SHL0MS (April 2026, PR #4654), which added experiment design, execution monitoring, iterative refinement, and submission phases while preserving the original writing philosophy and reference files.
---
## Writing Philosophy & Guides ## Writing Philosophy & Guides
### Primary Sources (Must-Read) ### Primary Sources (Must-Read)

View file

@ -17,7 +17,6 @@ from agent.auxiliary_client import (
call_llm, call_llm,
async_call_llm, async_call_llm,
_read_codex_access_token, _read_codex_access_token,
_get_auxiliary_provider,
_get_provider_chain, _get_provider_chain,
_is_payment_error, _is_payment_error,
_try_payment_fallback, _try_payment_fallback,
@ -32,12 +31,6 @@ def _clean_env(monkeypatch):
"OPENROUTER_API_KEY", "OPENAI_BASE_URL", "OPENAI_API_KEY", "OPENROUTER_API_KEY", "OPENAI_BASE_URL", "OPENAI_API_KEY",
"OPENAI_MODEL", "LLM_MODEL", "NOUS_INFERENCE_BASE_URL", "OPENAI_MODEL", "LLM_MODEL", "NOUS_INFERENCE_BASE_URL",
"ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN", "ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN",
# Per-task provider/model/direct-endpoint overrides
"AUXILIARY_VISION_PROVIDER", "AUXILIARY_VISION_MODEL",
"AUXILIARY_VISION_BASE_URL", "AUXILIARY_VISION_API_KEY",
"AUXILIARY_WEB_EXTRACT_PROVIDER", "AUXILIARY_WEB_EXTRACT_MODEL",
"AUXILIARY_WEB_EXTRACT_BASE_URL", "AUXILIARY_WEB_EXTRACT_API_KEY",
"CONTEXT_COMPRESSION_PROVIDER", "CONTEXT_COMPRESSION_MODEL",
): ):
monkeypatch.delenv(key, raising=False) monkeypatch.delenv(key, raising=False)
@ -568,29 +561,6 @@ class TestGetTextAuxiliaryClient:
call_kwargs = mock_openai.call_args call_kwargs = mock_openai.call_args
assert call_kwargs.kwargs["base_url"] == "http://localhost:1234/v1" assert call_kwargs.kwargs["base_url"] == "http://localhost:1234/v1"
def test_task_direct_endpoint_override(self, monkeypatch):
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key")
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_BASE_URL", "http://localhost:2345/v1")
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_API_KEY", "task-key")
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_MODEL", "task-model")
with patch("agent.auxiliary_client.OpenAI") as mock_openai:
client, model = get_text_auxiliary_client("web_extract")
assert model == "task-model"
assert mock_openai.call_args.kwargs["base_url"] == "http://localhost:2345/v1"
assert mock_openai.call_args.kwargs["api_key"] == "task-key"
def test_task_direct_endpoint_without_openai_key_uses_placeholder(self, monkeypatch):
"""Local endpoints without an API key should use 'no-key-required' placeholder."""
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key")
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_BASE_URL", "http://localhost:2345/v1")
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_MODEL", "task-model")
with patch("agent.auxiliary_client.OpenAI") as mock_openai:
client, model = get_text_auxiliary_client("web_extract")
assert client is not None
assert model == "task-model"
assert mock_openai.call_args.kwargs["api_key"] == "no-key-required"
assert mock_openai.call_args.kwargs["base_url"] == "http://localhost:2345/v1"
def test_custom_endpoint_uses_config_saved_base_url(self, monkeypatch): def test_custom_endpoint_uses_config_saved_base_url(self, monkeypatch):
config = { config = {
"model": { "model": {
@ -879,73 +849,9 @@ class TestAuxiliaryPoolAwareness:
class TestGetAuxiliaryProvider:
"""Tests for _get_auxiliary_provider env var resolution."""
def test_no_task_returns_auto(self):
assert _get_auxiliary_provider() == "auto"
assert _get_auxiliary_provider("") == "auto"
def test_auxiliary_prefix_takes_priority(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "openrouter")
assert _get_auxiliary_provider("vision") == "openrouter"
def test_context_prefix_fallback(self, monkeypatch):
monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous")
assert _get_auxiliary_provider("compression") == "nous"
def test_auxiliary_prefix_over_context_prefix(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_COMPRESSION_PROVIDER", "openrouter")
monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous")
assert _get_auxiliary_provider("compression") == "openrouter"
def test_auto_value_treated_as_auto(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "auto")
assert _get_auxiliary_provider("vision") == "auto"
def test_whitespace_stripped(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", " openrouter ")
assert _get_auxiliary_provider("vision") == "openrouter"
def test_case_insensitive(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "OpenRouter")
assert _get_auxiliary_provider("vision") == "openrouter"
def test_main_provider(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_PROVIDER", "main")
assert _get_auxiliary_provider("web_extract") == "main"
class TestTaskSpecificOverrides: class TestTaskSpecificOverrides:
"""Integration tests for per-task provider routing via get_text_auxiliary_client(task=...).""" """Integration tests for per-task provider routing via get_text_auxiliary_client(task=...)."""
def test_text_with_vision_provider_override(self, monkeypatch):
"""AUXILIARY_VISION_PROVIDER should not affect text tasks."""
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "nous")
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key")
with patch("agent.auxiliary_client.OpenAI"):
client, model = get_text_auxiliary_client() # no task → auto
assert model == "google/gemini-3-flash-preview" # OpenRouter, not Nous
def test_compression_task_reads_context_prefix(self, monkeypatch):
"""Compression task should check CONTEXT_COMPRESSION_PROVIDER env var."""
monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous")
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") # would win in auto
with patch("agent.auxiliary_client._read_nous_auth") as mock_nous, \
patch("agent.auxiliary_client.OpenAI"):
mock_nous.return_value = {"access_token": "***"}
client, model = get_text_auxiliary_client("compression")
# Config-first: model comes from config.yaml summary_model default,
# but provider is forced to Nous via env var
assert client is not None
def test_web_extract_task_override(self, monkeypatch):
monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_PROVIDER", "openrouter")
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key")
with patch("agent.auxiliary_client.OpenAI"):
client, model = get_text_auxiliary_client("web_extract")
assert model == "google/gemini-3-flash-preview"
def test_task_direct_endpoint_from_config(self, monkeypatch, tmp_path): def test_task_direct_endpoint_from_config(self, monkeypatch, tmp_path):
hermes_home = tmp_path / "hermes" hermes_home = tmp_path / "hermes"
hermes_home.mkdir(parents=True, exist_ok=True) hermes_home.mkdir(parents=True, exist_ok=True)
@ -979,8 +885,6 @@ class TestTaskSpecificOverrides:
"""model: """model:
default: glm-5.1 default: glm-5.1
provider: opencode-go provider: opencode-go
compression:
summary_provider: auto
""" """
) )
monkeypatch.setenv("HERMES_HOME", str(hermes_home)) monkeypatch.setenv("HERMES_HOME", str(hermes_home))
@ -1039,24 +943,45 @@ model:
"model": "gpt-5.4", "model": "gpt-5.4",
} }
def test_compression_summary_base_url_from_config(self, monkeypatch, tmp_path):
"""compression.summary_base_url should produce a custom-endpoint client.""" def test_resolve_provider_client_supports_copilot_acp_external_process():
hermes_home = tmp_path / "hermes" fake_client = MagicMock()
hermes_home.mkdir(parents=True, exist_ok=True)
(hermes_home / "config.yaml").write_text( with patch("agent.auxiliary_client._read_main_model", return_value="gpt-5.4-mini"), \
"""compression: patch("agent.auxiliary_client.CodexAuxiliaryClient", MagicMock()), \
summary_provider: custom patch("agent.copilot_acp_client.CopilotACPClient", return_value=fake_client) as mock_acp, \
summary_model: glm-4.7 patch("hermes_cli.auth.resolve_external_process_provider_credentials", return_value={
summary_base_url: https://api.z.ai/api/coding/paas/v4 "provider": "copilot-acp",
""" "api_key": "copilot-acp",
) "base_url": "acp://copilot",
monkeypatch.setenv("HERMES_HOME", str(hermes_home)) "command": "/usr/bin/copilot",
# Custom endpoints need an API key to build the client "args": ["--acp", "--stdio"],
monkeypatch.setenv("OPENAI_API_KEY", "test-key") }):
with patch("agent.auxiliary_client.OpenAI") as mock_openai: client, model = resolve_provider_client("copilot-acp")
client, model = get_text_auxiliary_client("compression")
assert model == "glm-4.7" assert client is fake_client
assert mock_openai.call_args.kwargs["base_url"] == "https://api.z.ai/api/coding/paas/v4" assert model == "gpt-5.4-mini"
assert mock_acp.call_args.kwargs["api_key"] == "copilot-acp"
assert mock_acp.call_args.kwargs["base_url"] == "acp://copilot"
assert mock_acp.call_args.kwargs["command"] == "/usr/bin/copilot"
assert mock_acp.call_args.kwargs["args"] == ["--acp", "--stdio"]
def test_resolve_provider_client_copilot_acp_requires_explicit_or_configured_model():
with patch("agent.auxiliary_client._read_main_model", return_value=""), \
patch("agent.copilot_acp_client.CopilotACPClient") as mock_acp, \
patch("hermes_cli.auth.resolve_external_process_provider_credentials", return_value={
"provider": "copilot-acp",
"api_key": "copilot-acp",
"base_url": "acp://copilot",
"command": "/usr/bin/copilot",
"args": ["--acp", "--stdio"],
}):
client, model = resolve_provider_client("copilot-acp")
assert client is None
assert model is None
mock_acp.assert_not_called()
class TestAuxiliaryMaxTokensParam: class TestAuxiliaryMaxTokensParam:

View file

@ -273,18 +273,6 @@ class TestDefaultConfigShape:
assert web["provider"] == "auto" assert web["provider"] == "auto"
assert web["model"] == "" assert web["model"] == ""
def test_compression_provider_default(self):
from hermes_cli.config import DEFAULT_CONFIG
compression = DEFAULT_CONFIG["compression"]
assert "summary_provider" in compression
assert compression["summary_provider"] == "auto"
def test_compression_base_url_default(self):
from hermes_cli.config import DEFAULT_CONFIG
compression = DEFAULT_CONFIG["compression"]
assert "summary_base_url" in compression
assert compression["summary_base_url"] is None
# ── CLI defaults parity ───────────────────────────────────────────────────── # ── CLI defaults parity ─────────────────────────────────────────────────────

View file

@ -12,17 +12,6 @@ def _isolate(tmp_path, monkeypatch):
hermes_home = tmp_path / ".hermes" hermes_home = tmp_path / ".hermes"
hermes_home.mkdir() hermes_home.mkdir()
monkeypatch.setenv("HERMES_HOME", str(hermes_home)) monkeypatch.setenv("HERMES_HOME", str(hermes_home))
for env_var in (
"AUXILIARY_VISION_PROVIDER",
"AUXILIARY_VISION_MODEL",
"AUXILIARY_VISION_BASE_URL",
"AUXILIARY_VISION_API_KEY",
"CONTEXT_VISION_PROVIDER",
"CONTEXT_VISION_MODEL",
"CONTEXT_VISION_BASE_URL",
"CONTEXT_VISION_API_KEY",
):
monkeypatch.delenv(env_var, raising=False)
# Write a minimal config so load_config doesn't fail # Write a minimal config so load_config doesn't fail
(hermes_home / "config.yaml").write_text("model:\n default: test-model\n") (hermes_home / "config.yaml").write_text("model:\n default: test-model\n")
@ -69,6 +58,10 @@ class TestNormalizeVisionProvider:
assert _normalize_vision_provider("beans") == "beans" assert _normalize_vision_provider("beans") == "beans"
assert _normalize_vision_provider("deepseek") == "deepseek" assert _normalize_vision_provider("deepseek") == "deepseek"
def test_custom_colon_named_provider_preserved(self):
from agent.auxiliary_client import _normalize_vision_provider
assert _normalize_vision_provider("custom:beans") == "beans"
def test_codex_alias_still_works(self): def test_codex_alias_still_works(self):
from agent.auxiliary_client import _normalize_vision_provider from agent.auxiliary_client import _normalize_vision_provider
assert _normalize_vision_provider("codex") == "openai-codex" assert _normalize_vision_provider("codex") == "openai-codex"
@ -240,3 +233,22 @@ class TestResolveVisionProviderClientModelNormalization:
assert provider == "zai" assert provider == "zai"
assert client is not None assert client is not None
assert model == "glm-5.1" assert model == "glm-5.1"
class TestVisionPathApiMode:
"""Vision path should propagate api_mode to _get_cached_client."""
def test_explicit_provider_passes_api_mode(self, tmp_path):
_write_config(tmp_path, {
"model": {"default": "test-model"},
"auxiliary": {"vision": {"api_mode": "chat_completions"}},
})
with patch("agent.auxiliary_client._get_cached_client") as mock_gcc:
mock_gcc.return_value = (MagicMock(), "test-model")
from agent.auxiliary_client import resolve_vision_provider_client
provider, client, model = resolve_vision_provider_client(provider="deepseek")
mock_gcc.assert_called_once()
_, kwargs = mock_gcc.call_args
assert kwargs.get("api_mode") == "chat_completions"

View file

@ -308,6 +308,34 @@ class TestMinimaxPreserveDots:
from run_agent import AIAgent from run_agent import AIAgent
assert AIAgent._anthropic_preserve_dots(agent) is False assert AIAgent._anthropic_preserve_dots(agent) is False
def test_opencode_zen_provider_preserves_dots(self):
from types import SimpleNamespace
agent = SimpleNamespace(provider="opencode-zen", base_url="")
from run_agent import AIAgent
assert AIAgent._anthropic_preserve_dots(agent) is True
def test_opencode_zen_url_preserves_dots(self):
from types import SimpleNamespace
agent = SimpleNamespace(provider="custom", base_url="https://opencode.ai/zen/v1")
from run_agent import AIAgent
assert AIAgent._anthropic_preserve_dots(agent) is True
def test_zai_provider_preserves_dots(self):
from types import SimpleNamespace
agent = SimpleNamespace(provider="zai", base_url="")
from run_agent import AIAgent
assert AIAgent._anthropic_preserve_dots(agent) is True
def test_bigmodel_cn_url_preserves_dots(self):
from types import SimpleNamespace
agent = SimpleNamespace(provider="custom", base_url="https://open.bigmodel.cn/api/paas/v4")
from run_agent import AIAgent
assert AIAgent._anthropic_preserve_dots(agent) is True
def test_normalize_preserves_m25_free_dot(self):
from agent.anthropic_adapter import normalize_model_name
assert normalize_model_name("minimax-m2.5-free", preserve_dots=True) == "minimax-m2.5-free"
def test_normalize_preserves_m27_dot(self): def test_normalize_preserves_m27_dot(self):
from agent.anthropic_adapter import normalize_model_name from agent.anthropic_adapter import normalize_model_name
assert normalize_model_name("MiniMax-M2.7", preserve_dots=True) == "MiniMax-M2.7" assert normalize_model_name("MiniMax-M2.7", preserve_dots=True) == "MiniMax-M2.7"

View file

@ -70,6 +70,44 @@ class TestQueryLocalContextLengthOllama:
assert result == 32768 assert result == 32768
def test_ollama_num_ctx_wins_over_model_info(self):
"""When both num_ctx (Modelfile) and model_info (GGUF) are present,
num_ctx wins because it's the *runtime* context Ollama actually
allocates KV cache for. The GGUF model_info.context_length is the
training max using it would let Hermes grow conversations past
the runtime limit and Ollama would silently truncate.
Concrete example: hermes-brain:qwen3-14b-ctx32k is a Modelfile
derived from qwen3:14b with `num_ctx 32768`, but the underlying
GGUF reports `qwen3.context_length: 40960` (training max). If
Hermes used 40960 it would let the conversation grow past 32768
before compressing, and Ollama would truncate the prefix.
"""
from agent.model_metadata import _query_local_context_length
show_resp = self._make_resp(200, {
"model_info": {"qwen3.context_length": 40960},
"parameters": "num_ctx 32768\ntemperature 0.6\n",
})
models_resp = self._make_resp(404, {})
client_mock = MagicMock()
client_mock.__enter__ = lambda s: client_mock
client_mock.__exit__ = MagicMock(return_value=False)
client_mock.post.return_value = show_resp
client_mock.get.return_value = models_resp
with patch("agent.model_metadata.detect_local_server_type", return_value="ollama"), \
patch("httpx.Client", return_value=client_mock):
result = _query_local_context_length(
"hermes-brain:qwen3-14b-ctx32k", "http://100.77.243.5:11434/v1"
)
assert result == 32768, (
f"Expected num_ctx (32768) to win over model_info (40960), got {result}. "
"If Hermes uses the GGUF training max, conversations will silently truncate."
)
def test_ollama_show_404_falls_through(self): def test_ollama_show_404_falls_through(self):
"""When /api/show returns 404, falls through to /v1/models/{model}.""" """When /api/show returns 404, falls through to /v1/models/{model}."""
from agent.model_metadata import _query_local_context_length from agent.model_metadata import _query_local_context_length

View file

@ -51,10 +51,10 @@ class TestSaveConfigValueAtomic:
def test_creates_nested_keys(self, config_env): def test_creates_nested_keys(self, config_env):
"""Dot-separated paths create intermediate dicts as needed.""" """Dot-separated paths create intermediate dicts as needed."""
from cli import save_config_value from cli import save_config_value
save_config_value("compression.summary_model", "google/gemini-3-flash-preview") save_config_value("auxiliary.compression.model", "google/gemini-3-flash-preview")
result = yaml.safe_load(config_env.read_text()) result = yaml.safe_load(config_env.read_text())
assert result["compression"]["summary_model"] == "google/gemini-3-flash-preview" assert result["auxiliary"]["compression"]["model"] == "google/gemini-3-flash-preview"
def test_overwrites_existing_value(self, config_env): def test_overwrites_existing_value(self, config_env):
"""Updating an existing key replaces the value.""" """Updating an existing key replaces the value."""

View file

@ -180,33 +180,71 @@ class TestDisplayResumedHistory:
assert 200 <= a_count <= 310 # roughly 300 chars (±panel padding) assert 200 <= a_count <= 310 # roughly 300 chars (±panel padding)
def test_long_assistant_message_truncated(self): def test_long_assistant_message_truncated(self):
"""Non-last assistant messages are still truncated."""
cli = _make_cli() cli = _make_cli()
long_text = "B" * 400 long_text = "B" * 400
cli.conversation_history = [ cli.conversation_history = [
{"role": "user", "content": "Tell me a lot."}, {"role": "user", "content": "Tell me a lot."},
{"role": "assistant", "content": long_text}, {"role": "assistant", "content": long_text},
{"role": "user", "content": "And more?"},
{"role": "assistant", "content": "Short final reply."},
] ]
output = self._capture_display(cli) output = self._capture_display(cli)
assert "..." in output # The non-last assistant message should be truncated
assert "B" * 400 not in output assert "B" * 400 not in output
# The last assistant message shown in full
assert "Short final reply." in output
def test_multiline_assistant_truncated(self): def test_multiline_assistant_truncated(self):
"""Non-last multiline assistant messages are truncated to 3 lines."""
cli = _make_cli() cli = _make_cli()
multi = "\n".join([f"Line {i}" for i in range(20)]) multi = "\n".join([f"Line {i}" for i in range(20)])
cli.conversation_history = [ cli.conversation_history = [
{"role": "user", "content": "Show me lines."}, {"role": "user", "content": "Show me lines."},
{"role": "assistant", "content": multi}, {"role": "assistant", "content": multi},
{"role": "user", "content": "What else?"},
{"role": "assistant", "content": "Done."},
] ]
output = self._capture_display(cli) output = self._capture_display(cli)
# First 3 lines should be there # First 3 lines of non-last assistant should be there
assert "Line 0" in output assert "Line 0" in output
assert "Line 1" in output assert "Line 1" in output
assert "Line 2" in output assert "Line 2" in output
# Line 19 should NOT be there (truncated after 3 lines) # Line 19 should NOT be in the truncated message
assert "Line 19" not in output assert "Line 19" not in output
def test_last_assistant_response_shown_in_full(self):
"""The last assistant response is shown un-truncated so the user
knows where they left off without wasting tokens re-asking."""
cli = _make_cli()
long_text = "X" * 500
cli.conversation_history = [
{"role": "user", "content": "Tell me everything."},
{"role": "assistant", "content": long_text},
]
output = self._capture_display(cli)
# Full 500-char text should be present (may be line-wrapped by Rich)
x_count = output.count("X")
assert x_count >= 490 # allow small Rich formatting variance
def test_last_assistant_multiline_shown_in_full(self):
"""The last assistant response shows all lines, not just 3."""
cli = _make_cli()
multi = "\n".join([f"Line {i}" for i in range(20)])
cli.conversation_history = [
{"role": "user", "content": "Show me everything."},
{"role": "assistant", "content": multi},
]
output = self._capture_display(cli)
# All 20 lines should be present since it's the last response
assert "Line 0" in output
assert "Line 10" in output
assert "Line 19" in output
def test_large_history_shows_truncation_indicator(self): def test_large_history_shows_truncation_indicator(self):
cli = _make_cli() cli = _make_cli()
cli.conversation_history = _large_history(n_exchanges=15) cli.conversation_history = _large_history(n_exchanges=15)

View file

@ -35,6 +35,7 @@ def make_restart_source(chat_id: str = "123456", chat_type: str = "dm") -> Sessi
platform=Platform.TELEGRAM, platform=Platform.TELEGRAM,
chat_id=chat_id, chat_id=chat_id,
chat_type=chat_type, chat_type=chat_type,
user_id="u1",
) )

View file

@ -0,0 +1,87 @@
"""Tests for _normalize_chat_content in the API server adapter."""
from gateway.platforms.api_server import _normalize_chat_content
class TestNormalizeChatContent:
"""Content normalization converts array-based content parts to plain text."""
def test_none_returns_empty_string(self):
assert _normalize_chat_content(None) == ""
def test_plain_string_returned_as_is(self):
assert _normalize_chat_content("hello world") == "hello world"
def test_empty_string_returned_as_is(self):
assert _normalize_chat_content("") == ""
def test_text_content_part(self):
content = [{"type": "text", "text": "hello"}]
assert _normalize_chat_content(content) == "hello"
def test_input_text_content_part(self):
content = [{"type": "input_text", "text": "user input"}]
assert _normalize_chat_content(content) == "user input"
def test_output_text_content_part(self):
content = [{"type": "output_text", "text": "assistant output"}]
assert _normalize_chat_content(content) == "assistant output"
def test_multiple_text_parts_joined_with_newline(self):
content = [
{"type": "text", "text": "first"},
{"type": "text", "text": "second"},
]
assert _normalize_chat_content(content) == "first\nsecond"
def test_mixed_string_and_dict_parts(self):
content = ["plain string", {"type": "text", "text": "dict part"}]
assert _normalize_chat_content(content) == "plain string\ndict part"
def test_image_url_parts_silently_skipped(self):
content = [
{"type": "text", "text": "check this:"},
{"type": "image_url", "image_url": {"url": "https://example.com/img.png"}},
]
assert _normalize_chat_content(content) == "check this:"
def test_integer_content_converted(self):
assert _normalize_chat_content(42) == "42"
def test_boolean_content_converted(self):
assert _normalize_chat_content(True) == "True"
def test_deeply_nested_list_respects_depth_limit(self):
"""Nesting beyond max_depth returns empty string."""
content = [[[[[[[[[[[["deep"]]]]]]]]]]]]
result = _normalize_chat_content(content)
# The deep nesting should be truncated, not crash
assert isinstance(result, str)
def test_large_list_capped(self):
"""Lists beyond MAX_CONTENT_LIST_SIZE are truncated."""
content = [{"type": "text", "text": f"item{i}"} for i in range(2000)]
result = _normalize_chat_content(content)
# Should not contain all 2000 items
assert result.count("item") <= 1000
def test_oversized_string_truncated(self):
"""Strings beyond 64KB are truncated."""
huge = "x" * 100_000
result = _normalize_chat_content(huge)
assert len(result) == 65_536
def test_empty_text_parts_filtered(self):
content = [
{"type": "text", "text": ""},
{"type": "text", "text": "actual"},
{"type": "text", "text": ""},
]
assert _normalize_chat_content(content) == "actual"
def test_dict_without_type_skipped(self):
content = [{"foo": "bar"}, {"type": "text", "text": "real"}]
assert _normalize_chat_content(content) == "real"
def test_empty_list_returns_empty(self):
assert _normalize_chat_content([]) == ""

View file

@ -359,3 +359,44 @@ async def test_discord_thread_participation_tracked_on_dispatch(adapter, monkeyp
await adapter._handle_message(message) await adapter._handle_message(message)
assert "777" in adapter._threads assert "777" in adapter._threads
@pytest.mark.asyncio
async def test_discord_voice_linked_channel_skips_mention_requirement_and_auto_thread(adapter, monkeypatch):
"""Active voice-linked text channels should behave like free-response channels."""
monkeypatch.setenv("DISCORD_REQUIRE_MENTION", "true")
monkeypatch.delenv("DISCORD_FREE_RESPONSE_CHANNELS", raising=False)
monkeypatch.delenv("DISCORD_AUTO_THREAD", raising=False)
adapter._voice_text_channels[111] = 789
adapter._auto_create_thread = AsyncMock()
message = make_message(
channel=FakeTextChannel(channel_id=789),
content="follow-up from voice text chat",
)
await adapter._handle_message(message)
adapter._auto_create_thread.assert_not_awaited()
adapter.handle_message.assert_awaited_once()
event = adapter.handle_message.await_args.args[0]
assert event.text == "follow-up from voice text chat"
assert event.source.chat_type == "group"
@pytest.mark.asyncio
async def test_discord_voice_linked_parent_thread_still_requires_mention(adapter, monkeypatch):
"""Threads under a voice-linked channel should still require @mention."""
monkeypatch.setenv("DISCORD_REQUIRE_MENTION", "true")
monkeypatch.delenv("DISCORD_FREE_RESPONSE_CHANNELS", raising=False)
adapter._voice_text_channels[111] = 789
message = make_message(
channel=FakeThread(channel_id=790, parent=FakeTextChannel(channel_id=789)),
content="thread reply without mention",
)
await adapter._handle_message(message)
adapter.handle_message.assert_not_awaited()

View file

@ -124,7 +124,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_off_mode_no_reply_reference(self): async def test_off_mode_no_reply_reference(self):
adapter, channel, ref_msg = _make_discord_adapter("off") adapter, channel, ref_msg = _make_discord_adapter("off")
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -137,7 +137,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_first_mode_only_first_chunk_references(self): async def test_first_mode_only_first_chunk_references(self):
adapter, channel, ref_msg = _make_discord_adapter("first") adapter, channel, ref_msg = _make_discord_adapter("first")
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -152,7 +152,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_all_mode_all_chunks_reference(self): async def test_all_mode_all_chunks_reference(self):
adapter, channel, ref_msg = _make_discord_adapter("all") adapter, channel, ref_msg = _make_discord_adapter("all")
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -165,7 +165,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_no_reply_to_param_no_reference(self): async def test_no_reply_to_param_no_reference(self):
adapter, channel, ref_msg = _make_discord_adapter("all") adapter, channel, ref_msg = _make_discord_adapter("all")
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"]
await adapter.send("12345", "test content", reply_to=None) await adapter.send("12345", "test content", reply_to=None)
@ -176,7 +176,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_single_chunk_respects_first_mode(self): async def test_single_chunk_respects_first_mode(self):
adapter, channel, ref_msg = _make_discord_adapter("first") adapter, channel, ref_msg = _make_discord_adapter("first")
adapter.truncate_message = lambda content, max_len: ["single chunk"] adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"]
await adapter.send("12345", "test", reply_to="999") await adapter.send("12345", "test", reply_to="999")
@ -187,7 +187,7 @@ class TestSendWithReplyToMode:
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_single_chunk_off_mode(self): async def test_single_chunk_off_mode(self):
adapter, channel, ref_msg = _make_discord_adapter("off") adapter, channel, ref_msg = _make_discord_adapter("off")
adapter.truncate_message = lambda content, max_len: ["single chunk"] adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"]
await adapter.send("12345", "test", reply_to="999") await adapter.send("12345", "test", reply_to="999")
@ -200,7 +200,7 @@ class TestSendWithReplyToMode:
async def test_invalid_mode_falls_back_to_first_behavior(self): async def test_invalid_mode_falls_back_to_first_behavior(self):
"""Invalid mode behaves like 'first' — only first chunk gets reference.""" """Invalid mode behaves like 'first' — only first chunk gets reference."""
adapter, channel, ref_msg = _make_discord_adapter("banana") adapter, channel, ref_msg = _make_discord_adapter("banana")
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"]
await adapter.send("12345", "test", reply_to="999") await adapter.send("12345", "test", reply_to="999")

View file

@ -189,14 +189,14 @@ class TestPlatformDefaults:
"""Slack, Mattermost, Matrix default to 'new' tool progress.""" """Slack, Mattermost, Matrix default to 'new' tool progress."""
from gateway.display_config import resolve_display_setting from gateway.display_config import resolve_display_setting
for plat in ("slack", "mattermost", "matrix", "feishu"): for plat in ("slack", "mattermost", "matrix", "feishu", "whatsapp"):
assert resolve_display_setting({}, plat, "tool_progress") == "new", plat assert resolve_display_setting({}, plat, "tool_progress") == "new", plat
def test_low_tier_platforms(self): def test_low_tier_platforms(self):
"""Signal, WhatsApp, etc. default to 'off' tool progress.""" """Signal, BlueBubbles, etc. default to 'off' tool progress."""
from gateway.display_config import resolve_display_setting from gateway.display_config import resolve_display_setting
for plat in ("signal", "whatsapp", "bluebubbles", "weixin", "wecom", "dingtalk"): for plat in ("signal", "bluebubbles", "weixin", "wecom", "dingtalk"):
assert resolve_display_setting({}, plat, "tool_progress") == "off", plat assert resolve_display_setting({}, plat, "tool_progress") == "off", plat
def test_minimal_tier_platforms(self): def test_minimal_tier_platforms(self):

View file

@ -0,0 +1,438 @@
"""Tests for gateway.platforms.feishu — Feishu scan-to-create registration."""
import json
from unittest.mock import patch, MagicMock
import pytest
def _mock_urlopen(response_data, status=200):
"""Create a mock for urllib.request.urlopen that returns JSON response_data."""
mock_response = MagicMock()
mock_response.read.return_value = json.dumps(response_data).encode("utf-8")
mock_response.status = status
mock_response.__enter__ = lambda s: s
mock_response.__exit__ = MagicMock(return_value=False)
return mock_response
class TestPostRegistration:
"""Tests for the low-level HTTP helper."""
@patch("gateway.platforms.feishu.urlopen")
def test_post_registration_returns_parsed_json(self, mock_urlopen_fn):
from gateway.platforms.feishu import _post_registration
mock_urlopen_fn.return_value = _mock_urlopen({"nonce": "abc", "supported_auth_methods": ["client_secret"]})
result = _post_registration("https://accounts.feishu.cn", {"action": "init"})
assert result["nonce"] == "abc"
assert "client_secret" in result["supported_auth_methods"]
@patch("gateway.platforms.feishu.urlopen")
def test_post_registration_sends_form_encoded_body(self, mock_urlopen_fn):
from gateway.platforms.feishu import _post_registration
mock_urlopen_fn.return_value = _mock_urlopen({})
_post_registration("https://accounts.feishu.cn", {"action": "init", "key": "val"})
call_args = mock_urlopen_fn.call_args
request = call_args[0][0]
body = request.data.decode("utf-8")
assert "action=init" in body
assert "key=val" in body
assert request.get_header("Content-type") == "application/x-www-form-urlencoded"
class TestInitRegistration:
"""Tests for the init step."""
@patch("gateway.platforms.feishu.urlopen")
def test_init_succeeds_when_client_secret_supported(self, mock_urlopen_fn):
from gateway.platforms.feishu import _init_registration
mock_urlopen_fn.return_value = _mock_urlopen({
"nonce": "abc",
"supported_auth_methods": ["client_secret"],
})
_init_registration("feishu")
@patch("gateway.platforms.feishu.urlopen")
def test_init_raises_when_client_secret_not_supported(self, mock_urlopen_fn):
from gateway.platforms.feishu import _init_registration
mock_urlopen_fn.return_value = _mock_urlopen({
"nonce": "abc",
"supported_auth_methods": ["other_method"],
})
with pytest.raises(RuntimeError, match="client_secret"):
_init_registration("feishu")
@patch("gateway.platforms.feishu.urlopen")
def test_init_uses_lark_url_for_lark_domain(self, mock_urlopen_fn):
from gateway.platforms.feishu import _init_registration
mock_urlopen_fn.return_value = _mock_urlopen({
"nonce": "abc",
"supported_auth_methods": ["client_secret"],
})
_init_registration("lark")
call_args = mock_urlopen_fn.call_args
request = call_args[0][0]
assert "larksuite.com" in request.full_url
class TestBeginRegistration:
"""Tests for the begin step."""
@patch("gateway.platforms.feishu.urlopen")
def test_begin_returns_device_code_and_qr_url(self, mock_urlopen_fn):
from gateway.platforms.feishu import _begin_registration
mock_urlopen_fn.return_value = _mock_urlopen({
"device_code": "dc_123",
"verification_uri_complete": "https://accounts.feishu.cn/qr/abc",
"user_code": "ABCD-1234",
"interval": 5,
"expire_in": 600,
})
result = _begin_registration("feishu")
assert result["device_code"] == "dc_123"
assert "qr_url" in result
assert "accounts.feishu.cn" in result["qr_url"]
assert result["user_code"] == "ABCD-1234"
assert result["interval"] == 5
assert result["expire_in"] == 600
@patch("gateway.platforms.feishu.urlopen")
def test_begin_sends_correct_archetype(self, mock_urlopen_fn):
from gateway.platforms.feishu import _begin_registration
mock_urlopen_fn.return_value = _mock_urlopen({
"device_code": "dc_123",
"verification_uri_complete": "https://example.com/qr",
"user_code": "X",
"interval": 5,
"expire_in": 600,
})
_begin_registration("feishu")
request = mock_urlopen_fn.call_args[0][0]
body = request.data.decode("utf-8")
assert "archetype=PersonalAgent" in body
assert "auth_method=client_secret" in body
class TestPollRegistration:
    """Tests for the poll step.

    Note: @patch decorators apply bottom-up, so mock_urlopen_fn (inner
    decorator) precedes mock_time (outer decorator) in the parameter list.
    """

    @patch("gateway.platforms.feishu.time")
    @patch("gateway.platforms.feishu.urlopen")
    def test_poll_returns_credentials_on_success(self, mock_urlopen_fn, mock_time):
        """A credentials response maps to app_id/app_secret/domain/open_id."""
        from gateway.platforms.feishu import _poll_registration
        # Two clock reads keep the loop well inside the 60s expiry window.
        mock_time.time.side_effect = [0, 1]
        mock_time.sleep = MagicMock()
        mock_urlopen_fn.return_value = _mock_urlopen({
            "client_id": "cli_app123",
            "client_secret": "secret456",
            "user_info": {"open_id": "ou_owner", "tenant_brand": "feishu"},
        })
        result = _poll_registration(
            device_code="dc_123", interval=1, expire_in=60, domain="feishu"
        )
        assert result is not None
        assert result["app_id"] == "cli_app123"
        assert result["app_secret"] == "secret456"
        assert result["domain"] == "feishu"
        assert result["open_id"] == "ou_owner"

    @patch("gateway.platforms.feishu.time")
    @patch("gateway.platforms.feishu.urlopen")
    def test_poll_switches_domain_on_lark_tenant_brand(self, mock_urlopen_fn, mock_time):
        """A pending response reporting tenant_brand=lark flips the domain."""
        from gateway.platforms.feishu import _poll_registration
        mock_time.time.side_effect = [0, 1, 2]
        mock_time.sleep = MagicMock()
        # First poll: still pending, but user_info reveals a Lark tenant.
        pending_resp = _mock_urlopen({
            "error": "authorization_pending",
            "user_info": {"tenant_brand": "lark"},
        })
        # Second poll: credentials delivered.
        success_resp = _mock_urlopen({
            "client_id": "cli_lark",
            "client_secret": "secret_lark",
            "user_info": {"open_id": "ou_lark", "tenant_brand": "lark"},
        })
        mock_urlopen_fn.side_effect = [pending_resp, success_resp]
        result = _poll_registration(
            device_code="dc_123", interval=0, expire_in=60, domain="feishu"
        )
        assert result is not None
        assert result["domain"] == "lark"

    @patch("gateway.platforms.feishu.time")
    @patch("gateway.platforms.feishu.urlopen")
    def test_poll_success_with_lark_brand_in_same_response(self, mock_urlopen_fn, mock_time):
        """Credentials and lark tenant_brand in one response must not be discarded."""
        from gateway.platforms.feishu import _poll_registration
        mock_time.time.side_effect = [0, 1]
        mock_time.sleep = MagicMock()
        mock_urlopen_fn.return_value = _mock_urlopen({
            "client_id": "cli_lark_direct",
            "client_secret": "secret_lark_direct",
            "user_info": {"open_id": "ou_lark_direct", "tenant_brand": "lark"},
        })
        result = _poll_registration(
            device_code="dc_123", interval=1, expire_in=60, domain="feishu"
        )
        assert result is not None
        assert result["app_id"] == "cli_lark_direct"
        # Domain switches to lark even when success and brand arrive together.
        assert result["domain"] == "lark"
        assert result["open_id"] == "ou_lark_direct"

    @patch("gateway.platforms.feishu.time")
    @patch("gateway.platforms.feishu.urlopen")
    def test_poll_returns_none_on_access_denied(self, mock_urlopen_fn, mock_time):
        """The user rejecting authorization (access_denied) yields None."""
        from gateway.platforms.feishu import _poll_registration
        mock_time.time.side_effect = [0, 1]
        mock_time.sleep = MagicMock()
        mock_urlopen_fn.return_value = _mock_urlopen({
            "error": "access_denied",
        })
        result = _poll_registration(
            device_code="dc_123", interval=1, expire_in=60, domain="feishu"
        )
        assert result is None

    @patch("gateway.platforms.feishu.time")
    @patch("gateway.platforms.feishu.urlopen")
    def test_poll_returns_none_on_timeout(self, mock_urlopen_fn, mock_time):
        """Clock jumping past expire_in aborts the poll loop with None."""
        from gateway.platforms.feishu import _poll_registration
        # Second clock read (999) is far past expire_in=1 → expiry path.
        mock_time.time.side_effect = [0, 999]
        mock_time.sleep = MagicMock()
        mock_urlopen_fn.return_value = _mock_urlopen({
            "error": "authorization_pending",
        })
        result = _poll_registration(
            device_code="dc_123", interval=1, expire_in=1, domain="feishu"
        )
        assert result is None
class TestRenderQr:
    """Tests for QR code terminal rendering."""

    def test_render_qr_returns_true_on_success(self):
        """With the qrcode module present, rendering succeeds and draws ASCII."""
        from gateway.platforms.feishu import _render_qr
        fake_qr = MagicMock()
        with patch("gateway.platforms.feishu._qrcode_mod", create=True) as fake_mod:
            fake_mod.QRCode.return_value = fake_qr
            assert _render_qr("https://example.com/qr") is True
        fake_qr.add_data.assert_called_once_with("https://example.com/qr")
        fake_qr.make.assert_called_once_with(fit=True)
        fake_qr.print_ascii.assert_called_once()

    def test_render_qr_returns_false_when_qrcode_missing(self):
        """Absent qrcode dependency degrades to False instead of raising."""
        from gateway.platforms.feishu import _render_qr
        with patch("gateway.platforms.feishu._qrcode_mod", None):
            assert _render_qr("https://example.com/qr") is False
class TestProbeBot:
    """Tests for bot connectivity verification."""

    @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", True)
    def test_probe_returns_bot_info_on_success(self):
        """With the SDK available, probe_bot relays the SDK's bot info."""
        from gateway.platforms.feishu import probe_bot
        with patch("gateway.platforms.feishu._probe_bot_sdk") as mock_sdk:
            mock_sdk.return_value = {"bot_name": "TestBot", "bot_open_id": "ou_bot123"}
            result = probe_bot("cli_app", "secret", "feishu")
            assert result is not None
            assert result["bot_name"] == "TestBot"
            assert result["bot_open_id"] == "ou_bot123"

    @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", True)
    def test_probe_returns_none_on_failure(self):
        """SDK probe failure (None) is passed through unchanged."""
        from gateway.platforms.feishu import probe_bot
        with patch("gateway.platforms.feishu._probe_bot_sdk") as mock_sdk:
            mock_sdk.return_value = None
            result = probe_bot("bad_id", "bad_secret", "feishu")
            assert result is None

    @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", False)
    @patch("gateway.platforms.feishu.urlopen")
    def test_http_fallback_when_sdk_unavailable(self, mock_urlopen_fn):
        """Without lark_oapi, probe falls back to raw HTTP."""
        from gateway.platforms.feishu import probe_bot
        # Fallback issues two HTTP calls: tenant token, then bot info.
        token_resp = _mock_urlopen({"code": 0, "tenant_access_token": "t-123"})
        bot_resp = _mock_urlopen({"code": 0, "bot": {"bot_name": "HttpBot", "open_id": "ou_http"}})
        mock_urlopen_fn.side_effect = [token_resp, bot_resp]
        result = probe_bot("cli_app", "secret", "feishu")
        assert result is not None
        assert result["bot_name"] == "HttpBot"

    @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", False)
    @patch("gateway.platforms.feishu.urlopen")
    def test_http_fallback_returns_none_on_network_error(self, mock_urlopen_fn):
        """Network failure during the HTTP fallback yields None, not a crash."""
        from gateway.platforms.feishu import probe_bot
        from urllib.error import URLError
        mock_urlopen_fn.side_effect = URLError("connection refused")
        result = probe_bot("cli_app", "secret", "feishu")
        assert result is None
class TestQrRegister:
    """Tests for the public qr_register entry point.

    Note: @patch decorators apply bottom-up, so the first mock parameter
    corresponds to the decorator closest to the function definition
    (_init_registration), and the last to the topmost decorator.
    """

    @patch("gateway.platforms.feishu.probe_bot")
    @patch("gateway.platforms.feishu._render_qr")
    @patch("gateway.platforms.feishu._poll_registration")
    @patch("gateway.platforms.feishu._begin_registration")
    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_success_flow(
        self, mock_init, mock_begin, mock_poll, mock_render, mock_probe
    ):
        """Happy path: init → begin → render QR → poll → probe, results merged."""
        from gateway.platforms.feishu import qr_register
        mock_begin.return_value = {
            "device_code": "dc_123",
            "qr_url": "https://example.com/qr",
            "user_code": "ABCD",
            "interval": 1,
            "expire_in": 60,
        }
        mock_poll.return_value = {
            "app_id": "cli_app",
            "app_secret": "secret",
            "domain": "feishu",
            "open_id": "ou_owner",
        }
        mock_probe.return_value = {"bot_name": "MyBot", "bot_open_id": "ou_bot"}
        result = qr_register()
        assert result is not None
        assert result["app_id"] == "cli_app"
        assert result["app_secret"] == "secret"
        assert result["bot_name"] == "MyBot"
        mock_init.assert_called_once()
        mock_render.assert_called_once()

    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_returns_none_on_init_failure(self, mock_init):
        """A RuntimeError during init is treated as an expected failure → None."""
        from gateway.platforms.feishu import qr_register
        mock_init.side_effect = RuntimeError("not supported")
        result = qr_register()
        assert result is None

    @patch("gateway.platforms.feishu._render_qr")
    @patch("gateway.platforms.feishu._poll_registration")
    @patch("gateway.platforms.feishu._begin_registration")
    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_returns_none_on_poll_failure(
        self, mock_init, mock_begin, mock_poll, mock_render
    ):
        """Poll returning None (denied/timeout) makes qr_register return None."""
        from gateway.platforms.feishu import qr_register
        mock_begin.return_value = {
            "device_code": "dc_123",
            "qr_url": "https://example.com/qr",
            "user_code": "ABCD",
            "interval": 1,
            "expire_in": 60,
        }
        mock_poll.return_value = None
        result = qr_register()
        assert result is None

    # -- Contract: expected errors → None, unexpected errors → propagate --

    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_returns_none_on_network_error(self, mock_init):
        """URLError (network down) is an expected failure → None."""
        from gateway.platforms.feishu import qr_register
        from urllib.error import URLError
        mock_init.side_effect = URLError("DNS resolution failed")
        result = qr_register()
        assert result is None

    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_returns_none_on_json_error(self, mock_init):
        """Malformed server response is an expected failure → None."""
        from gateway.platforms.feishu import qr_register
        mock_init.side_effect = json.JSONDecodeError("bad json", "", 0)
        result = qr_register()
        assert result is None

    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_propagates_unexpected_errors(self, mock_init):
        """Bugs (e.g. AttributeError) must not be swallowed — they propagate."""
        from gateway.platforms.feishu import qr_register
        mock_init.side_effect = AttributeError("some internal bug")
        with pytest.raises(AttributeError, match="some internal bug"):
            qr_register()

    # -- Negative paths: partial/malformed server responses --

    @patch("gateway.platforms.feishu._render_qr")
    @patch("gateway.platforms.feishu._begin_registration")
    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_returns_none_when_begin_missing_device_code(
        self, mock_init, mock_begin, mock_render
    ):
        """Server returns begin response without device_code → RuntimeError → None."""
        from gateway.platforms.feishu import qr_register
        mock_begin.side_effect = RuntimeError("Feishu registration did not return a device_code")
        result = qr_register()
        assert result is None

    @patch("gateway.platforms.feishu.probe_bot")
    @patch("gateway.platforms.feishu._render_qr")
    @patch("gateway.platforms.feishu._poll_registration")
    @patch("gateway.platforms.feishu._begin_registration")
    @patch("gateway.platforms.feishu._init_registration")
    def test_qr_register_succeeds_even_when_probe_fails(
        self, mock_init, mock_begin, mock_poll, mock_render, mock_probe
    ):
        """Registration succeeds but probe fails → result with bot_name=None."""
        from gateway.platforms.feishu import qr_register
        mock_begin.return_value = {
            "device_code": "dc_123",
            "qr_url": "https://example.com/qr",
            "user_code": "ABCD",
            "interval": 1,
            "expire_in": 60,
        }
        mock_poll.return_value = {
            "app_id": "cli_app",
            "app_secret": "secret",
            "domain": "feishu",
            "open_id": "ou_owner",
        }
        mock_probe.return_value = None  # probe failed
        result = qr_register()
        # Probe failure is non-fatal: credentials returned, bot fields None.
        assert result is not None
        assert result["app_id"] == "cli_app"
        assert result["bot_name"] is None
        assert result["bot_open_id"] is None

View file

@ -48,6 +48,7 @@ def _make_event(
room_id="!room1:example.org", room_id="!room1:example.org",
formatted_body=None, formatted_body=None,
thread_id=None, thread_id=None,
mention_user_ids=None,
): ):
"""Create a fake room message event. """Create a fake room message event.
@ -60,6 +61,9 @@ def _make_event(
content["formatted_body"] = formatted_body content["formatted_body"] = formatted_body
content["format"] = "org.matrix.custom.html" content["format"] = "org.matrix.custom.html"
if mention_user_ids is not None:
content["m.mentions"] = {"user_ids": mention_user_ids}
relates_to = {} relates_to = {}
if thread_id: if thread_id:
relates_to["rel_type"] = "m.thread" relates_to["rel_type"] = "m.thread"
@ -108,6 +112,44 @@ class TestIsBotMentioned:
# "hermesbot" should not match word-boundary check for "hermes" # "hermesbot" should not match word-boundary check for "hermes"
assert not self.adapter._is_bot_mentioned("hermesbot is here") assert not self.adapter._is_bot_mentioned("hermesbot is here")
# m.mentions.user_ids — MSC3952 / Matrix v1.7 authoritative mentions
# Ported from openclaw/openclaw#64796
def test_m_mentions_user_ids_authoritative(self):
"""m.mentions.user_ids alone is sufficient — no body text needed."""
assert self.adapter._is_bot_mentioned(
"please reply", # no @hermes anywhere in body
mention_user_ids=["@hermes:example.org"],
)
def test_m_mentions_user_ids_with_body_mention(self):
"""Both m.mentions and body mention — should still be True."""
assert self.adapter._is_bot_mentioned(
"hey @hermes:example.org help",
mention_user_ids=["@hermes:example.org"],
)
def test_m_mentions_user_ids_other_user_only(self):
"""m.mentions with a different user — bot is NOT mentioned."""
assert not self.adapter._is_bot_mentioned(
"hello",
mention_user_ids=["@alice:example.org"],
)
def test_m_mentions_user_ids_empty_list(self):
"""Empty user_ids list — falls through to text detection."""
assert not self.adapter._is_bot_mentioned(
"hello everyone",
mention_user_ids=[],
)
def test_m_mentions_user_ids_none(self):
"""None mention_user_ids — falls through to text detection."""
assert not self.adapter._is_bot_mentioned(
"hello everyone",
mention_user_ids=None,
)
class TestStripMention: class TestStripMention:
def setup_method(self): def setup_method(self):
@ -176,6 +218,44 @@ async def test_require_mention_html_pill(monkeypatch):
adapter.handle_message.assert_awaited_once() adapter.handle_message.assert_awaited_once()
@pytest.mark.asyncio
async def test_require_mention_m_mentions_user_ids(monkeypatch):
"""m.mentions.user_ids is authoritative per MSC3952 — no body mention needed.
Ported from openclaw/openclaw#64796.
"""
monkeypatch.delenv("MATRIX_REQUIRE_MENTION", raising=False)
monkeypatch.delenv("MATRIX_FREE_RESPONSE_ROOMS", raising=False)
monkeypatch.setenv("MATRIX_AUTO_THREAD", "false")
adapter = _make_adapter()
# Body has NO mention, but m.mentions.user_ids includes the bot.
event = _make_event(
"please reply",
mention_user_ids=["@hermes:example.org"],
)
await adapter._on_room_message(event)
adapter.handle_message.assert_awaited_once()
@pytest.mark.asyncio
async def test_require_mention_m_mentions_other_user_ignored(monkeypatch):
"""m.mentions.user_ids mentioning another user should NOT activate the bot."""
monkeypatch.delenv("MATRIX_REQUIRE_MENTION", raising=False)
monkeypatch.delenv("MATRIX_FREE_RESPONSE_ROOMS", raising=False)
monkeypatch.setenv("MATRIX_AUTO_THREAD", "false")
adapter = _make_adapter()
event = _make_event(
"hey alice check this",
mention_user_ids=["@alice:example.org"],
)
await adapter._on_room_message(event)
adapter.handle_message.assert_not_awaited()
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_require_mention_dm_always_responds(monkeypatch): async def test_require_mention_dm_always_responds(monkeypatch):
"""DMs always respond regardless of mention setting.""" """DMs always respond regardless of mention setting."""

View file

@ -9,6 +9,8 @@ from gateway.platforms.base import (
MessageEvent, MessageEvent,
MessageType, MessageType,
safe_url_for_log, safe_url_for_log,
utf16_len,
_prefix_within_utf16_limit,
) )
@ -448,3 +450,135 @@ class TestGetHumanDelay:
with patch.dict(os.environ, env): with patch.dict(os.environ, env):
delay = BasePlatformAdapter._get_human_delay() delay = BasePlatformAdapter._get_human_delay()
assert 0.1 <= delay <= 0.2 assert 0.1 <= delay <= 0.2
# ---------------------------------------------------------------------------
# utf16_len / _prefix_within_utf16_limit / truncate_message with len_fn
# ---------------------------------------------------------------------------
# Ported from nearai/ironclaw#2304 — Telegram counts message length in UTF-16
# code units, not Unicode code-points. Astral-plane characters (emoji, CJK
# Extension B) are surrogate pairs: 1 Python char but 2 UTF-16 units.
class TestUtf16Len:
    """Verify the UTF-16 length helper."""

    def test_ascii(self):
        # Plain ASCII: one UTF-16 code unit per character.
        length = utf16_len("hello")
        assert length == 5

    def test_bmp_cjk(self):
        # CJK ideographs inside the BMP count as one code unit each.
        length = utf16_len("你好")
        assert length == 2

    def test_emoji_surrogate_pair(self):
        # 😀 (U+1F600) lies outside the BMP, so it is a surrogate pair.
        length = utf16_len("😀")
        assert length == 2

    def test_mixed(self):
        # Two ASCII chars plus one surrogate-pair emoji: 2 + 2 units.
        length = utf16_len("hi😀")
        assert length == 4

    def test_musical_symbol(self):
        # 𝄞 (U+1D11E), Musical Symbol G Clef — another astral-plane char.
        length = utf16_len("𝄞")
        assert length == 2

    def test_empty(self):
        # The empty string has zero code units.
        length = utf16_len("")
        assert length == 0
class TestPrefixWithinUtf16Limit:
    """Verify UTF-16-aware prefix truncation."""

    def test_fits_entirely(self):
        # Under the limit → returned unchanged.
        assert _prefix_within_utf16_limit("hello", 10) == "hello"

    def test_ascii_truncation(self):
        result = _prefix_within_utf16_limit("hello world", 5)
        assert result == "hello"
        assert utf16_len(result) <= 5

    def test_does_not_split_surrogate_pair(self):
        # "a😀b" = 1 + 2 + 1 = 4 UTF-16 units; limit 2 should give "a"
        # (including the emoji would require splitting its surrogate pair).
        result = _prefix_within_utf16_limit("a😀b", 2)
        assert result == "a"
        assert utf16_len(result) <= 2

    def test_emoji_at_limit(self):
        # "😀" = 2 UTF-16 units; limit 2 should include it
        result = _prefix_within_utf16_limit("😀x", 2)
        assert result == "😀"

    def test_all_emoji(self):
        msg = "😀" * 10  # 20 UTF-16 units
        result = _prefix_within_utf16_limit(msg, 6)
        # Exactly three emoji fit in six code units.
        assert result == "😀😀😀"
        assert utf16_len(result) == 6

    def test_empty(self):
        assert _prefix_within_utf16_limit("", 5) == ""
class TestTruncateMessageUtf16:
    """Verify truncate_message respects UTF-16 lengths when len_fn=utf16_len."""

    def test_short_emoji_message_no_split(self):
        """A short message under the UTF-16 limit should not be split."""
        msg = "Hello 😀 world"
        chunks = BasePlatformAdapter.truncate_message(msg, 4096, len_fn=utf16_len)
        assert len(chunks) == 1
        assert chunks[0] == msg

    def test_emoji_near_limit_triggers_split(self):
        """A message at 4096 codepoints but >4096 UTF-16 units must split."""
        # 2049 emoji = 2049 codepoints but 4098 UTF-16 units → exceeds 4096
        msg = "😀" * 2049
        assert len(msg) == 2049  # Python len sees 2049 chars
        assert utf16_len(msg) == 4098  # but it's 4098 UTF-16 units
        # Without UTF-16 awareness, this would NOT split (2049 < 4096)
        chunks_naive = BasePlatformAdapter.truncate_message(msg, 4096)
        assert len(chunks_naive) == 1, "Without len_fn, no split expected"
        # With UTF-16 awareness, it MUST split
        chunks = BasePlatformAdapter.truncate_message(msg, 4096, len_fn=utf16_len)
        assert len(chunks) > 1, "With utf16_len, message should be split"
        # Each chunk must fit within the UTF-16 limit
        for i, chunk in enumerate(chunks):
            assert utf16_len(chunk) <= 4096, (
                f"Chunk {i} exceeds 4096 UTF-16 units: {utf16_len(chunk)}"
            )

    def test_each_utf16_chunk_within_limit(self):
        """All chunks produced with utf16_len must fit the limit."""
        # Mix of BMP and astral-plane characters
        msg = ("Hello 😀 world 🎵 test 𝄞 " * 200).strip()
        max_len = 200
        chunks = BasePlatformAdapter.truncate_message(msg, max_len, len_fn=utf16_len)
        for i, chunk in enumerate(chunks):
            u16_len = utf16_len(chunk)
            # +20 slack: presumably covers continuation markers appended by
            # the splitter — TODO confirm against truncate_message.
            # Fixed: the failure message previously referenced a meaningless
            # constant ("72,348") instead of the actual bound.
            assert u16_len <= max_len + 20, (
                f"Chunk {i} UTF-16 length {u16_len} exceeds "
                f"limit {max_len} (+20 slack)"
            )

    def test_all_content_preserved(self):
        """Splitting with utf16_len must not lose content."""
        words = ["emoji😀", "music🎵", "cjk你好", "plain"] * 100
        msg = " ".join(words)
        chunks = BasePlatformAdapter.truncate_message(msg, 200, len_fn=utf16_len)
        reassembled = " ".join(chunks)
        for word in words:
            assert word in reassembled, f"Word '{word}' lost during UTF-16 split"

    def test_code_blocks_preserved_with_utf16(self):
        """Code block fence handling should work with utf16_len too."""
        msg = "Before\n```python\n" + "x = '😀'\n" * 200 + "```\nAfter"
        chunks = BasePlatformAdapter.truncate_message(msg, 300, len_fn=utf16_len)
        assert len(chunks) > 1
        # Each chunk should have balanced fences
        for i, chunk in enumerate(chunks):
            fence_count = chunk.count("```")
            assert fence_count % 2 == 0, (
                f"Chunk {i} has unbalanced fences ({fence_count})"
            )

View file

@ -0,0 +1,215 @@
"""Tests for /restart notification — the gateway notifies the requester on comeback."""
import asyncio
import json
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock
import pytest
import gateway.run as gateway_run
from gateway.config import Platform
from gateway.platforms.base import MessageEvent, MessageType
from gateway.session import build_session_key
from tests.gateway.restart_test_helpers import (
make_restart_runner,
make_restart_source,
)
# ── _handle_restart_command writes .restart_notify.json ──────────────────
@pytest.mark.asyncio
async def test_restart_command_writes_notify_file(tmp_path, monkeypatch):
    """When /restart fires, the requester's routing info is persisted to disk."""
    # Redirect the hermes home dir so the notify file lands in tmp_path.
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    runner, _adapter = make_restart_runner()
    # Stub out the actual restart; only the persisted side file matters here.
    runner.request_restart = MagicMock(return_value=True)
    source = make_restart_source(chat_id="42")
    event = MessageEvent(
        text="/restart",
        message_type=MessageType.TEXT,
        source=source,
        message_id="m1",
    )
    result = await runner._handle_restart_command(event)
    assert "Restarting" in result
    notify_path = tmp_path / ".restart_notify.json"
    assert notify_path.exists()
    data = json.loads(notify_path.read_text())
    assert data["platform"] == "telegram"
    assert data["chat_id"] == "42"
    assert "thread_id" not in data  # no thread → omitted
@pytest.mark.asyncio
async def test_restart_command_uses_service_restart_under_systemd(tmp_path, monkeypatch):
    """Under systemd (INVOCATION_ID set), /restart uses via_service=True."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    # INVOCATION_ID is the systemd marker the command checks for.
    monkeypatch.setenv("INVOCATION_ID", "abc123")
    runner, _adapter = make_restart_runner()
    runner.request_restart = MagicMock(return_value=True)
    source = make_restart_source(chat_id="42")
    event = MessageEvent(
        text="/restart",
        message_type=MessageType.TEXT,
        source=source,
        message_id="m1",
    )
    await runner._handle_restart_command(event)
    runner.request_restart.assert_called_once_with(detached=False, via_service=True)
@pytest.mark.asyncio
async def test_restart_command_uses_detached_without_systemd(tmp_path, monkeypatch):
    """Without systemd, /restart uses the detached subprocess approach."""
    # No INVOCATION_ID in the environment → not running under systemd.
    monkeypatch.delenv("INVOCATION_ID", raising=False)
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    runner, _adapter = make_restart_runner()
    runner.request_restart = MagicMock(return_value=True)
    restart_event = MessageEvent(
        text="/restart",
        message_type=MessageType.TEXT,
        source=make_restart_source(chat_id="42"),
        message_id="m1",
    )
    await runner._handle_restart_command(restart_event)
    runner.request_restart.assert_called_once_with(detached=True, via_service=False)
@pytest.mark.asyncio
async def test_restart_command_preserves_thread_id(tmp_path, monkeypatch):
    """Thread ID is saved when the requester is in a threaded chat."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    runner, _adapter = make_restart_runner()
    runner.request_restart = MagicMock(return_value=True)
    source = make_restart_source(chat_id="99")
    # Simulate a threaded chat (e.g. a forum topic) on the source.
    source.thread_id = "topic_7"
    event = MessageEvent(
        text="/restart",
        message_type=MessageType.TEXT,
        source=source,
        message_id="m2",
    )
    await runner._handle_restart_command(event)
    data = json.loads((tmp_path / ".restart_notify.json").read_text())
    assert data["thread_id"] == "topic_7"
# ── _send_restart_notification ───────────────────────────────────────────
@pytest.mark.asyncio
async def test_send_restart_notification_delivers_and_cleans_up(tmp_path, monkeypatch):
    """On startup, the notification is sent and the file is removed."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    # Pre-seed the notify file as if a /restart had just happened.
    notify_path = tmp_path / ".restart_notify.json"
    notify_path.write_text(json.dumps({
        "platform": "telegram",
        "chat_id": "42",
    }))
    runner, adapter = make_restart_runner()
    adapter.send = AsyncMock()
    await runner._send_restart_notification()
    adapter.send.assert_called_once()
    call_args = adapter.send.call_args
    assert call_args[0][0] == "42"  # chat_id
    assert "restarted" in call_args[0][1].lower()
    assert call_args[1].get("metadata") is None  # no thread
    # The one-shot file must be consumed.
    assert not notify_path.exists()
@pytest.mark.asyncio
async def test_send_restart_notification_with_thread(tmp_path, monkeypatch):
    """Thread ID is passed as metadata so the message lands in the right topic."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    notify_path = tmp_path / ".restart_notify.json"
    notify_path.write_text(json.dumps({
        "platform": "telegram",
        "chat_id": "99",
        "thread_id": "topic_7",
    }))
    runner, adapter = make_restart_runner()
    adapter.send = AsyncMock()
    await runner._send_restart_notification()
    call_args = adapter.send.call_args
    # thread_id travels via the send() metadata kwarg.
    assert call_args[1]["metadata"] == {"thread_id": "topic_7"}
    assert not notify_path.exists()
@pytest.mark.asyncio
async def test_send_restart_notification_noop_when_no_file(tmp_path, monkeypatch):
    """Nothing happens if there's no pending restart notification."""
    # tmp_path contains no .restart_notify.json, so startup has nothing to send.
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    runner, platform_adapter = make_restart_runner()
    platform_adapter.send = AsyncMock()
    await runner._send_restart_notification()
    platform_adapter.send.assert_not_called()
@pytest.mark.asyncio
async def test_send_restart_notification_skips_when_adapter_missing(tmp_path, monkeypatch):
    """If the requester's platform isn't connected, clean up without crashing."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    notify_path = tmp_path / ".restart_notify.json"
    notify_path.write_text(json.dumps({
        "platform": "discord",  # runner only has telegram adapter
        "chat_id": "42",
    }))
    runner, _adapter = make_restart_runner()
    await runner._send_restart_notification()
    # File cleaned up even though we couldn't send
    assert not notify_path.exists()
@pytest.mark.asyncio
async def test_send_restart_notification_cleans_up_on_send_failure(
    tmp_path, monkeypatch
):
    """If the adapter.send() raises, the file is still cleaned up."""
    monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path)
    notify_path = tmp_path / ".restart_notify.json"
    notify_path.write_text(json.dumps({
        "platform": "telegram",
        "chat_id": "42",
    }))
    runner, adapter = make_restart_runner()
    # Delivery blows up; cleanup must still run so we don't re-notify forever.
    adapter.send = AsyncMock(side_effect=RuntimeError("network down"))
    await runner._send_restart_notification()
    assert not notify_path.exists()  # cleaned up despite error

View file

@ -396,6 +396,27 @@ class QueuedCommentaryAgent:
} }
class VerboseAgent:
    """Agent that emits a tool call with args whose JSON exceeds 200 chars."""

    # 300 chars — longer than the legacy 200-char verbose preview cap.
    LONG_CODE = "x" * 300

    def __init__(self, **kwargs):
        # The harness injects a progress callback; keep a handle to fire it.
        self.tool_progress_callback = kwargs.get("tool_progress_callback")
        self.tools = []

    def run_conversation(self, message, conversation_history=None, task_id=None):
        # Emit a single tool-start event carrying the oversized args payload.
        self.tool_progress_callback(
            "tool.started", "execute_code", None,
            {"code": self.LONG_CODE},
        )
        # Pause so the progress display has time to render the event.
        time.sleep(0.35)
        return {
            "final_response": "done",
            "messages": [],
            "api_calls": 1,
        }
async def _run_with_agent( async def _run_with_agent(
monkeypatch, monkeypatch,
tmp_path, tmp_path,
@ -575,3 +596,45 @@ async def test_run_agent_queued_message_does_not_treat_commentary_as_final(monke
assert result["final_response"] == "final response 2" assert result["final_response"] == "final response 2"
assert "I'll inspect the repo first." in sent_texts assert "I'll inspect the repo first." in sent_texts
assert "final response 1" in sent_texts assert "final response 1" in sent_texts
@pytest.mark.asyncio
async def test_verbose_mode_does_not_truncate_args_by_default(monkeypatch, tmp_path):
    """Verbose mode with default tool_preview_length (0) should NOT truncate args.

    Previously, verbose mode capped args at 200 chars when tool_preview_length
    was 0 (default). The user explicitly opted into verbose → show full detail.
    """
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        VerboseAgent,
        session_id="sess-verbose-no-truncate",
        config_data={"display": {"tool_progress": "verbose", "tool_preview_length": 0}},
    )
    assert result["final_response"] == "done"
    # The full 300-char 'x' string should be present, not truncated to 200.
    # Scan both fresh sends and message edits.
    all_content = " ".join(call["content"] for call in adapter.sent)
    all_content += " ".join(call["content"] for call in adapter.edits)
    assert VerboseAgent.LONG_CODE in all_content
@pytest.mark.asyncio
async def test_verbose_mode_respects_explicit_tool_preview_length(monkeypatch, tmp_path):
    """When tool_preview_length is set to a positive value, verbose truncates to that."""
    adapter, result = await _run_with_agent(
        monkeypatch,
        tmp_path,
        VerboseAgent,
        session_id="sess-verbose-explicit-cap",
        config_data={"display": {"tool_progress": "verbose", "tool_preview_length": 50}},
    )
    assert result["final_response"] == "done"
    # Scan both fresh sends and message edits.
    all_content = " ".join(call["content"] for call in adapter.sent)
    all_content += " ".join(call["content"] for call in adapter.edits)
    # Should be truncated — full 300-char string NOT present
    assert VerboseAgent.LONG_CODE not in all_content
    # But should still contain the truncated portion with "..."
    assert "..." in all_content

View file

@ -552,6 +552,45 @@ class TestLoadTranscriptPreferLongerSource:
assert result[0]["content"] == "db-q" assert result[0]["content"] == "db-q"
class TestSessionStoreSwitchSession:
    """Regression coverage for gateway /resume session switching semantics."""

    def test_switch_session_reopens_target_session_in_db(self, tmp_path):
        """Switching must close the current session and reopen the ended target."""
        from hermes_state import SessionDB
        config = GatewayConfig()
        # Bypass disk loading; wire a real on-disk SessionDB in by hand.
        with patch("gateway.session.SessionStore._ensure_loaded"):
            store = SessionStore(sessions_dir=tmp_path / "sessions", config=config)
        db = SessionDB(db_path=tmp_path / "state.db")
        store._db = db
        store._loaded = True
        source = SessionSource(
            platform=Platform.FEISHU,
            chat_id="chat-1",
            chat_type="dm",
            user_id="user-1",
            user_name="tester",
        )
        current_entry = store.get_or_create_session(source)
        current_session_id = current_entry.session_id
        # Create a previously-ended session to switch back to.
        target_session_id = "old_session_abc"
        db.create_session(target_session_id, source="feishu", user_id="user-1")
        db.end_session(target_session_id, end_reason="user_exit")
        assert db.get_session(target_session_id)["ended_at"] is not None
        switched = store.switch_session(current_entry.session_key, target_session_id)
        assert switched is not None
        assert switched.session_id == target_session_id
        # The outgoing session is recorded as ended via session_switch...
        assert db.get_session(current_session_id)["end_reason"] == "session_switch"
        # ...and the target is reopened: its end markers are cleared.
        resumed = db.get_session(target_session_id)
        assert resumed["ended_at"] is None
        assert resumed["end_reason"] is None
        db.close()
class TestWhatsAppDMSessionKeyConsistency: class TestWhatsAppDMSessionKeyConsistency:
"""Regression: all session-key construction must go through build_session_key """Regression: all session-key construction must go through build_session_key
so DMs are isolated by chat_id across platforms.""" so DMs are isolated by chat_id across platforms."""

View file

@ -60,7 +60,8 @@ def _make_runner():
def _make_event(text="hello", chat_id="12345"): def _make_event(text="hello", chat_id="12345"):
source = SessionSource( source = SessionSource(
platform=Platform.TELEGRAM, chat_id=chat_id, chat_type="dm" platform=Platform.TELEGRAM, chat_id=chat_id, chat_type="dm",
user_id="u1",
) )
return MessageEvent(text=text, message_type=MessageType.TEXT, source=source) return MessageEvent(text=text, message_type=MessageType.TEXT, source=source)
@ -192,7 +193,8 @@ async def test_command_messages_do_not_leave_sentinel():
_handle_message. They must NOT leave a sentinel behind.""" _handle_message. They must NOT leave a sentinel behind."""
runner = _make_runner() runner = _make_runner()
source = SessionSource( source = SessionSource(
platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm" platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm",
user_id="u1",
) )
event = MessageEvent( event = MessageEvent(
text="/help", message_type=MessageType.TEXT, source=source text="/help", message_type=MessageType.TEXT, source=source
@ -240,9 +242,7 @@ async def test_stop_during_sentinel_force_cleans_session():
stop_event = _make_event(text="/stop") stop_event = _make_event(text="/stop")
result = await runner._handle_message(stop_event) result = await runner._handle_message(stop_event)
assert result is not None, "/stop during sentinel should return a message" assert result is not None, "/stop during sentinel should return a message"
assert "force-stopped" in result.lower() or "unlocked" in result.lower() assert "stopped" in result.lower()
# Sentinel must be cleaned up
assert session_key not in runner._running_agents, ( assert session_key not in runner._running_agents, (
"/stop must remove sentinel so the session is unlocked" "/stop must remove sentinel so the session is unlocked"
) )
@ -268,7 +268,7 @@ async def test_stop_hard_kills_running_agent():
forever showing 'writing...' but never producing output.""" forever showing 'writing...' but never producing output."""
runner = _make_runner() runner = _make_runner()
session_key = build_session_key( session_key = build_session_key(
SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm") SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1")
) )
# Simulate a running (possibly hung) agent # Simulate a running (possibly hung) agent
@ -289,7 +289,7 @@ async def test_stop_hard_kills_running_agent():
# Must return a confirmation # Must return a confirmation
assert result is not None assert result is not None
assert "force-stopped" in result.lower() or "unlocked" in result.lower() assert "stopped" in result.lower()
# ------------------------------------------------------------------ # ------------------------------------------------------------------
@ -301,7 +301,7 @@ async def test_stop_clears_pending_messages():
queued during the run must be discarded.""" queued during the run must be discarded."""
runner = _make_runner() runner = _make_runner()
session_key = build_session_key( session_key = build_session_key(
SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm") SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1")
) )
fake_agent = MagicMock() fake_agent = MagicMock()

View file

@ -0,0 +1,279 @@
"""Tests for _setup_feishu() in hermes_cli/gateway.py.
Verifies that the interactive setup writes env vars that correctly drive the
Feishu adapter: credentials, connection mode, DM policy, and group policy.
"""
import os
from unittest.mock import patch
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _run_setup_feishu(
    *,
    qr_result=None,
    prompt_yes_no_responses=None,
    prompt_choice_responses=None,
    prompt_responses=None,
    existing_env=None,
):
    """Run _setup_feishu() with mocked I/O and return the env vars that were saved.

    Returns a dict of {env_var_name: value} for all save_env_value calls.

    Keyword args:
        qr_result: value returned by the patched ``qr_register`` — a dict on a
            successful scan, ``None`` to force the manual-entry path.
        prompt_yes_no_responses: ordered answers consumed by ``prompt_yes_no``
            (defaults to one ``True``, i.e. "start the QR flow").
        prompt_choice_responses: ordered menu indices consumed by
            ``prompt_choice`` — the count depends on the path (see below).
        prompt_responses: ordered free-text answers consumed by ``prompt``
            (app id / secret on the manual path, then home channel).
        existing_env: pre-existing env values returned by the patched
            ``get_env_value`` (defaults to empty).
    """
    existing_env = existing_env or {}
    prompt_yes_no_responses = list(prompt_yes_no_responses or [True])
    # QR path: method(0), dm(0), group(0) — 3 choices (no connection mode)
    # Manual path: method(1), domain(0), connection(0), dm(0), group(0) — 5 choices
    prompt_choice_responses = list(prompt_choice_responses or [0, 0, 0])
    prompt_responses = list(prompt_responses or [""])
    saved_env = {}

    # Capture every env write instead of touching the real .env file.
    def mock_save(name, value):
        saved_env[name] = value

    # Serve "existing" env reads from the test-supplied dict only.
    def mock_get(name):
        return existing_env.get(name, "")

    # NOTE: the side_effect lists are consumed in call order by _setup_feishu,
    # so the response lists above must match the prompt sequence exactly —
    # a StopIteration here means the flow asked more questions than expected.
    with patch("hermes_cli.gateway.save_env_value", side_effect=mock_save), \
            patch("hermes_cli.gateway.get_env_value", side_effect=mock_get), \
            patch("hermes_cli.gateway.prompt_yes_no", side_effect=prompt_yes_no_responses), \
            patch("hermes_cli.gateway.prompt_choice", side_effect=prompt_choice_responses), \
            patch("hermes_cli.gateway.prompt", side_effect=prompt_responses), \
            patch("hermes_cli.gateway.print_info"), \
            patch("hermes_cli.gateway.print_success"), \
            patch("hermes_cli.gateway.print_warning"), \
            patch("hermes_cli.gateway.print_error"), \
            patch("hermes_cli.gateway.color", side_effect=lambda t, c: t), \
            patch("gateway.platforms.feishu.qr_register", return_value=qr_result):
        # Import inside the patch context so module-level lookups resolve
        # against the patched names.
        from hermes_cli.gateway import _setup_feishu
        _setup_feishu()
    return saved_env
# ---------------------------------------------------------------------------
# QR scan-to-create path
# ---------------------------------------------------------------------------
class TestSetupFeishuQrPath:
    """Tests for the QR scan-to-create happy path."""

    # Canonical successful QR-registration payload shared by both tests.
    _QR_PAYLOAD = {
        "app_id": "cli_test",
        "app_secret": "secret_test",
        "domain": "feishu",
        "open_id": "ou_owner",
        "bot_name": "TestBot",
        "bot_open_id": "ou_bot",
    }

    def _run_happy_path(self):
        """Drive setup through the QR flow: accept QR, dm=pairing, group=open,
        skip home channel. Returns the env vars the setup wrote."""
        return _run_setup_feishu(
            qr_result=dict(self._QR_PAYLOAD),
            prompt_yes_no_responses=[True],  # Start QR
            prompt_choice_responses=[0, 0, 0],  # method=QR, dm=pairing, group=open
            prompt_responses=[""],  # home channel: skip
        )

    def test_qr_success_saves_core_credentials(self):
        saved = self._run_happy_path()
        assert saved["FEISHU_APP_ID"] == "cli_test"
        assert saved["FEISHU_APP_SECRET"] == "secret_test"
        assert saved["FEISHU_DOMAIN"] == "feishu"

    def test_qr_success_does_not_persist_bot_identity(self):
        """Bot identity is discovered at runtime by _hydrate_bot_identity — not persisted
        in env, so it stays fresh if the user renames the bot later."""
        saved = self._run_happy_path()
        assert "FEISHU_BOT_OPEN_ID" not in saved
        assert "FEISHU_BOT_NAME" not in saved
# ---------------------------------------------------------------------------
# Connection mode
# ---------------------------------------------------------------------------
class TestSetupFeishuConnectionMode:
    """Connection mode: QR always websocket, manual path lets user choose."""

    def test_qr_path_defaults_to_websocket(self):
        # QR flow never asks for a connection mode; websocket is implied.
        saved = _run_setup_feishu(
            qr_result={
                "app_id": "cli_test", "app_secret": "s", "domain": "feishu",
                "open_id": None, "bot_name": None, "bot_open_id": None,
            },
            prompt_choice_responses=[0, 0, 0],  # method=QR, dm=pairing, group=open
            prompt_responses=[""],
        )
        assert saved["FEISHU_CONNECTION_MODE"] == "websocket"

    def _run_manual(self, connection_idx):
        """Manual-entry path with the given connection-mode menu index.

        Choices consumed: method=manual, domain=feishu, connection=<idx>,
        dm=pairing, group=open. Prompts: app_id, app_secret, home_channel.
        """
        return _run_setup_feishu(
            qr_result=None,
            prompt_choice_responses=[1, 0, connection_idx, 0, 0],
            prompt_responses=["cli_manual", "secret_manual", ""],
        )

    @patch("gateway.platforms.feishu.probe_bot", return_value=None)
    def test_manual_path_websocket(self, _mock_probe):
        assert self._run_manual(0)["FEISHU_CONNECTION_MODE"] == "websocket"

    @patch("gateway.platforms.feishu.probe_bot", return_value=None)
    def test_manual_path_webhook(self, _mock_probe):
        assert self._run_manual(1)["FEISHU_CONNECTION_MODE"] == "webhook"
# ---------------------------------------------------------------------------
# DM security policy
# ---------------------------------------------------------------------------
class TestSetupFeishuDmPolicy:
    """DM policy must use platform-scoped FEISHU_ALLOW_ALL_USERS, not the global flag."""

    def _run_with_dm_choice(self, dm_choice_idx, prompt_responses=None):
        """QR-path setup with the DM-policy menu forced to ``dm_choice_idx``."""
        return _run_setup_feishu(
            qr_result={
                "app_id": "cli_test", "app_secret": "s", "domain": "feishu",
                "open_id": "ou_owner", "bot_name": None, "bot_open_id": None,
            },
            prompt_yes_no_responses=[True],
            prompt_choice_responses=[0, dm_choice_idx, 0],  # method=QR, dm=<choice>, group=open
            prompt_responses=prompt_responses or [""],
        )

    def test_pairing_sets_feishu_allow_all_false(self):
        saved = self._run_with_dm_choice(0)
        assert saved["FEISHU_ALLOW_ALL_USERS"] == "false"
        assert saved["FEISHU_ALLOWED_USERS"] == ""
        assert "GATEWAY_ALLOW_ALL_USERS" not in saved

    def test_allow_all_sets_feishu_allow_all_true(self):
        saved = self._run_with_dm_choice(1)
        assert saved["FEISHU_ALLOW_ALL_USERS"] == "true"
        assert saved["FEISHU_ALLOWED_USERS"] == ""
        assert "GATEWAY_ALLOW_ALL_USERS" not in saved

    def test_allowlist_sets_feishu_allow_all_false_with_list(self):
        saved = self._run_with_dm_choice(2, prompt_responses=["ou_user1,ou_user2", ""])
        assert saved["FEISHU_ALLOW_ALL_USERS"] == "false"
        assert saved["FEISHU_ALLOWED_USERS"] == "ou_user1,ou_user2"
        assert "GATEWAY_ALLOW_ALL_USERS" not in saved

    def test_allowlist_prepopulates_with_scan_owner_open_id(self):
        """When open_id is available from QR scan, it should be the default allowlist value."""
        # We return the owner's open_id from prompt (+ empty home channel).
        saved = self._run_with_dm_choice(2, prompt_responses=["ou_owner", ""])
        assert saved["FEISHU_ALLOWED_USERS"] == "ou_owner"
# ---------------------------------------------------------------------------
# Group policy
# ---------------------------------------------------------------------------
class TestSetupFeishuGroupPolicy:
    """Group policy env var written by the QR-path setup."""

    def _run_with_group_choice(self, group_idx):
        """QR-path setup with the group-policy menu forced to ``group_idx``."""
        return _run_setup_feishu(
            qr_result={
                "app_id": "cli_test", "app_secret": "s", "domain": "feishu",
                "open_id": None, "bot_name": None, "bot_open_id": None,
            },
            prompt_yes_no_responses=[True],
            prompt_choice_responses=[0, 0, group_idx],  # method=QR, dm=pairing, group=<choice>
            prompt_responses=[""],
        )

    def test_open_with_mention(self):
        assert self._run_with_group_choice(0)["FEISHU_GROUP_POLICY"] == "open"

    def test_disabled(self):
        assert self._run_with_group_choice(1)["FEISHU_GROUP_POLICY"] == "disabled"
# ---------------------------------------------------------------------------
# Adapter integration: env vars → FeishuAdapterSettings
# ---------------------------------------------------------------------------
class TestSetupFeishuAdapterIntegration:
    """Verify that env vars written by _setup_feishu() produce a valid adapter config.

    This bridges the gap between 'setup wrote the right env vars' and
    'the adapter will actually initialize correctly from those vars'.
    """

    def _make_env_from_setup(self, dm_idx=0, group_idx=0):
        """Run _setup_feishu via QR path and return the env vars it would write."""
        return _run_setup_feishu(
            qr_result={
                "app_id": "cli_test_app",
                "app_secret": "test_secret_value",
                "domain": "feishu",
                "open_id": "ou_owner",
                "bot_name": "IntegrationBot",
                "bot_open_id": "ou_bot_integration",
            },
            prompt_yes_no_responses=[True],
            prompt_choice_responses=[0, dm_idx, group_idx],  # method=QR, dm, group
            prompt_responses=[""],
        )

    @patch.dict(os.environ, {}, clear=True)
    def test_qr_env_produces_valid_adapter_settings(self):
        """QR setup → adapter initializes with websocket mode."""
        written = self._make_env_from_setup()
        with patch.dict(os.environ, written, clear=True):
            from gateway.config import PlatformConfig
            from gateway.platforms.feishu import FeishuAdapter

            adapter = FeishuAdapter(PlatformConfig())
            assert adapter._app_id == "cli_test_app"
            assert adapter._app_secret == "test_secret_value"
            assert adapter._domain_name == "feishu"
            assert adapter._connection_mode == "websocket"

    @patch.dict(os.environ, {}, clear=True)
    def test_open_dm_env_sets_correct_adapter_state(self):
        """Setup with 'allow all DMs' → adapter sees allow-all flag."""
        written = self._make_env_from_setup(dm_idx=1)
        with patch.dict(os.environ, written, clear=True):
            from gateway.config import PlatformConfig
            from gateway.platforms.feishu import FeishuAdapter

            # Verify adapter initializes without error and env var is correct.
            FeishuAdapter(PlatformConfig())
            assert os.getenv("FEISHU_ALLOW_ALL_USERS") == "true"

    @patch.dict(os.environ, {}, clear=True)
    def test_group_open_env_sets_adapter_group_policy(self):
        """Setup with 'open groups' → adapter group_policy is 'open'."""
        written = self._make_env_from_setup(group_idx=0)
        with patch.dict(os.environ, written, clear=True):
            from gateway.config import PlatformConfig
            from gateway.platforms.feishu import FeishuAdapter

            adapter = FeishuAdapter(PlatformConfig())
            assert adapter._group_policy == "open"

View file

@ -209,6 +209,33 @@ class TestScopedLocks:
assert payload["pid"] == os.getpid() assert payload["pid"] == os.getpid()
assert payload["metadata"]["platform"] == "telegram" assert payload["metadata"]["platform"] == "telegram"
    def test_acquire_scoped_lock_recovers_empty_lock_file(self, tmp_path, monkeypatch):
        """Empty lock file (0 bytes) left by a crashed process should be treated as stale."""
        # Point the lock directory at a fresh tmp dir so the test is hermetic.
        monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))
        # Pre-create the lock file at the exact path acquire_scoped_lock will use
        # (suffix presumably derived from the scope/secret — confirm against status).
        lock_path = tmp_path / "locks" / "slack-app-token-2bb80d537b1da3e3.lock"
        lock_path.parent.mkdir(parents=True, exist_ok=True)
        lock_path.write_text("")  # simulate crash between O_CREAT and json.dump
        acquired, existing = status.acquire_scoped_lock("slack-app-token", "secret", metadata={"platform": "slack"})
        # The stale empty file must not block acquisition...
        assert acquired is True
        # ...and must be overwritten with a valid JSON payload for this process.
        payload = json.loads(lock_path.read_text())
        assert payload["pid"] == os.getpid()
        assert payload["metadata"]["platform"] == "slack"
    def test_acquire_scoped_lock_recovers_corrupt_lock_file(self, tmp_path, monkeypatch):
        """Lock file with invalid JSON should be treated as stale."""
        # Hermetic lock directory under tmp_path.
        monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))
        lock_path = tmp_path / "locks" / "slack-app-token-2bb80d537b1da3e3.lock"
        lock_path.parent.mkdir(parents=True, exist_ok=True)
        lock_path.write_text("{truncated")  # simulate partial write
        acquired, existing = status.acquire_scoped_lock("slack-app-token", "secret", metadata={"platform": "slack"})
        # Unparseable contents must be discarded and the lock re-acquired...
        assert acquired is True
        # ...leaving valid JSON owned by the current process.
        payload = json.loads(lock_path.read_text())
        assert payload["pid"] == os.getpid()
def test_release_scoped_lock_only_removes_current_owner(self, tmp_path, monkeypatch): def test_release_scoped_lock_only_removes_current_owner(self, tmp_path, monkeypatch):
monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks")) monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))

View file

@ -29,7 +29,7 @@ def _make_runner():
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_handle_message_does_not_priority_interrupt_photo_followup(): async def test_handle_message_does_not_priority_interrupt_photo_followup():
runner = _make_runner() runner = _make_runner()
source = SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm") source = SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1")
session_key = build_session_key(source) session_key = build_session_key(source)
running_agent = MagicMock() running_agent = MagicMock()
runner._running_agents[session_key] = running_agent runner._running_agents[session_key] = running_agent

View file

@ -121,7 +121,7 @@ class TestSendWithReplyToMode:
adapter = adapter_factory(reply_to_mode="off") adapter = adapter_factory(reply_to_mode="off")
adapter._bot = MagicMock() adapter._bot = MagicMock()
adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -133,7 +133,7 @@ class TestSendWithReplyToMode:
adapter = adapter_factory(reply_to_mode="first") adapter = adapter_factory(reply_to_mode="first")
adapter._bot = MagicMock() adapter._bot = MagicMock()
adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -148,7 +148,7 @@ class TestSendWithReplyToMode:
adapter = adapter_factory(reply_to_mode="all") adapter = adapter_factory(reply_to_mode="all")
adapter._bot = MagicMock() adapter._bot = MagicMock()
adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"]
await adapter.send("12345", "test content", reply_to="999") await adapter.send("12345", "test content", reply_to="999")
@ -162,7 +162,7 @@ class TestSendWithReplyToMode:
adapter = adapter_factory(reply_to_mode="all") adapter = adapter_factory(reply_to_mode="all")
adapter._bot = MagicMock() adapter._bot = MagicMock()
adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"]
await adapter.send("12345", "test content", reply_to=None) await adapter.send("12345", "test content", reply_to=None)
@ -175,7 +175,7 @@ class TestSendWithReplyToMode:
adapter = adapter_factory(reply_to_mode="first") adapter = adapter_factory(reply_to_mode="first")
adapter._bot = MagicMock() adapter._bot = MagicMock()
adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
adapter.truncate_message = lambda content, max_len: ["single chunk"] adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"]
await adapter.send("12345", "test", reply_to="999") await adapter.send("12345", "test", reply_to="999")

View file

@ -417,6 +417,7 @@ class TestDiscordPlayTtsSkip:
adapter.config = config adapter.config = config
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_timeout_tasks = {} adapter._voice_timeout_tasks = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._voice_listen_tasks = {} adapter._voice_listen_tasks = {}
@ -702,13 +703,18 @@ class TestVoiceChannelCommands:
mock_adapter.join_voice_channel = AsyncMock(return_value=True) mock_adapter.join_voice_channel = AsyncMock(return_value=True)
mock_adapter.get_user_voice_channel = AsyncMock(return_value=mock_channel) mock_adapter.get_user_voice_channel = AsyncMock(return_value=mock_channel)
mock_adapter._voice_text_channels = {} mock_adapter._voice_text_channels = {}
mock_adapter._voice_sources = {}
mock_adapter._voice_input_callback = None mock_adapter._voice_input_callback = None
event = self._make_discord_event() event = self._make_discord_event()
event.source.chat_type = "group"
event.source.chat_name = "Hermes Server / #general"
runner.adapters[event.source.platform] = mock_adapter runner.adapters[event.source.platform] = mock_adapter
result = await runner._handle_voice_channel_join(event) result = await runner._handle_voice_channel_join(event)
assert "joined" in result.lower() assert "joined" in result.lower()
assert "General" in result assert "General" in result
assert runner._voice_mode["123"] == "all" assert runner._voice_mode["123"] == "all"
assert mock_adapter._voice_sources[111]["chat_id"] == "123"
assert mock_adapter._voice_sources[111]["chat_type"] == "group"
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_join_failure(self, runner): async def test_join_failure(self, runner):
@ -815,6 +821,7 @@ class TestVoiceChannelCommands:
from gateway.config import Platform from gateway.config import Platform
mock_adapter = AsyncMock() mock_adapter = AsyncMock()
mock_adapter._voice_text_channels = {111: 123} mock_adapter._voice_text_channels = {111: 123}
mock_adapter._voice_sources = {}
mock_channel = AsyncMock() mock_channel = AsyncMock()
mock_adapter._client = MagicMock() mock_adapter._client = MagicMock()
mock_adapter._client.get_channel = MagicMock(return_value=mock_channel) mock_adapter._client.get_channel = MagicMock(return_value=mock_channel)
@ -828,12 +835,45 @@ class TestVoiceChannelCommands:
assert event.source.chat_id == "123" assert event.source.chat_id == "123"
assert event.source.chat_type == "channel" assert event.source.chat_type == "channel"
@pytest.mark.asyncio
async def test_input_reuses_bound_source_metadata(self, runner):
"""Voice input should share the linked text channel session metadata."""
from gateway.config import Platform
bound_source = SessionSource(
chat_id="123",
chat_name="Hermes Server / #general",
chat_type="group",
user_id="user1",
user_name="user1",
platform=Platform.DISCORD,
)
mock_adapter = AsyncMock()
mock_adapter._voice_text_channels = {111: 123}
mock_adapter._voice_sources = {111: bound_source.to_dict()}
mock_channel = AsyncMock()
mock_adapter._client = MagicMock()
mock_adapter._client.get_channel = MagicMock(return_value=mock_channel)
mock_adapter.handle_message = AsyncMock()
runner.adapters[Platform.DISCORD] = mock_adapter
await runner._handle_voice_channel_input(111, 42, "Hello from VC")
mock_adapter.handle_message.assert_called_once()
event = mock_adapter.handle_message.call_args[0][0]
assert event.source.chat_id == "123"
assert event.source.chat_type == "group"
assert event.source.chat_name == "Hermes Server / #general"
assert event.source.user_id == "42"
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_input_posts_transcript_in_text_channel(self, runner): async def test_input_posts_transcript_in_text_channel(self, runner):
"""Voice input sends transcript message to text channel.""" """Voice input sends transcript message to text channel."""
from gateway.config import Platform from gateway.config import Platform
mock_adapter = AsyncMock() mock_adapter = AsyncMock()
mock_adapter._voice_text_channels = {111: 123} mock_adapter._voice_text_channels = {111: 123}
mock_adapter._voice_sources = {}
mock_channel = AsyncMock() mock_channel = AsyncMock()
mock_adapter._client = MagicMock() mock_adapter._client = MagicMock()
mock_adapter._client.get_channel = MagicMock(return_value=mock_channel) mock_adapter._client.get_channel = MagicMock(return_value=mock_channel)
@ -892,6 +932,7 @@ class TestDiscordVoiceChannelMethods:
adapter._client = MagicMock() adapter._client = MagicMock()
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_timeout_tasks = {} adapter._voice_timeout_tasks = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._voice_listen_tasks = {} adapter._voice_listen_tasks = {}
@ -926,6 +967,7 @@ class TestDiscordVoiceChannelMethods:
mock_vc.disconnect = AsyncMock() mock_vc.disconnect = AsyncMock()
adapter._voice_clients[111] = mock_vc adapter._voice_clients[111] = mock_vc
adapter._voice_text_channels[111] = 123 adapter._voice_text_channels[111] = 123
adapter._voice_sources[111] = {"chat_id": "123", "chat_type": "group"}
mock_receiver = MagicMock() mock_receiver = MagicMock()
adapter._voice_receivers[111] = mock_receiver adapter._voice_receivers[111] = mock_receiver
@ -944,6 +986,7 @@ class TestDiscordVoiceChannelMethods:
mock_timeout.cancel.assert_called_once() mock_timeout.cancel.assert_called_once()
assert 111 not in adapter._voice_clients assert 111 not in adapter._voice_clients
assert 111 not in adapter._voice_text_channels assert 111 not in adapter._voice_text_channels
assert 111 not in adapter._voice_sources
assert 111 not in adapter._voice_receivers assert 111 not in adapter._voice_receivers
@pytest.mark.asyncio @pytest.mark.asyncio
@ -1670,6 +1713,7 @@ class TestVoiceTimeoutCleansRunnerState:
adapter.config = config adapter.config = config
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_timeout_tasks = {} adapter._voice_timeout_tasks = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._voice_listen_tasks = {} adapter._voice_listen_tasks = {}
@ -1759,6 +1803,7 @@ class TestPlaybackTimeout:
adapter.config = config adapter.config = config
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_timeout_tasks = {} adapter._voice_timeout_tasks = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._voice_listen_tasks = {} adapter._voice_listen_tasks = {}
@ -1939,6 +1984,7 @@ class TestVoiceChannelAwareness:
adapter = object.__new__(DiscordAdapter) adapter = object.__new__(DiscordAdapter)
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._client = MagicMock() adapter._client = MagicMock()
adapter._client.user = SimpleNamespace(id=99999, name="HermesBot") adapter._client.user = SimpleNamespace(id=99999, name="HermesBot")
@ -2408,6 +2454,7 @@ class TestVoiceTTSPlayback:
adapter.config = config adapter.config = config
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
return adapter return adapter
@ -2587,6 +2634,7 @@ class TestUDPKeepalive:
adapter.config = config adapter.config = config
adapter._voice_clients = {} adapter._voice_clients = {}
adapter._voice_text_channels = {} adapter._voice_text_channels = {}
adapter._voice_sources = {}
adapter._voice_receivers = {} adapter._voice_receivers = {}
adapter._voice_listen_tasks = {} adapter._voice_listen_tasks = {}

View file

@ -0,0 +1,141 @@
"""Tests for gateway weak credential rejection at startup.
Ported from openclaw/openclaw#64586: rejects known-weak placeholder
tokens at gateway startup instead of letting them silently fail
against platform APIs.
"""
import logging
import pytest
from gateway.config import PlatformConfig, Platform, _validate_gateway_config
# ---------------------------------------------------------------------------
# Helper: create a minimal GatewayConfig with one enabled platform
# ---------------------------------------------------------------------------
def _make_gateway_config(platform, token, enabled=True, **extra_kwargs):
    """Create a minimal GatewayConfig-like object for validation testing."""
    from gateway.config import GatewayConfig

    # A single-platform config is all the validator needs.
    cfg = GatewayConfig(platforms={})
    cfg.platforms[platform] = PlatformConfig(
        enabled=enabled, token=token, **extra_kwargs
    )
    return cfg


def _validate_and_return(config):
    """Call _validate_gateway_config and return the config (mutated in place)."""
    _validate_gateway_config(config)
    return config
# ---------------------------------------------------------------------------
# Unit tests: platform token placeholder rejection
# ---------------------------------------------------------------------------
class TestPlatformTokenPlaceholderGuard:
    """Verify that _validate_gateway_config disables platforms with placeholder tokens."""

    def _assert_disabled(self, platform, token, caplog):
        """Validate a config holding ``token`` and assert the platform got disabled."""
        config = _make_gateway_config(platform, token)
        with caplog.at_level(logging.ERROR):
            _validate_and_return(config)
        assert config.platforms[platform].enabled is False

    def test_rejects_triple_asterisk(self, caplog):
        """'***' is the .env.example placeholder — should be rejected."""
        self._assert_disabled(Platform.TELEGRAM, "***", caplog)
        assert "placeholder" in caplog.text.lower()

    def test_rejects_changeme(self, caplog):
        self._assert_disabled(Platform.DISCORD, "changeme", caplog)

    def test_rejects_your_api_key(self, caplog):
        self._assert_disabled(Platform.SLACK, "your_api_key", caplog)

    def test_rejects_placeholder(self, caplog):
        self._assert_disabled(Platform.MATRIX, "placeholder", caplog)

    def test_accepts_real_token(self, caplog):
        """A real-looking bot token should pass validation."""
        config = _make_gateway_config(
            Platform.TELEGRAM, "7123456789:AAHdqTcvCH1vGWJxfSeOfSAs0K5PALDsaw"
        )
        with caplog.at_level(logging.ERROR):
            _validate_and_return(config)
        assert config.platforms[Platform.TELEGRAM].enabled is True
        assert "placeholder" not in caplog.text.lower()

    def test_accepts_empty_token_without_error(self, caplog):
        """Empty tokens get a warning (existing behavior), not a placeholder error."""
        config = _make_gateway_config(Platform.TELEGRAM, "")
        with caplog.at_level(logging.WARNING):
            _validate_and_return(config)
        # Empty token doesn't trigger placeholder rejection — enabled stays True
        # (the existing empty-token warning is separate).
        assert config.platforms[Platform.TELEGRAM].enabled is True

    def test_disabled_platform_not_checked(self, caplog):
        """Disabled platforms should not be validated."""
        config = _make_gateway_config(Platform.TELEGRAM, "***", enabled=False)
        with caplog.at_level(logging.ERROR):
            _validate_and_return(config)
        assert "placeholder" not in caplog.text.lower()

    def test_rejects_whitespace_padded_placeholder(self, caplog):
        """Whitespace-padded placeholders should still be caught."""
        self._assert_disabled(Platform.TELEGRAM, " *** ", caplog)
# ---------------------------------------------------------------------------
# Integration test: API server placeholder key on network-accessible host
# ---------------------------------------------------------------------------
class TestAPIServerPlaceholderKeyGuard:
    """Verify that the API server rejects placeholder keys on network hosts."""

    @staticmethod
    def _build_adapter(host, key):
        """Construct an APIServerAdapter bound to ``host`` with auth ``key``."""
        from gateway.platforms.api_server import APIServerAdapter

        return APIServerAdapter(
            PlatformConfig(enabled=True, extra={"host": host, "key": key})
        )

    @pytest.mark.asyncio
    async def test_refuses_wildcard_with_placeholder_key(self):
        # Wildcard bind + placeholder key → connect() must refuse.
        adapter = self._build_adapter("0.0.0.0", "changeme")
        assert await adapter.connect() is False

    @pytest.mark.asyncio
    async def test_refuses_wildcard_with_asterisk_key(self):
        adapter = self._build_adapter("0.0.0.0", "***")
        assert await adapter.connect() is False

    def test_allows_loopback_with_placeholder_key(self):
        """Loopback with a placeholder key is fine — not network-exposed."""
        from gateway.platforms.base import is_network_accessible

        adapter = self._build_adapter("127.0.0.1", "changeme")
        # On loopback the placeholder guard doesn't fire.
        assert is_network_accessible(adapter._host) is False

View file

@ -30,7 +30,7 @@ class TestWeixinFormatting:
assert ( assert (
adapter.format_message(content) adapter.format_message(content)
== "【Title】\n\n**Plan**\n\nUse **bold** and [docs](https://example.com)." == "【Title】\n\n**Plan**\n\nUse **bold** and docs (https://example.com)."
) )
def test_format_message_rewrites_markdown_tables(self): def test_format_message_rewrites_markdown_tables(self):
@ -374,3 +374,149 @@ class TestWeixinRemoteMediaSafety:
assert "Blocked unsafe URL" in str(exc) assert "Blocked unsafe URL" in str(exc)
else: else:
raise AssertionError("expected ValueError for unsafe URL") raise AssertionError("expected ValueError for unsafe URL")
class TestWeixinMarkdownLinks:
    """Markdown links should be converted to plaintext since WeChat can't render them."""

    def test_format_message_converts_markdown_links_to_plain_text(self):
        # Both links become "text (url)" plaintext.
        adapter = _make_adapter()
        source = "Check [the docs](https://example.com) and [GitHub](https://github.com) for details"
        expected = "Check the docs (https://example.com) and GitHub (https://github.com) for details"
        assert adapter.format_message(source) == expected

    def test_format_message_preserves_links_inside_code_blocks(self):
        # Fenced code must pass through untouched.
        adapter = _make_adapter()
        source = "See below:\n\n```\n[link](https://example.com)\n```\n\nDone."
        formatted = adapter.format_message(source)
        assert "[link](https://example.com)" in formatted
class TestWeixinBlankMessagePrevention:
    """Regression tests for the blank-bubble bugs.

    Three separate guards now prevent a blank WeChat message from ever being
    dispatched:
    1. ``_split_text_for_weixin_delivery("")`` returns ``[]`` not ``[""]``.
    2. ``send()`` filters out empty/whitespace-only chunks before calling
       ``_send_text_chunk``.
    3. ``_send_message()`` raises ``ValueError`` for empty text as a last-resort
       safety net.
    """

    def test_split_text_returns_empty_list_for_empty_string(self):
        assert _make_adapter()._split_text("") == []

    def test_split_text_returns_empty_list_for_empty_string_split_per_line(self):
        # Same guard must hold when per-line splitting is enabled.
        per_line_adapter = WeixinAdapter(
            PlatformConfig(
                enabled=True,
                extra={
                    "account_id": "acct",
                    "token": "test-tok",
                    "split_multiline_messages": True,
                },
            )
        )
        assert per_line_adapter._split_text("") == []

    @patch("gateway.platforms.weixin._send_message", new_callable=AsyncMock)
    def test_send_empty_content_does_not_call_send_message(self, send_message_mock):
        adapter = _make_adapter()
        # Minimal connected-adapter state so send() reaches the chunking logic.
        adapter._session = object()
        adapter._token = "test-token"
        adapter._base_url = "https://weixin.example.com"
        adapter._token_store.get = lambda account_id, chat_id: "ctx-token"

        result = asyncio.run(adapter.send("wxid_test123", ""))

        # Empty content → no chunks → no _send_message calls
        assert result.success is True
        send_message_mock.assert_not_awaited()

    def test_send_message_rejects_empty_text(self):
        """_send_message raises ValueError for empty/whitespace text."""
        import pytest

        with pytest.raises(ValueError, match="text must not be empty"):
            asyncio.run(
                weixin._send_message(
                    AsyncMock(),
                    base_url="https://example.com",
                    token="tok",
                    to="wxid_test",
                    text="",
                    context_token=None,
                    client_id="cid",
                )
            )
class TestWeixinStreamingCursorSuppression:
    """WeChat doesn't support message editing — cursor must be suppressed."""

    def test_supports_message_editing_is_false(self):
        # The class-level flag drives cursor suppression in the streaming layer.
        assert _make_adapter().SUPPORTS_MESSAGE_EDITING is False
class TestWeixinMediaBuilder:
    """Media builder uses base64(hex_key), not base64(raw_bytes) for aes_key."""

    def test_image_builder_aes_key_is_base64_of_hex(self):
        import base64

        adapter = _make_adapter()
        # .jpg files are classified as image media.
        media_type, builder = adapter._outbound_media_builder("photo.jpg")
        assert media_type == weixin.MEDIA_IMAGE
        fake_hex_key = "0123456789abcdef0123456789abcdef"
        # The API expects base64 of the hex string itself, not of the raw bytes.
        expected_aes = base64.b64encode(fake_hex_key.encode("ascii")).decode("ascii")
        item = builder(
            encrypt_query_param="eq",
            aes_key_for_api=expected_aes,
            ciphertext_size=1024,
            plaintext_size=1000,
            filename="photo.jpg",
            rawfilemd5="abc123",
        )
        assert item["image_item"]["media"]["aes_key"] == expected_aes

    def test_video_builder_includes_md5(self):
        adapter = _make_adapter()
        # .mp4 files are classified as video media.
        media_type, builder = adapter._outbound_media_builder("clip.mp4")
        assert media_type == weixin.MEDIA_VIDEO
        item = builder(
            encrypt_query_param="eq",
            aes_key_for_api="fakekey",
            ciphertext_size=2048,
            plaintext_size=2000,
            filename="clip.mp4",
            rawfilemd5="deadbeef",
        )
        # Video items must carry the raw-file MD5.
        assert item["video_item"]["video_md5"] == "deadbeef"

    def test_voice_builder_for_audio_files(self):
        adapter = _make_adapter()
        # .mp3 files are classified as voice media.
        media_type, builder = adapter._outbound_media_builder("note.mp3")
        assert media_type == weixin.MEDIA_VOICE
        item = builder(
            encrypt_query_param="eq",
            aes_key_for_api="fakekey",
            ciphertext_size=512,
            plaintext_size=500,
            filename="note.mp3",
            rawfilemd5="abc",
        )
        assert item["type"] == weixin.ITEM_VOICE
        assert "voice_item" in item
def test_voice_builder_for_silk_files(self):
adapter = _make_adapter()
media_type, builder = adapter._outbound_media_builder("recording.silk")
assert media_type == weixin.MEDIA_VOICE

View file

@ -0,0 +1,271 @@
"""Tests for WhatsApp message formatting and chunking.
Covers:
- format_message(): markdown WhatsApp syntax conversion
- send(): message chunking for long responses
- MAX_MESSAGE_LENGTH: practical UX limit
"""
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from gateway.config import Platform, PlatformConfig
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_adapter():
    """Create a WhatsAppAdapter with test attributes (bypass __init__).

    ``__new__`` skips __init__ so no bridge process, session files, or network
    resources are touched; every attribute the tested code paths read is set
    by hand below.
    """
    from gateway.platforms.whatsapp import WhatsAppAdapter

    adapter = WhatsAppAdapter.__new__(WhatsAppAdapter)
    adapter.platform = Platform.WHATSAPP
    adapter.config = MagicMock()
    adapter.config.extra = {}
    # Bridge process wiring (never actually started in these tests).
    adapter._bridge_port = 3000
    adapter._bridge_script = "/tmp/test-bridge.js"
    adapter._session_path = MagicMock()
    adapter._bridge_log_fh = None
    adapter._bridge_log = None
    adapter._bridge_process = None
    adapter._reply_prefix = None
    # Adapter lifecycle / error state.
    adapter._running = True
    adapter._message_handler = None
    adapter._fatal_error_code = None
    adapter._fatal_error_message = None
    adapter._fatal_error_retryable = True
    adapter._fatal_error_handler = None
    # Per-chat bookkeeping containers.
    adapter._active_sessions = {}
    adapter._pending_messages = {}
    adapter._background_tasks = set()
    adapter._auto_tts_disabled_chats = set()
    adapter._message_queue = asyncio.Queue()
    # HTTP session is a mock so send() tests can inspect post() calls.
    adapter._http_session = MagicMock()
    adapter._mention_patterns = []
    return adapter
class _AsyncCM:
"""Minimal async context manager returning a fixed value."""
def __init__(self, value):
self.value = value
async def __aenter__(self):
return self.value
async def __aexit__(self, *exc):
return False
# ---------------------------------------------------------------------------
# format_message tests
# ---------------------------------------------------------------------------
class TestFormatMessage:
    """WhatsApp markdown conversion."""

    def test_bold_double_asterisk(self):
        wa = _make_adapter()
        assert wa.format_message("**hello**") == "*hello*"

    def test_bold_double_underscore(self):
        wa = _make_adapter()
        assert wa.format_message("__hello__") == "*hello*"

    def test_strikethrough(self):
        wa = _make_adapter()
        assert wa.format_message("~~deleted~~") == "~deleted~"

    def test_headers_converted_to_bold(self):
        # All header levels collapse to WhatsApp bold.
        wa = _make_adapter()
        for markdown, expected in [
            ("# Title", "*Title*"),
            ("## Subtitle", "*Subtitle*"),
            ("### Deep", "*Deep*"),
        ]:
            assert wa.format_message(markdown) == expected

    def test_links_converted(self):
        wa = _make_adapter()
        converted = wa.format_message("[click here](https://example.com)")
        assert converted == "click here (https://example.com)"

    def test_code_blocks_protected(self):
        """Code blocks should not have their content reformatted."""
        wa = _make_adapter()
        raw = "before **bold** ```python\n**not bold**\n``` after **bold**"
        converted = wa.format_message(raw)
        assert "```python\n**not bold**\n```" in converted
        assert converted.startswith("before *bold*")
        assert converted.endswith("after *bold*")

    def test_inline_code_protected(self):
        """Inline code should not have its content reformatted."""
        wa = _make_adapter()
        converted = wa.format_message("use `**raw**` here")
        assert "`**raw**`" in converted
        assert converted.startswith("use ")

    def test_empty_content(self):
        wa = _make_adapter()
        assert wa.format_message("") == ""
        assert wa.format_message(None) is None

    def test_plain_text_unchanged(self):
        wa = _make_adapter()
        assert wa.format_message("hello world") == "hello world"

    def test_already_whatsapp_italic(self):
        """Single *italic* should pass through unchanged."""
        wa = _make_adapter()
        # After bold conversion, *text* is WhatsApp italic
        assert wa.format_message("*italic*") == "*italic*"

    def test_multiline_mixed(self):
        wa = _make_adapter()
        raw = "# Header\n\n**Bold text** and ~~strike~~\n\n```\ncode\n```"
        converted = wa.format_message(raw)
        for fragment in ("*Header*", "*Bold text*", "~strike~", "```\ncode\n```"):
            assert fragment in converted
# ---------------------------------------------------------------------------
# MAX_MESSAGE_LENGTH tests
# ---------------------------------------------------------------------------
class TestMessageLimits:
    """WhatsApp message length limits."""

    def test_max_message_length_is_practical(self):
        from gateway.platforms.whatsapp import WhatsAppAdapter

        # A 4096-char ceiling keeps each bubble readable on mobile clients.
        limit = WhatsAppAdapter.MAX_MESSAGE_LENGTH
        assert limit == 4096
# ---------------------------------------------------------------------------
# send() chunking tests
# ---------------------------------------------------------------------------
class TestSendChunking:
    """WhatsApp send() splits long messages into chunks."""

    @pytest.mark.asyncio
    async def test_short_message_single_send(self):
        adapter = _make_adapter()
        # Bridge replies 200 with a message id — the happy path.
        resp = MagicMock(status=200)
        resp.json = AsyncMock(return_value={"messageId": "msg1"})
        adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp))
        result = await adapter.send("chat1", "short message")
        assert result.success
        # Only one call to bridge /send
        assert adapter._http_session.post.call_count == 1

    @pytest.mark.asyncio
    async def test_long_message_chunked(self):
        adapter = _make_adapter()
        resp = MagicMock(status=200)
        resp.json = AsyncMock(return_value={"messageId": "msg1"})
        adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp))
        # Create a message longer than MAX_MESSAGE_LENGTH (4096)
        long_msg = "a " * 3000  # ~6000 chars
        result = await adapter.send("chat1", long_msg)
        assert result.success
        # Should have made multiple calls
        assert adapter._http_session.post.call_count > 1

    @pytest.mark.asyncio
    async def test_empty_message_no_send(self):
        # Empty text is reported as success with zero bridge calls.
        adapter = _make_adapter()
        result = await adapter.send("chat1", "")
        assert result.success
        assert adapter._http_session.post.call_count == 0

    @pytest.mark.asyncio
    async def test_whitespace_only_no_send(self):
        # Whitespace-only text is treated the same as empty: nothing dispatched.
        adapter = _make_adapter()
        result = await adapter.send("chat1", " \n ")
        assert result.success
        assert adapter._http_session.post.call_count == 0

    @pytest.mark.asyncio
    async def test_format_applied_before_send(self):
        """Markdown should be converted to WhatsApp format before sending."""
        adapter = _make_adapter()
        resp = MagicMock(status=200)
        resp.json = AsyncMock(return_value={"messageId": "msg1"})
        adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp))
        await adapter.send("chat1", "**bold text**")
        # Check the payload sent to the bridge
        call_args = adapter._http_session.post.call_args
        payload = call_args.kwargs.get("json") or call_args[1].get("json")
        assert payload["message"] == "*bold text*"

    @pytest.mark.asyncio
    async def test_reply_to_only_on_first_chunk(self):
        """reply_to should only be set on the first chunk."""
        adapter = _make_adapter()
        resp = MagicMock(status=200)
        resp.json = AsyncMock(return_value={"messageId": "msg1"})
        adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp))
        long_msg = "word " * 2000  # ~10000 chars, multiple chunks
        await adapter.send("chat1", long_msg, reply_to="orig123")
        calls = adapter._http_session.post.call_args_list
        assert len(calls) > 1
        # First chunk should have replyTo
        first_payload = calls[0].kwargs.get("json") or calls[0][1].get("json")
        assert first_payload.get("replyTo") == "orig123"
        # Subsequent chunks should NOT have replyTo
        for call in calls[1:]:
            payload = call.kwargs.get("json") or call[1].get("json")
            assert "replyTo" not in payload

    @pytest.mark.asyncio
    async def test_bridge_error_returns_failure(self):
        # A bridge 500 must surface as failure with the response body text.
        adapter = _make_adapter()
        resp = MagicMock(status=500)
        resp.text = AsyncMock(return_value="Internal Server Error")
        adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp))
        result = await adapter.send("chat1", "hello")
        assert not result.success
        assert "Internal Server Error" in result.error

    @pytest.mark.asyncio
    async def test_not_connected_returns_failure(self):
        # A stopped adapter refuses to send rather than touching the bridge.
        adapter = _make_adapter()
        adapter._running = False
        result = await adapter.send("chat1", "hello")
        assert not result.success
        assert "Not connected" in result.error
# ---------------------------------------------------------------------------
# display_config tier classification
# ---------------------------------------------------------------------------
class TestWhatsAppTier:
    """WhatsApp should be classified as TIER_MEDIUM."""

    def test_whatsapp_streaming_follows_global(self):
        from gateway.display_config import resolve_display_setting

        # TIER_MEDIUM leaves streaming unset (None) so the global default applies.
        setting = resolve_display_setting({}, "whatsapp", "streaming")
        assert setting is None

    def test_whatsapp_tool_progress_is_new(self):
        from gateway.display_config import resolve_display_setting

        # Tool progress arrives as new messages rather than edits.
        setting = resolve_display_setting({}, "whatsapp", "tool_progress")
        assert setting == "new"

View file

@ -23,9 +23,9 @@ from hermes_cli.auth import (
get_auth_status, get_auth_status,
AuthError, AuthError,
KIMI_CODE_BASE_URL, KIMI_CODE_BASE_URL,
_try_gh_cli_token,
_resolve_kimi_base_url, _resolve_kimi_base_url,
) )
from hermes_cli.copilot_auth import _try_gh_cli_token
# ============================================================================= # =============================================================================
@ -68,7 +68,7 @@ class TestProviderRegistry:
def test_copilot_env_vars(self): def test_copilot_env_vars(self):
pconfig = PROVIDER_REGISTRY["copilot"] pconfig = PROVIDER_REGISTRY["copilot"]
assert pconfig.api_key_env_vars == ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN") assert pconfig.api_key_env_vars == ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN")
assert pconfig.base_url_env_var == "" assert pconfig.base_url_env_var == "COPILOT_API_BASE_URL"
def test_kimi_env_vars(self): def test_kimi_env_vars(self):
pconfig = PROVIDER_REGISTRY["kimi-coding"] pconfig = PROVIDER_REGISTRY["kimi-coding"]
@ -381,13 +381,13 @@ class TestResolveApiKeyProviderCredentials:
assert creds["source"] == "gh auth token" assert creds["source"] == "gh auth token"
def test_try_gh_cli_token_uses_homebrew_path_when_not_on_path(self, monkeypatch): def test_try_gh_cli_token_uses_homebrew_path_when_not_on_path(self, monkeypatch):
monkeypatch.setattr("hermes_cli.auth.shutil.which", lambda command: None) monkeypatch.setattr("hermes_cli.copilot_auth.shutil.which", lambda command: None)
monkeypatch.setattr( monkeypatch.setattr(
"hermes_cli.auth.os.path.isfile", "hermes_cli.copilot_auth.os.path.isfile",
lambda path: path == "/opt/homebrew/bin/gh", lambda path: path == "/opt/homebrew/bin/gh",
) )
monkeypatch.setattr( monkeypatch.setattr(
"hermes_cli.auth.os.access", "hermes_cli.copilot_auth.os.access",
lambda path, mode: path == "/opt/homebrew/bin/gh" and mode == os.X_OK, lambda path, mode: path == "/opt/homebrew/bin/gh" and mode == os.X_OK,
) )
@ -397,11 +397,11 @@ class TestResolveApiKeyProviderCredentials:
returncode = 0 returncode = 0
stdout = "gh-cli-secret\n" stdout = "gh-cli-secret\n"
def _fake_run(cmd, capture_output, text, timeout): def _fake_run(cmd, **kwargs):
calls.append(cmd) calls.append(cmd)
return _Result() return _Result()
monkeypatch.setattr("hermes_cli.auth.subprocess.run", _fake_run) monkeypatch.setattr("hermes_cli.copilot_auth.subprocess.run", _fake_run)
assert _try_gh_cli_token() == "gh-cli-secret" assert _try_gh_cli_token() == "gh-cli-secret"
assert calls == [["/opt/homebrew/bin/gh", "auth", "token"]] assert calls == [["/opt/homebrew/bin/gh", "auth", "token"]]

View file

@ -1,6 +1,8 @@
"""Tests for hermes backup and import commands.""" """Tests for hermes backup and import commands."""
import json
import os import os
import sqlite3
import zipfile import zipfile
from argparse import Namespace from argparse import Namespace
from pathlib import Path from pathlib import Path
@ -232,6 +234,44 @@ class TestBackup:
assert len(zips) == 1 assert len(zips) == 1
# ---------------------------------------------------------------------------
# _validate_backup_zip tests
# ---------------------------------------------------------------------------
class TestValidateBackupZip:
def _make_zip(self, zip_path: Path, filenames: list[str]) -> None:
with zipfile.ZipFile(zip_path, "w") as zf:
for name in filenames:
zf.writestr(name, "dummy")
def test_state_db_passes(self, tmp_path):
"""A zip containing state.db is accepted as a valid Hermes backup."""
from hermes_cli.backup import _validate_backup_zip
zip_path = tmp_path / "backup.zip"
self._make_zip(zip_path, ["state.db", "sessions/abc.json"])
with zipfile.ZipFile(zip_path, "r") as zf:
ok, reason = _validate_backup_zip(zf)
assert ok, reason
def test_old_wrong_db_name_fails(self, tmp_path):
"""A zip with only hermes_state.db (old wrong name) is rejected."""
from hermes_cli.backup import _validate_backup_zip
zip_path = tmp_path / "old.zip"
self._make_zip(zip_path, ["hermes_state.db", "memory_store.db"])
with zipfile.ZipFile(zip_path, "r") as zf:
ok, reason = _validate_backup_zip(zf)
assert not ok
def test_config_yaml_passes(self, tmp_path):
"""A zip containing config.yaml is accepted (existing behaviour preserved)."""
from hermes_cli.backup import _validate_backup_zip
zip_path = tmp_path / "backup.zip"
self._make_zip(zip_path, ["config.yaml", "skills/x/SKILL.md"])
with zipfile.ZipFile(zip_path, "r") as zf:
ok, reason = _validate_backup_zip(zf)
assert ok, reason
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Import tests # Import tests
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -895,3 +935,181 @@ class TestProfileRestoration:
# Files should still be restored even if wrappers can't be created # Files should still be restored even if wrappers can't be created
assert (hermes_home / "profiles" / "coder" / "config.yaml").exists() assert (hermes_home / "profiles" / "coder" / "config.yaml").exists()
# ---------------------------------------------------------------------------
# SQLite safe copy tests
# ---------------------------------------------------------------------------
class TestSafeCopyDb:
    """_safe_copy_db must yield a consistent copy, including WAL-mode databases."""

    def test_copies_valid_database(self, tmp_path):
        from hermes_cli.backup import _safe_copy_db

        source = tmp_path / "test.db"
        target = tmp_path / "copy.db"

        # Seed the source database with one row.
        writer = sqlite3.connect(str(source))
        writer.execute("CREATE TABLE t (x INTEGER)")
        writer.execute("INSERT INTO t VALUES (42)")
        writer.commit()
        writer.close()

        assert _safe_copy_db(source, target) is True

        # The copy must contain the same data.
        reader = sqlite3.connect(str(target))
        try:
            rows = reader.execute("SELECT x FROM t").fetchall()
        finally:
            reader.close()
        assert rows == [(42,)]

    def test_copies_wal_mode_database(self, tmp_path):
        from hermes_cli.backup import _safe_copy_db

        source = tmp_path / "wal.db"
        target = tmp_path / "copy.db"

        # WAL mode keeps recent writes in a sidecar file; the copy must still
        # capture them.
        writer = sqlite3.connect(str(source))
        writer.execute("PRAGMA journal_mode=WAL")
        writer.execute("CREATE TABLE t (x TEXT)")
        writer.execute("INSERT INTO t VALUES ('wal-test')")
        writer.commit()
        writer.close()

        assert _safe_copy_db(source, target) is True

        reader = sqlite3.connect(str(target))
        try:
            rows = reader.execute("SELECT x FROM t").fetchall()
        finally:
            reader.close()
        assert rows == [("wal-test",)]
# ---------------------------------------------------------------------------
# Quick state snapshot tests
# ---------------------------------------------------------------------------
class TestQuickSnapshot:
    """Quick state snapshots: create, list, restore, and prune."""

    @pytest.fixture
    def hermes_home(self, tmp_path):
        """Create a fake HERMES_HOME with critical state files."""
        home = tmp_path / ".hermes"
        home.mkdir()
        (home / "config.yaml").write_text("model:\n provider: openrouter\n")
        (home / ".env").write_text("OPENROUTER_API_KEY=test-key-123\n")
        (home / "auth.json").write_text('{"providers": {}}\n')
        (home / "cron").mkdir()
        (home / "cron" / "jobs.json").write_text('{"jobs": []}\n')
        # Real SQLite database
        db_path = home / "state.db"
        conn = sqlite3.connect(str(db_path))
        conn.execute("CREATE TABLE sessions (id TEXT PRIMARY KEY, data TEXT)")
        conn.execute("INSERT INTO sessions VALUES ('s1', 'hello world')")
        conn.commit()
        conn.close()
        return home

    def test_creates_snapshot(self, hermes_home):
        # A snapshot directory with a manifest is created under state-snapshots/.
        from hermes_cli.backup import create_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        assert snap_id is not None
        snap_dir = hermes_home / "state-snapshots" / snap_id
        assert snap_dir.is_dir()
        assert (snap_dir / "manifest.json").exists()

    def test_label_in_id(self, hermes_home):
        # A user-supplied label is embedded in the snapshot id.
        from hermes_cli.backup import create_quick_snapshot

        snap_id = create_quick_snapshot(label="before-upgrade", hermes_home=hermes_home)
        assert "before-upgrade" in snap_id

    def test_state_db_safely_copied(self, hermes_home):
        # The copied state.db must be a readable SQLite database with the data.
        from hermes_cli.backup import create_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        db_copy = hermes_home / "state-snapshots" / snap_id / "state.db"
        assert db_copy.exists()
        conn = sqlite3.connect(str(db_copy))
        rows = conn.execute("SELECT * FROM sessions").fetchall()
        conn.close()
        assert len(rows) == 1
        assert rows[0] == ("s1", "hello world")

    def test_copies_nested_files(self, hermes_home):
        # Files in subdirectories (cron/jobs.json) are preserved in the snapshot.
        from hermes_cli.backup import create_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        assert (hermes_home / "state-snapshots" / snap_id / "cron" / "jobs.json").exists()

    def test_missing_files_skipped(self, hermes_home):
        # Files absent from the home dir are simply omitted from the manifest.
        from hermes_cli.backup import create_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        with open(hermes_home / "state-snapshots" / snap_id / "manifest.json") as f:
            meta = json.load(f)
        # gateway_state.json etc. don't exist in fixture
        assert "gateway_state.json" not in meta["files"]

    def test_empty_home_returns_none(self, tmp_path):
        # Nothing to snapshot → no snapshot id.
        from hermes_cli.backup import create_quick_snapshot

        empty = tmp_path / "empty"
        empty.mkdir()
        assert create_quick_snapshot(hermes_home=empty) is None

    def test_list_snapshots(self, hermes_home):
        from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots

        id1 = create_quick_snapshot(label="first", hermes_home=hermes_home)
        id2 = create_quick_snapshot(label="second", hermes_home=hermes_home)
        snaps = list_quick_snapshots(hermes_home=hermes_home)
        assert len(snaps) == 2
        assert snaps[0]["id"] == id2  # most recent first
        assert snaps[1]["id"] == id1

    def test_list_limit(self, hermes_home):
        # limit= caps the number of snapshots returned.
        from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots

        for i in range(5):
            create_quick_snapshot(label=f"s{i}", hermes_home=hermes_home)
        snaps = list_quick_snapshots(limit=3, hermes_home=hermes_home)
        assert len(snaps) == 3

    def test_restore_config(self, hermes_home):
        # Restoring reverts config.yaml to its snapshotted content.
        from hermes_cli.backup import create_quick_snapshot, restore_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        (hermes_home / "config.yaml").write_text("model:\n provider: anthropic\n")
        assert "anthropic" in (hermes_home / "config.yaml").read_text()
        result = restore_quick_snapshot(snap_id, hermes_home=hermes_home)
        assert result is True
        assert "openrouter" in (hermes_home / "config.yaml").read_text()

    def test_restore_state_db(self, hermes_home):
        # Rows inserted after the snapshot disappear once it is restored.
        from hermes_cli.backup import create_quick_snapshot, restore_quick_snapshot

        snap_id = create_quick_snapshot(hermes_home=hermes_home)
        conn = sqlite3.connect(str(hermes_home / "state.db"))
        conn.execute("INSERT INTO sessions VALUES ('s2', 'new')")
        conn.commit()
        conn.close()
        restore_quick_snapshot(snap_id, hermes_home=hermes_home)
        conn = sqlite3.connect(str(hermes_home / "state.db"))
        rows = conn.execute("SELECT * FROM sessions").fetchall()
        conn.close()
        assert len(rows) == 1

    def test_restore_nonexistent(self, hermes_home):
        # Restoring an unknown id fails cleanly.
        from hermes_cli.backup import restore_quick_snapshot

        assert restore_quick_snapshot("nonexistent", hermes_home=hermes_home) is False

    def test_auto_prune(self, hermes_home):
        # Creating more than _QUICK_DEFAULT_KEEP snapshots prunes automatically.
        from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots, _QUICK_DEFAULT_KEEP

        for i in range(_QUICK_DEFAULT_KEEP + 5):
            create_quick_snapshot(label=f"snap-{i:03d}", hermes_home=hermes_home)
        snaps = list_quick_snapshots(limit=100, hermes_home=hermes_home)
        assert len(snaps) <= _QUICK_DEFAULT_KEEP

    def test_manual_prune(self, hermes_home):
        # prune_quick_snapshots(keep=N) reports how many it deleted.
        from hermes_cli.backup import create_quick_snapshot, prune_quick_snapshots, list_quick_snapshots

        for i in range(10):
            create_quick_snapshot(label=f"s{i}", hermes_home=hermes_home)
        deleted = prune_quick_snapshots(keep=3, hermes_home=hermes_home)
        assert deleted == 7
        assert len(list_quick_snapshots(hermes_home=hermes_home)) == 3

View file

@ -1,6 +1,7 @@
"""Tests for hermes claw commands.""" """Tests for hermes claw commands."""
from argparse import Namespace from argparse import Namespace
import subprocess
from types import ModuleType from types import ModuleType
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
@ -197,6 +198,11 @@ class TestClawCommand:
class TestCmdMigrate: class TestCmdMigrate:
"""Test the migrate command handler.""" """Test the migrate command handler."""
@pytest.fixture(autouse=True)
def _mock_openclaw_running(self):
with patch.object(claw_mod, "_detect_openclaw_processes", return_value=[]):
yield
def test_error_when_source_missing(self, tmp_path, capsys): def test_error_when_source_missing(self, tmp_path, capsys):
args = Namespace( args = Namespace(
source=str(tmp_path / "nonexistent"), source=str(tmp_path / "nonexistent"),
@ -626,3 +632,120 @@ class TestPrintMigrationReport:
claw_mod._print_migration_report(report, dry_run=False) claw_mod._print_migration_report(report, dry_run=False)
captured = capsys.readouterr() captured = capsys.readouterr()
assert "Nothing to migrate" in captured.out assert "Nothing to migrate" in captured.out
class TestDetectOpenclawProcesses:
    """Platform-specific detection of a running OpenClaw install.

    The ``side_effect`` lists below are position-critical: on Linux/macOS the
    code under test runs systemctl first, then pgrep; on Windows it runs
    tasklist (twice) and then a PowerShell cmdline scan.
    """

    def test_returns_match_when_pgrep_finds_openclaw(self):
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "linux"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                # systemd check misses, pgrep finds openclaw
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=1, stdout=""),  # systemctl
                    MagicMock(returncode=0, stdout="1234\n"),  # pgrep
                ]
                mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired
                result = claw_mod._detect_openclaw_processes()
                assert len(result) == 1
                assert "1234" in result[0]

    def test_returns_empty_when_pgrep_finds_nothing(self):
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "darwin"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=1, stdout=""),  # systemctl (not found)
                    MagicMock(returncode=1, stdout=""),  # pgrep
                ]
                mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired
                result = claw_mod._detect_openclaw_processes()
                assert result == []

    def test_detects_systemd_service(self):
        # An active systemd unit alone counts as a running install.
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "linux"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=0, stdout="active\n"),  # systemctl
                    MagicMock(returncode=1, stdout=""),  # pgrep
                ]
                mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired
                result = claw_mod._detect_openclaw_processes()
                assert len(result) == 1
                assert "systemd" in result[0]

    def test_returns_match_on_windows_when_openclaw_exe_running(self):
        # tasklist reporting openclaw.exe is a direct hit.
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "win32"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=0, stdout="openclaw.exe 1234 Console 1 45,056 K\n"),
                ]
                result = claw_mod._detect_openclaw_processes()
                assert len(result) >= 1
                assert any("openclaw.exe" in r for r in result)

    def test_returns_match_on_windows_when_node_exe_has_openclaw_in_cmdline(self):
        # Fallback: a node.exe whose command line mentions openclaw.
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "win32"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=0, stdout=""),  # tasklist openclaw.exe
                    MagicMock(returncode=0, stdout=""),  # tasklist clawd.exe
                    MagicMock(returncode=0, stdout="1234\n"),  # PowerShell
                ]
                result = claw_mod._detect_openclaw_processes()
                assert len(result) >= 1
                assert any("node.exe" in r for r in result)

    def test_returns_empty_on_windows_when_nothing_found(self):
        with patch.object(claw_mod, "sys") as mock_sys:
            mock_sys.platform = "win32"
            with patch.object(claw_mod, "subprocess") as mock_subprocess:
                mock_subprocess.run.side_effect = [
                    MagicMock(returncode=0, stdout=""),
                    MagicMock(returncode=0, stdout=""),
                    MagicMock(returncode=0, stdout=""),
                ]
                result = claw_mod._detect_openclaw_processes()
                assert result == []
class TestWarnIfOpenclawRunning:
    """Interactive warning shown when OpenClaw is detected during migration."""

    def test_noop_when_not_running(self, capsys):
        # No detection → no output at all.
        with patch.object(claw_mod, "_detect_openclaw_processes", return_value=[]):
            claw_mod._warn_if_openclaw_running(auto_yes=False)
        captured = capsys.readouterr()
        assert captured.out == ""

    def test_warns_and_exits_when_running_and_user_declines(self, capsys):
        # Interactive TTY + user answers "no" → clean exit with code 0.
        with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]):
            with patch.object(claw_mod, "prompt_yes_no", return_value=False):
                with patch.object(claw_mod.sys.stdin, "isatty", return_value=True):
                    with pytest.raises(SystemExit) as exc_info:
                        claw_mod._warn_if_openclaw_running(auto_yes=False)
        assert exc_info.value.code == 0
        captured = capsys.readouterr()
        assert "OpenClaw appears to be running" in captured.out

    def test_warns_and_continues_when_running_and_user_accepts(self, capsys):
        # Interactive TTY + user answers "yes" → warn but proceed.
        with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]):
            with patch.object(claw_mod, "prompt_yes_no", return_value=True):
                with patch.object(claw_mod.sys.stdin, "isatty", return_value=True):
                    claw_mod._warn_if_openclaw_running(auto_yes=False)
        captured = capsys.readouterr()
        assert "OpenClaw appears to be running" in captured.out

    def test_warns_and_continues_in_auto_yes_mode(self, capsys):
        # --yes mode: warn without prompting and proceed.
        with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]):
            claw_mod._warn_if_openclaw_running(auto_yes=True)
        captured = capsys.readouterr()
        assert "OpenClaw appears to be running" in captured.out

    def test_warns_and_continues_in_non_interactive_session(self, capsys):
        # No TTY: cannot prompt, so warn and proceed with an explanatory note.
        with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]):
            with patch.object(claw_mod.sys.stdin, "isatty", return_value=False):
                claw_mod._warn_if_openclaw_running(auto_yes=False)
        captured = capsys.readouterr()
        assert "OpenClaw appears to be running" in captured.out
        assert "Non-interactive session" in captured.out

View file

@ -10,6 +10,7 @@ from hermes_cli.config import (
DEFAULT_CONFIG, DEFAULT_CONFIG,
get_hermes_home, get_hermes_home,
ensure_hermes_home, ensure_hermes_home,
get_compatible_custom_providers,
load_config, load_config,
load_env, load_env,
migrate_config, migrate_config,
@ -424,6 +425,146 @@ class TestAnthropicTokenMigration:
assert load_env().get("ANTHROPIC_TOKEN") == "current-token" assert load_env().get("ANTHROPIC_TOKEN") == "current-token"
class TestCustomProviderCompatibility:
"""Custom provider compatibility across legacy and v12+ config schemas."""
def test_v11_upgrade_moves_custom_providers_into_providers(self, tmp_path):
config_path = tmp_path / "config.yaml"
config_path.write_text(
yaml.safe_dump(
{
"_config_version": 11,
"model": {
"default": "openai/gpt-5.4",
"provider": "openrouter",
},
"custom_providers": [
{
"name": "OpenAI Direct",
"base_url": "https://api.openai.com/v1",
"api_key": "test-key",
"api_mode": "codex_responses",
"model": "gpt-5-mini",
}
],
"fallback_providers": [
{"provider": "openai-direct", "model": "gpt-5-mini"}
],
}
),
encoding="utf-8",
)
with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
migrate_config(interactive=False, quiet=True)
raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert raw["_config_version"] == 17
assert raw["providers"]["openai-direct"] == {
"api": "https://api.openai.com/v1",
"api_key": "test-key",
"default_model": "gpt-5-mini",
"name": "OpenAI Direct",
"transport": "codex_responses",
}
# custom_providers removed by migration — runtime reads via compat layer
assert "custom_providers" not in raw
def test_providers_dict_resolves_at_runtime(self, tmp_path):
"""After migration deleted custom_providers, get_compatible_custom_providers
still finds entries from the providers dict."""
config_path = tmp_path / "config.yaml"
config_path.write_text(
yaml.safe_dump(
{
"_config_version": 17,
"providers": {
"openai-direct": {
"api": "https://api.openai.com/v1",
"api_key": "test-key",
"default_model": "gpt-5-mini",
"name": "OpenAI Direct",
"transport": "codex_responses",
}
},
}
),
encoding="utf-8",
)
with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
compatible = get_compatible_custom_providers()
assert len(compatible) == 1
assert compatible[0]["name"] == "OpenAI Direct"
assert compatible[0]["base_url"] == "https://api.openai.com/v1"
assert compatible[0]["provider_key"] == "openai-direct"
assert compatible[0]["api_mode"] == "codex_responses"
def test_compatible_custom_providers_prefers_api_then_url_then_base_url(self, tmp_path):
config_path = tmp_path / "config.yaml"
config_path.write_text(
yaml.safe_dump(
{
"_config_version": 17,
"providers": {
"my-provider": {
"name": "My Provider",
"api": "https://api.example.com/v1",
"url": "https://url.example.com/v1",
"base_url": "https://base.example.com/v1",
}
},
}
),
encoding="utf-8",
)
with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
compatible = get_compatible_custom_providers()
assert compatible == [
{
"name": "My Provider",
"base_url": "https://api.example.com/v1",
"provider_key": "my-provider",
}
]
def test_dedup_across_legacy_and_providers(self, tmp_path):
    """Same name+url in both schemas should not produce duplicates."""
    config_path = tmp_path / "config.yaml"
    # Write a config that lists the SAME provider (identical name + URL)
    # under both the legacy ``custom_providers`` list and the new
    # ``providers`` dict — with different api_keys so we can tell which
    # schema's entry survived deduplication.
    config_path.write_text(
        yaml.safe_dump(
            {
                "_config_version": 17,
                "custom_providers": [
                    {
                        "name": "OpenAI Direct",
                        "base_url": "https://api.openai.com/v1",
                        "api_key": "legacy-key",
                    }
                ],
                "providers": {
                    "openai-direct": {
                        "api": "https://api.openai.com/v1",
                        "api_key": "new-key",
                        "name": "OpenAI Direct",
                    }
                },
            }
        ),
        encoding="utf-8",
    )
    with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
        compatible = get_compatible_custom_providers()
    assert len(compatible) == 1
    # Legacy entry wins (read first)
    assert compatible[0]["api_key"] == "legacy-key"
class TestInterimAssistantMessageConfig: class TestInterimAssistantMessageConfig:
"""Test the explicit gateway interim-message config gate.""" """Test the explicit gateway interim-message config gate."""
@ -441,6 +582,6 @@ class TestInterimAssistantMessageConfig:
migrate_config(interactive=False, quiet=True) migrate_config(interactive=False, quiet=True)
raw = yaml.safe_load(config_path.read_text(encoding="utf-8")) raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
assert raw["_config_version"] == 16 assert raw["_config_version"] == 17
assert raw["display"]["tool_progress"] == "off" assert raw["display"]["tool_progress"] == "off"
assert raw["display"]["interim_assistant_messages"] is True assert raw["display"]["interim_assistant_messages"] is True

View file

@ -12,49 +12,10 @@ from unittest.mock import MagicMock, patch
import pytest import pytest
from hermes_cli.config import ( from hermes_cli.config import (
_is_inside_container,
get_container_exec_info, get_container_exec_info,
) )
# =============================================================================
# _is_inside_container
# =============================================================================
def test_is_inside_container_dockerenv():
"""Detects /.dockerenv marker file."""
with patch("os.path.exists") as mock_exists:
mock_exists.side_effect = lambda p: p == "/.dockerenv"
assert _is_inside_container() is True
def test_is_inside_container_containerenv():
"""Detects Podman's /run/.containerenv marker."""
with patch("os.path.exists") as mock_exists:
mock_exists.side_effect = lambda p: p == "/run/.containerenv"
assert _is_inside_container() is True
def test_is_inside_container_cgroup_docker():
"""Detects 'docker' in /proc/1/cgroup."""
with patch("os.path.exists", return_value=False), \
patch("builtins.open", create=True) as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = MagicMock(return_value=False)
mock_open.return_value.read = MagicMock(
return_value="12:memory:/docker/abc123\n"
)
assert _is_inside_container() is True
def test_is_inside_container_false_on_host():
"""Returns False when none of the container indicators are present."""
with patch("os.path.exists", return_value=False), \
patch("builtins.open", side_effect=OSError("no such file")):
assert _is_inside_container() is False
# ============================================================================= # =============================================================================
# get_container_exec_info # get_container_exec_info
# ============================================================================= # =============================================================================
@ -81,7 +42,7 @@ def container_env(tmp_path, monkeypatch):
def test_get_container_exec_info_returns_metadata(container_env): def test_get_container_exec_info_returns_metadata(container_env):
"""Reads .container-mode and returns all fields including exec_user.""" """Reads .container-mode and returns all fields including exec_user."""
with patch("hermes_cli.config._is_inside_container", return_value=False): with patch("hermes_constants.is_container", return_value=False):
info = get_container_exec_info() info = get_container_exec_info()
assert info is not None assert info is not None
@ -93,7 +54,7 @@ def test_get_container_exec_info_returns_metadata(container_env):
def test_get_container_exec_info_none_inside_container(container_env): def test_get_container_exec_info_none_inside_container(container_env):
"""Returns None when we're already inside a container.""" """Returns None when we're already inside a container."""
with patch("hermes_cli.config._is_inside_container", return_value=True): with patch("hermes_constants.is_container", return_value=True):
info = get_container_exec_info() info = get_container_exec_info()
assert info is None assert info is None
@ -106,7 +67,7 @@ def test_get_container_exec_info_none_without_file(tmp_path, monkeypatch):
monkeypatch.setenv("HERMES_HOME", str(hermes_home)) monkeypatch.setenv("HERMES_HOME", str(hermes_home))
monkeypatch.delenv("HERMES_DEV", raising=False) monkeypatch.delenv("HERMES_DEV", raising=False)
with patch("hermes_cli.config._is_inside_container", return_value=False): with patch("hermes_constants.is_container", return_value=False):
info = get_container_exec_info() info = get_container_exec_info()
assert info is None assert info is None
@ -116,7 +77,7 @@ def test_get_container_exec_info_skipped_when_hermes_dev(container_env, monkeypa
"""Returns None when HERMES_DEV=1 is set (dev mode bypass).""" """Returns None when HERMES_DEV=1 is set (dev mode bypass)."""
monkeypatch.setenv("HERMES_DEV", "1") monkeypatch.setenv("HERMES_DEV", "1")
with patch("hermes_cli.config._is_inside_container", return_value=False): with patch("hermes_constants.is_container", return_value=False):
info = get_container_exec_info() info = get_container_exec_info()
assert info is None assert info is None
@ -126,7 +87,7 @@ def test_get_container_exec_info_not_skipped_when_hermes_dev_zero(container_env,
"""HERMES_DEV=0 does NOT trigger bypass — only '1' does.""" """HERMES_DEV=0 does NOT trigger bypass — only '1' does."""
monkeypatch.setenv("HERMES_DEV", "0") monkeypatch.setenv("HERMES_DEV", "0")
with patch("hermes_cli.config._is_inside_container", return_value=False): with patch("hermes_constants.is_container", return_value=False):
info = get_container_exec_info() info = get_container_exec_info()
assert info is not None assert info is not None
@ -143,7 +104,7 @@ def test_get_container_exec_info_defaults():
"# minimal file with no keys\n" "# minimal file with no keys\n"
) )
with patch("hermes_cli.config._is_inside_container", return_value=False), \ with patch("hermes_constants.is_container", return_value=False), \
patch("hermes_cli.config.get_hermes_home", return_value=hermes_home), \ patch("hermes_cli.config.get_hermes_home", return_value=hermes_home), \
patch.dict(os.environ, {}, clear=False): patch.dict(os.environ, {}, clear=False):
os.environ.pop("HERMES_DEV", None) os.environ.pop("HERMES_DEV", None)
@ -165,7 +126,7 @@ def test_get_container_exec_info_docker_backend(container_env):
"hermes_bin=/opt/hermes/bin/hermes\n" "hermes_bin=/opt/hermes/bin/hermes\n"
) )
with patch("hermes_cli.config._is_inside_container", return_value=False): with patch("hermes_constants.is_container", return_value=False):
info = get_container_exec_info() info = get_container_exec_info()
assert info["backend"] == "docker" assert info["backend"] == "docker"
@ -176,7 +137,7 @@ def test_get_container_exec_info_docker_backend(container_env):
def test_get_container_exec_info_crashes_on_permission_error(container_env): def test_get_container_exec_info_crashes_on_permission_error(container_env):
"""PermissionError propagates instead of being silently swallowed.""" """PermissionError propagates instead of being silently swallowed."""
with patch("hermes_cli.config._is_inside_container", return_value=False), \ with patch("hermes_constants.is_container", return_value=False), \
patch("builtins.open", side_effect=PermissionError("permission denied")): patch("builtins.open", side_effect=PermissionError("permission denied")):
with pytest.raises(PermissionError): with pytest.raises(PermissionError):
get_container_exec_info() get_container_exec_info()

View file

@ -0,0 +1,461 @@
"""Tests for ``hermes debug`` CLI command and debug utilities."""
import os
import sys
import urllib.error
from pathlib import Path
from unittest.mock import MagicMock, patch, call
import pytest
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def hermes_home(tmp_path, monkeypatch):
    """Point HERMES_HOME at a temp dir pre-populated with small log files."""
    home = tmp_path / ".hermes"
    home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(home))

    # Seed the logs directory with one small file per known log name so
    # the debug helpers have something to read.
    logs_dir = home / "logs"
    logs_dir.mkdir()
    seed_logs = {
        "agent.log": (
            "2026-04-12 17:00:00 INFO agent: session started\n"
            "2026-04-12 17:00:01 INFO tools.terminal: running ls\n"
            "2026-04-12 17:00:02 WARNING agent: high token usage\n"
        ),
        "errors.log": "2026-04-12 17:00:05 ERROR gateway.run: connection lost\n",
        "gateway.log": "2026-04-12 17:00:10 INFO gateway.run: started\n",
    }
    for filename, text in seed_logs.items():
        (logs_dir / filename).write_text(text)
    return home
# ---------------------------------------------------------------------------
# Unit tests for upload helpers
# ---------------------------------------------------------------------------
class TestUploadPasteRs:
    """Test paste.rs upload path."""

    def test_upload_paste_rs_success(self):
        # A valid paste.rs response body is the paste URL; the helper is
        # expected to strip trailing whitespace and return it.
        from hermes_cli.debug import _upload_paste_rs

        mock_resp = MagicMock()
        mock_resp.read.return_value = b"https://paste.rs/abc123\n"
        # Make the mock usable as a context manager (urlopen in a `with`).
        mock_resp.__enter__ = lambda s: s
        mock_resp.__exit__ = MagicMock(return_value=False)
        with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp):
            url = _upload_paste_rs("hello world")
            assert url == "https://paste.rs/abc123"

    def test_upload_paste_rs_bad_response(self):
        # A body that is not a paste URL must be rejected with ValueError.
        from hermes_cli.debug import _upload_paste_rs

        mock_resp = MagicMock()
        mock_resp.read.return_value = b"<html>error</html>"
        mock_resp.__enter__ = lambda s: s
        mock_resp.__exit__ = MagicMock(return_value=False)
        with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp):
            with pytest.raises(ValueError, match="Unexpected response"):
                _upload_paste_rs("test")

    def test_upload_paste_rs_network_error(self):
        # Network failures propagate as URLError so the caller can fall back.
        from hermes_cli.debug import _upload_paste_rs

        with patch(
            "hermes_cli.debug.urllib.request.urlopen",
            side_effect=urllib.error.URLError("connection refused"),
        ):
            with pytest.raises(urllib.error.URLError):
                _upload_paste_rs("test")
class TestUploadDpasteCom:
    """Test dpaste.com fallback upload path."""

    def test_upload_dpaste_com_success(self):
        # dpaste.com also returns the paste URL as the response body.
        from hermes_cli.debug import _upload_dpaste_com

        mock_resp = MagicMock()
        mock_resp.read.return_value = b"https://dpaste.com/ABCDEFG\n"
        # Allow use as a context manager ("with urlopen(...) as resp").
        mock_resp.__enter__ = lambda s: s
        mock_resp.__exit__ = MagicMock(return_value=False)
        with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp):
            url = _upload_dpaste_com("hello world", expiry_days=7)
            assert url == "https://dpaste.com/ABCDEFG"
class TestUploadToPastebin:
    """Test the combined upload with fallback."""

    def test_tries_paste_rs_first(self):
        # paste.rs is the primary backend; success there short-circuits.
        from hermes_cli.debug import upload_to_pastebin

        with patch("hermes_cli.debug._upload_paste_rs",
                   return_value="https://paste.rs/test") as prs:
            url = upload_to_pastebin("content")
            assert url == "https://paste.rs/test"
            prs.assert_called_once()

    def test_falls_back_to_dpaste_com(self):
        # When paste.rs raises, the dpaste.com fallback must be tried.
        from hermes_cli.debug import upload_to_pastebin

        with patch("hermes_cli.debug._upload_paste_rs",
                   side_effect=Exception("down")), \
             patch("hermes_cli.debug._upload_dpaste_com",
                   return_value="https://dpaste.com/TEST") as dp:
            url = upload_to_pastebin("content")
            assert url == "https://dpaste.com/TEST"
            dp.assert_called_once()

    def test_raises_when_both_fail(self):
        # Both backends failing surfaces a single RuntimeError to the caller.
        from hermes_cli.debug import upload_to_pastebin

        with patch("hermes_cli.debug._upload_paste_rs",
                   side_effect=Exception("err1")), \
             patch("hermes_cli.debug._upload_dpaste_com",
                   side_effect=Exception("err2")):
            with pytest.raises(RuntimeError, match="Failed to upload"):
                upload_to_pastebin("content")
# ---------------------------------------------------------------------------
# Log reading
# ---------------------------------------------------------------------------
class TestReadFullLog:
    """Test _read_full_log for standalone log uploads."""

    def test_reads_small_file(self, hermes_home):
        # Small files are returned whole, untruncated.
        from hermes_cli.debug import _read_full_log

        content = _read_full_log("agent")
        assert content is not None
        assert "session started" in content

    def test_returns_none_for_missing(self, tmp_path, monkeypatch):
        # A HERMES_HOME with no logs at all yields None rather than raising.
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        from hermes_cli.debug import _read_full_log

        assert _read_full_log("agent") is None

    def test_returns_none_for_empty(self, hermes_home):
        # Truncate agent.log to empty
        (hermes_home / "logs" / "agent.log").write_text("")
        from hermes_cli.debug import _read_full_log

        assert _read_full_log("agent") is None

    def test_truncates_large_file(self, hermes_home):
        """Files larger than max_bytes get tail-truncated."""
        from hermes_cli.debug import _read_full_log

        # Write a file larger than 1KB
        big_content = "x" * 100 + "\n"
        (hermes_home / "logs" / "agent.log").write_text(big_content * 200)
        content = _read_full_log("agent", max_bytes=1024)
        assert content is not None
        # Truncated output is expected to carry a "truncated" marker.
        assert "truncated" in content

    def test_unknown_log_returns_none(self, hermes_home):
        # Only known log names are served; anything else returns None.
        from hermes_cli.debug import _read_full_log

        assert _read_full_log("nonexistent") is None

    def test_falls_back_to_rotated_file(self, hermes_home):
        """When gateway.log doesn't exist, falls back to gateway.log.1."""
        from hermes_cli.debug import _read_full_log

        logs_dir = hermes_home / "logs"
        # Remove the primary (if any) and create a .1 rotation
        (logs_dir / "gateway.log").unlink(missing_ok=True)
        (logs_dir / "gateway.log.1").write_text(
            "2026-04-12 10:00:00 INFO gateway.run: rotated content\n"
        )
        content = _read_full_log("gateway")
        assert content is not None
        assert "rotated content" in content

    def test_prefers_primary_over_rotated(self, hermes_home):
        """Primary log is used when it exists, even if .1 also exists."""
        from hermes_cli.debug import _read_full_log

        logs_dir = hermes_home / "logs"
        (logs_dir / "gateway.log").write_text("primary content\n")
        (logs_dir / "gateway.log.1").write_text("rotated content\n")
        content = _read_full_log("gateway")
        assert "primary content" in content
        assert "rotated" not in content

    def test_falls_back_when_primary_empty(self, hermes_home):
        """Empty primary log falls back to .1 rotation."""
        from hermes_cli.debug import _read_full_log

        logs_dir = hermes_home / "logs"
        (logs_dir / "agent.log").write_text("")
        (logs_dir / "agent.log.1").write_text("rotated agent data\n")
        content = _read_full_log("agent")
        assert content is not None
        assert "rotated agent data" in content
# ---------------------------------------------------------------------------
# Debug report collection
# ---------------------------------------------------------------------------
class TestCollectDebugReport:
    """Test the debug report builder."""

    def test_report_includes_dump_output(self, hermes_home):
        from hermes_cli.debug import collect_debug_report

        # collect_debug_report appears to capture run_dump's stdout;
        # simulate that output via a printing side effect.
        with patch("hermes_cli.dump.run_dump") as mock_dump:
            mock_dump.side_effect = lambda args: print(
                "--- hermes dump ---\nversion: 0.8.0\n--- end dump ---"
            )
            report = collect_debug_report(log_lines=50)
            assert "--- hermes dump ---" in report
            assert "version: 0.8.0" in report

    def test_report_includes_agent_log(self, hermes_home):
        # The report embeds a tail of agent.log under a section header.
        from hermes_cli.debug import collect_debug_report

        with patch("hermes_cli.dump.run_dump"):
            report = collect_debug_report(log_lines=50)
            assert "--- agent.log" in report
            assert "session started" in report

    def test_report_includes_errors_log(self, hermes_home):
        from hermes_cli.debug import collect_debug_report

        with patch("hermes_cli.dump.run_dump"):
            report = collect_debug_report(log_lines=50)
            assert "--- errors.log" in report
            assert "connection lost" in report

    def test_report_includes_gateway_log(self, hermes_home):
        from hermes_cli.debug import collect_debug_report

        with patch("hermes_cli.dump.run_dump"):
            report = collect_debug_report(log_lines=50)
            assert "--- gateway.log" in report

    def test_missing_logs_handled(self, tmp_path, monkeypatch):
        # With no logs dir at all, the report notes the absence instead
        # of raising.
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        from hermes_cli.debug import collect_debug_report

        with patch("hermes_cli.dump.run_dump"):
            report = collect_debug_report(log_lines=50)
            assert "(file not found)" in report
# ---------------------------------------------------------------------------
# CLI entry point — run_debug_share
# ---------------------------------------------------------------------------
class TestRunDebugShare:
    """Test the run_debug_share CLI handler."""

    def test_local_flag_prints_full_logs(self, hermes_home, capsys):
        """--local prints the report plus full log contents."""
        from hermes_cli.debug import run_debug_share

        # Simulate the parsed argparse namespace.
        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = True
        with patch("hermes_cli.dump.run_dump"):
            run_debug_share(args)
        out = capsys.readouterr().out
        assert "--- agent.log" in out
        assert "FULL agent.log" in out
        assert "FULL gateway.log" in out

    def test_share_uploads_three_pastes(self, hermes_home, capsys):
        """Successful share uploads report + agent.log + gateway.log."""
        from hermes_cli.debug import run_debug_share

        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = False
        # Count uploads and capture each paste body for inspection below.
        call_count = [0]
        uploaded_content = []

        def _mock_upload(content, expiry_days=7):
            call_count[0] += 1
            uploaded_content.append(content)
            return f"https://paste.rs/paste{call_count[0]}"

        with patch("hermes_cli.dump.run_dump") as mock_dump, \
             patch("hermes_cli.debug.upload_to_pastebin",
                   side_effect=_mock_upload):
            mock_dump.side_effect = lambda a: print("--- hermes dump ---\nversion: test\n--- end dump ---")
            run_debug_share(args)
        out = capsys.readouterr().out
        # Should have 3 uploads: report, agent.log, gateway.log
        assert call_count[0] == 3
        assert "paste.rs/paste1" in out  # Report
        assert "paste.rs/paste2" in out  # agent.log
        assert "paste.rs/paste3" in out  # gateway.log
        assert "Report" in out
        assert "agent.log" in out
        assert "gateway.log" in out
        # Each log paste should start with the dump header
        agent_paste = uploaded_content[1]
        assert "--- hermes dump ---" in agent_paste
        assert "--- full agent.log ---" in agent_paste
        gateway_paste = uploaded_content[2]
        assert "--- hermes dump ---" in gateway_paste
        assert "--- full gateway.log ---" in gateway_paste

    def test_share_skips_missing_logs(self, tmp_path, monkeypatch, capsys):
        """Only uploads logs that exist."""
        # Fresh HERMES_HOME with no log files at all.
        home = tmp_path / ".hermes"
        home.mkdir()
        monkeypatch.setenv("HERMES_HOME", str(home))
        from hermes_cli.debug import run_debug_share

        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = False
        call_count = [0]

        def _mock_upload(content, expiry_days=7):
            call_count[0] += 1
            return f"https://paste.rs/paste{call_count[0]}"

        with patch("hermes_cli.dump.run_dump"), \
             patch("hermes_cli.debug.upload_to_pastebin",
                   side_effect=_mock_upload):
            run_debug_share(args)
        out = capsys.readouterr().out
        # Only the report should be uploaded (no log files exist)
        assert call_count[0] == 1
        assert "Report" in out

    def test_share_continues_on_log_upload_failure(self, hermes_home, capsys):
        """Log upload failure doesn't stop the report from being shared."""
        from hermes_cli.debug import run_debug_share

        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = False
        call_count = [0]

        # First upload (the report) succeeds; subsequent log uploads fail.
        def _mock_upload(content, expiry_days=7):
            call_count[0] += 1
            if call_count[0] > 1:
                raise RuntimeError("upload failed")
            return "https://paste.rs/report"

        with patch("hermes_cli.dump.run_dump"), \
             patch("hermes_cli.debug.upload_to_pastebin",
                   side_effect=_mock_upload):
            run_debug_share(args)
        out = capsys.readouterr().out
        assert "Report" in out
        assert "paste.rs/report" in out
        assert "failed to upload" in out

    def test_share_exits_on_report_upload_failure(self, hermes_home, capsys):
        """If the main report fails to upload, exit with code 1."""
        from hermes_cli.debug import run_debug_share

        args = MagicMock()
        args.lines = 50
        args.expire = 7
        args.local = False
        with patch("hermes_cli.dump.run_dump"), \
             patch("hermes_cli.debug.upload_to_pastebin",
                   side_effect=RuntimeError("all failed")):
            with pytest.raises(SystemExit) as exc_info:
                run_debug_share(args)
        assert exc_info.value.code == 1
        # The failure reason should be reported on stderr, not stdout.
        out = capsys.readouterr()
        assert "all failed" in out.err
# ---------------------------------------------------------------------------
# run_debug router
# ---------------------------------------------------------------------------
class TestRunDebug:
    """Routing behavior of the top-level ``run_debug`` dispatcher."""

    def test_no_subcommand_shows_usage(self, capsys):
        # With no subcommand set, the dispatcher prints usage help.
        from hermes_cli.debug import run_debug

        namespace = MagicMock()
        namespace.debug_command = None
        run_debug(namespace)
        captured = capsys.readouterr()
        assert "hermes debug share" in captured.out

    def test_share_subcommand_routes(self, hermes_home):
        # "share" routes into run_debug_share; stub out the dump step so
        # the handler runs without touching real config.
        from hermes_cli.debug import run_debug

        namespace = MagicMock()
        namespace.debug_command = "share"
        namespace.lines = 200
        namespace.expire = 7
        namespace.local = True
        with patch("hermes_cli.dump.run_dump"):
            run_debug(namespace)
# ---------------------------------------------------------------------------
# Argparse integration
# ---------------------------------------------------------------------------
class TestArgparseIntegration:
    """Sanity checks that the debug CLI wiring imports and dispatches."""

    def test_module_imports_clean(self):
        # Both entry points must be importable and callable.
        from hermes_cli.debug import run_debug, run_debug_share

        for handler in (run_debug, run_debug_share):
            assert callable(handler)

    def test_cmd_debug_dispatches(self):
        # main.cmd_debug should accept a namespace with no subcommand.
        from hermes_cli.main import cmd_debug

        namespace = MagicMock()
        namespace.debug_command = None
        cmd_debug(namespace)

View file

@ -0,0 +1,91 @@
"""Tests for .env sanitization during load to prevent token duplication (#8908)."""
import tempfile
from pathlib import Path
from unittest.mock import patch
def test_load_env_sanitizes_concatenated_lines():
    """Verify load_env() splits concatenated KEY=VALUE pairs.

    Reproduces the scenario from #8908 where a corrupted .env file
    contained multiple tokens on a single line, causing the bot token
    to be duplicated 8 times.
    """
    from hermes_cli.config import load_env

    token = "8356550917:AAGGEkzg06Hrc3Hjb3Sa1jkGVDOdU_lYy2Q"
    # One corrupted line: the token runs straight into the next key.
    corrupted = f"TELEGRAM_BOT_TOKEN={token}ANTHROPIC_API_KEY=sk-ant-test123\n"

    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".env", delete=False, encoding="utf-8"
    ) as handle:
        handle.write(corrupted)
        env_path = Path(handle.name)

    try:
        with patch("hermes_cli.config.get_env_path", return_value=env_path):
            parsed = load_env()
        bot_token = parsed.get("TELEGRAM_BOT_TOKEN")
        assert bot_token == token, (
            f"Token should be exactly '{token}', got '{bot_token}'"
        )
        assert parsed.get("ANTHROPIC_API_KEY") == "sk-ant-test123"
    finally:
        env_path.unlink(missing_ok=True)
def test_load_env_normal_file_unchanged():
    """A well-formed .env file should be parsed identically."""
    from hermes_cli.config import load_env

    env_lines = [
        "TELEGRAM_BOT_TOKEN=mytoken123",
        "ANTHROPIC_API_KEY=sk-ant-key",
        "# comment",
        "",
        "OPENAI_API_KEY=sk-openai",
    ]
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".env", delete=False, encoding="utf-8"
    ) as handle:
        handle.write("\n".join(env_lines) + "\n")
        env_path = Path(handle.name)

    try:
        with patch("hermes_cli.config.get_env_path", return_value=env_path):
            parsed = load_env()
        # Comments and blank lines are skipped; all three keys survive.
        expected = {
            "TELEGRAM_BOT_TOKEN": "mytoken123",
            "ANTHROPIC_API_KEY": "sk-ant-key",
            "OPENAI_API_KEY": "sk-openai",
        }
        for key, value in expected.items():
            assert parsed[key] == value
    finally:
        env_path.unlink(missing_ok=True)
def test_env_loader_sanitizes_before_dotenv():
    """Verify env_loader._sanitize_env_file_if_needed fixes corrupted files."""
    from hermes_cli.env_loader import _sanitize_env_file_if_needed

    token = "8356550917:AAGGEkzg06Hrc3Hjb3Sa1jkGVDOdU_lYy2Q"
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".env", delete=False, encoding="utf-8"
    ) as handle:
        # Corrupted single line: token concatenated with the next key.
        handle.write(f"TELEGRAM_BOT_TOKEN={token}ANTHROPIC_API_KEY=sk-ant-test\n")
        env_path = Path(handle.name)

    try:
        _sanitize_env_file_if_needed(env_path)
        lines = env_path.read_text(encoding="utf-8").splitlines(keepends=True)
        # The sanitizer must split the corruption into two separate lines.
        assert len(lines) == 2, f"Expected 2 lines, got {len(lines)}: {lines}"
        first, second = lines
        assert first.startswith("TELEGRAM_BOT_TOKEN=")
        assert second.startswith("ANTHROPIC_API_KEY=")
        # The recovered token must not include the second key's text.
        assert first.strip().split("=", 1)[1] == token
    finally:
        env_path.unlink(missing_ok=True)

Some files were not shown because too many files have changed in this diff Show more