diff --git a/.dockerignore b/.dockerignore
index ecf199fc9..244e60340 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -5,6 +5,7 @@
 # Dependencies
 node_modules
+.venv

 # CI/CD
 .github
diff --git a/.env.example b/.env.example
index a6e98751a..f2c5769c6 100644
--- a/.env.example
+++ b/.env.example
@@ -43,6 +43,7 @@
 # KIMI_BASE_URL=https://api.kimi.com/coding/v1   # Default for sk-kimi- keys
 # KIMI_BASE_URL=https://api.moonshot.ai/v1       # For legacy Moonshot keys
 # KIMI_BASE_URL=https://api.moonshot.cn/v1       # For Moonshot China keys
+# KIMI_CN_API_KEY=                               # Dedicated Moonshot China key

 # =============================================================================
 # LLM PROVIDER (MiniMax)
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..872621689
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Auto-generated files — collapse diffs and exclude from language stats
+web/package-lock.json linguist-generated=true
diff --git a/.github/workflows/deploy-site.yml b/.github/workflows/deploy-site.yml
index 3c471f376..c55a62908 100644
--- a/.github/workflows/deploy-site.yml
+++ b/.github/workflows/deploy-site.yml
@@ -41,11 +41,19 @@ jobs:
           python-version: '3.11'

       - name: Install PyYAML for skill extraction
-        run: pip install pyyaml
+        run: pip install pyyaml httpx

       - name: Extract skill metadata for dashboard
         run: python3 website/scripts/extract-skills.py

+      - name: Build skills index (if not already present)
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          if [ ! -f website/static/api/skills-index.json ]; then
+            python3 scripts/build_skills_index.py || echo "Skills index build failed (non-fatal)"
+          fi
+
       - name: Install dependencies
         run: npm ci
         working-directory: website
diff --git a/.github/workflows/skills-index.yml b/.github/workflows/skills-index.yml
new file mode 100644
index 000000000..6c03e4074
--- /dev/null
+++ b/.github/workflows/skills-index.yml
@@ -0,0 +1,101 @@
+name: Build Skills Index
+
+on:
+  schedule:
+    # Run twice daily: 6 AM and 6 PM UTC
+    - cron: '0 6,18 * * *'
+  workflow_dispatch:  # Manual trigger
+  push:
+    branches: [main]
+    paths:
+      - 'scripts/build_skills_index.py'
+      - '.github/workflows/skills-index.yml'
+
+permissions:
+  contents: read
+
+jobs:
+  build-index:
+    # Only run on the upstream repository, not on forks
+    if: github.repository == 'NousResearch/hermes-agent'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: pip install httpx pyyaml
+
+      - name: Build skills index
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: python scripts/build_skills_index.py
+
+      - name: Upload index artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: skills-index
+          path: website/static/api/skills-index.json
+          retention-days: 7
+
+  deploy-with-index:
+    needs: build-index
+    runs-on: ubuntu-latest
+    permissions:
+      pages: write
+      id-token: write
+    environment:
+      name: github-pages
+      url: ${{ steps.deploy.outputs.page_url }}
+    # Only deploy on schedule or manual trigger (not on every push to the script)
+    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/download-artifact@v4
+        with:
+          name: skills-index
+          path: website/static/api/
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: npm
+          cache-dependency-path: website/package-lock.json
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install PyYAML for skill extraction
+        run: pip install pyyaml
+
+      - name: Extract skill metadata for dashboard
+        run: python3 website/scripts/extract-skills.py
+
+      - name: Install dependencies
+        run: npm ci
+        working-directory: website
+
+      - name: Build Docusaurus
+        run: npm run build
+        working-directory: website
+
+      - name: Stage deployment
+        run: |
+          mkdir -p _site/docs
+          cp -r landingpage/* _site/
+          cp -r website/build/* _site/docs/
+          echo "hermes-agent.nousresearch.com" > _site/CNAME
+
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: _site
+
+      - name: Deploy to GitHub Pages
+        id: deploy
+        uses: actions/deploy-pages@v4
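The index builder invoked by both workflows is not included in this patch. A minimal sketch of what `scripts/build_skills_index.py` plausibly does, assuming each skill directory carries a `SKILL.md` with YAML frontmatter and that the output path matches the workflows above; the GitHub API enrichment implied by `httpx` and `GITHUB_TOKEN` is omitted here:

```python
# Hypothetical sketch only: the real scripts/build_skills_index.py is not
# part of this diff. Assumes SKILL.md frontmatter and the output path used
# by the workflows above.
import json
import pathlib

import yaml  # pip install pyyaml


def build_index(skills_root: str = "skills",
                out_path: str = "website/static/api/skills-index.json") -> None:
    entries = []
    for skill_md in sorted(pathlib.Path(skills_root).glob("*/SKILL.md")):
        text = skill_md.read_text(encoding="utf-8")
        if text.startswith("---"):
            # Frontmatter sits between the first two '---' fences.
            meta = yaml.safe_load(text.split("---", 2)[1]) or {}
            entries.append({
                "name": meta.get("name", skill_md.parent.name),
                "description": meta.get("description", ""),
                "path": str(skill_md.parent),
            })
    out = pathlib.Path(out_path)
    out.parent.mkdir(parents=True, exist_ok=True)
    out.write_text(json.dumps({"skills": entries}, indent=2), encoding="utf-8")


if __name__ == "__main__":
    build_index()
```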
diff --git a/.github/workflows/supply-chain-audit.yml b/.github/workflows/supply-chain-audit.yml
index b94e1dda4..1cee4564d 100644
--- a/.github/workflows/supply-chain-audit.yml
+++ b/.github/workflows/supply-chain-audit.yml
@@ -183,7 +183,7 @@ jobs:
           ---
           *Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*"

-          gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY"
+          gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" || echo "::warning::Could not post PR comment (expected for fork PRs — GITHUB_TOKEN is read-only)"

       - name: Fail on critical findings
         if: steps.scan.outputs.critical == 'true'
diff --git a/.gitignore b/.gitignore
index b3cc8bfff..e516d154f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,6 +51,9 @@ ignored/
 .worktrees/
 environments/benchmarks/evals/

+# Web UI build output
+hermes_cli/web_dist/
+
 # Release script temp files
 .release_notes.md
 mini-swe-agent/
@@ -59,3 +62,4 @@ mini-swe-agent/
 .direnv/
 .nix-stamps/
 result
+website/static/api/skills-index.json
diff --git a/Dockerfile b/Dockerfile
index 4935d222a..370382332 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ ENV PLAYWRIGHT_BROWSERS_PATH=/opt/hermes/.playwright
 # Install system dependencies in one layer, clear APT cache
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-    build-essential nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps && \
+    build-essential nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps git && \
     rm -rf /var/lib/apt/lists/*

 # Non-root user for runtime; UID can be overridden via HERMES_UID at runtime
diff --git a/README.md b/README.md
index b77cd6202..ea0758c83 100644
--- a/README.md
+++ b/README.md
@@ -167,6 +167,7 @@ python -m pytest tests/ -q
 - 📚 [Skills Hub](https://agentskills.io)
 - 🐛 [Issues](https://github.com/NousResearch/hermes-agent/issues)
 - 💡 [Discussions](https://github.com/NousResearch/hermes-agent/discussions)
+- 🔌 [HermesClaw](https://github.com/AaronWong1999/hermesclaw) — Community WeChat bridge: run Hermes Agent and OpenClaw on the same WeChat account.

 ---
diff --git a/RELEASE_v0.9.0.md b/RELEASE_v0.9.0.md
new file mode 100644
index 000000000..e895d818b
--- /dev/null
+++ b/RELEASE_v0.9.0.md
@@ -0,0 +1,328 @@
+# Hermes Agent v0.9.0 (v2026.4.13)
+
+**Release Date:** April 13, 2026
+**Since v0.8.0:** 487 commits · 269 merged PRs · 167 resolved issues · 493 files changed · 63,281 insertions · 24 contributors
+
+> The everywhere release — Hermes goes mobile with Termux/Android, adds iMessage and WeChat, ships Fast Mode for OpenAI and Anthropic, introduces background process monitoring, launches a local web dashboard for managing your agent, and delivers the deepest security hardening pass yet across 16 supported platforms.
+
+---
+
+## ✨ Highlights
+
+- **Local Web Dashboard** — A new browser-based dashboard for managing your Hermes Agent locally. Configure settings, monitor sessions, browse skills, and manage your gateway — all from a clean web interface without touching config files or the terminal. The easiest way to get started with Hermes.
+
+- **Fast Mode (`/fast`)** — Priority processing for OpenAI and Anthropic models. Toggle `/fast` to route through priority queues for significantly lower latency on supported models (GPT-5.4, Codex, Claude). Fast Mode covers all OpenAI Priority Processing models and Anthropic's fast tier. ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
+
+- **iMessage via BlueBubbles** — Full iMessage integration through BlueBubbles, bringing Hermes to Apple's messaging ecosystem. Auto-webhook registration, setup wizard integration, and crash resilience. ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494))
+
+- **WeChat (Weixin) & WeCom Callback Mode** — Native WeChat support via iLink Bot API and a new WeCom callback-mode adapter for self-built enterprise apps. Streaming cursor, media uploads, markdown link handling, and atomic state persistence. Hermes now covers the Chinese messaging ecosystem end-to-end. ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#7943](https://github.com/NousResearch/hermes-agent/pull/7943))
+
+- **Termux / Android Support** — Run Hermes natively on Android via Termux. Adapted install paths, TUI optimizations for mobile screens, voice backend support, and the `/image` command all work on-device. ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
+
+- **Background Process Monitoring (`watch_patterns`)** — Set patterns to watch for in background process output and get notified in real-time when they match. Monitor for errors, wait for specific events ("listening on port"), or watch build logs — all without polling. ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635))
+
+- **Native xAI & Xiaomi MiMo Providers** — First-class provider support for xAI (Grok) and Xiaomi MiMo, with direct API access, model catalogs, and setup wizard integration. Plus Qwen OAuth with portal request support. ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372), [#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
+
+- **Pluggable Context Engine** — Context management is now a pluggable slot via `hermes plugins`. Swap in custom context engines that control what the agent sees each turn — filtering, summarization, or domain-specific context injection. ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464)) *(sketch after this list)*
+
+- **Unified Proxy Support** — SOCKS proxy, `DISCORD_PROXY`, and system proxy auto-detection across all gateway platforms. Hermes behind corporate firewalls just works. ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
+
+- **Comprehensive Security Hardening** — Path traversal protection in checkpoint manager, shell injection neutralization in sandbox writes, SSRF redirect guards in Slack image uploads, Twilio webhook signature validation (SMS RCE fix), API server auth enforcement, git argument injection prevention, and approval button authorization. ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933), [#7944](https://github.com/NousResearch/hermes-agent/pull/7944), [#7940](https://github.com/NousResearch/hermes-agent/pull/7940), [#7151](https://github.com/NousResearch/hermes-agent/pull/7151), [#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
+
+- **`hermes backup` & `hermes import`** — Full backup and restore of your Hermes configuration, sessions, skills, and memory. Migrate between machines or create snapshots before major changes. ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
+
+- **16 Supported Platforms** — With BlueBubbles (iMessage) and WeChat joining Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, SMS, DingTalk, Feishu, WeCom, Mattermost, Home Assistant, and Webhooks, Hermes now runs on 16 messaging platforms out of the box.
+
+- **`/debug` & `hermes debug share`** — New debugging toolkit: `/debug` slash command across all platforms for quick diagnostics, plus `hermes debug share` to upload a full debug report to a pastebin for easy sharing when troubleshooting. ([#8681](https://github.com/NousResearch/hermes-agent/pull/8681))
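The plugin interface behind the pluggable context engine is not reproduced in this patch. A hypothetical sketch of the shape such a plugin could take; the `ContextEngine` protocol and its method names are illustrative, not the actual `hermes plugins` API:

```python
# Hypothetical shape of a context-engine plugin; the real interface
# registered via `hermes plugins` is not shown in this release's diff.
from typing import Protocol


class ContextEngine(Protocol):
    def build_context(self, history: list[dict], budget_tokens: int) -> list[dict]:
        """Return the messages the model should see this turn."""


class RecentOnlyEngine:
    """Toy engine: keep the system prompt plus the last N turns."""

    def __init__(self, keep_last: int = 20):
        self.keep_last = keep_last

    def build_context(self, history: list[dict], budget_tokens: int) -> list[dict]:
        system = [m for m in history if m.get("role") == "system"]
        rest = [m for m in history if m.get("role") != "system"]
        return system + rest[-self.keep_last:]
```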
+---
+
+## 🏗️ Core Agent & Architecture
+
+### Provider & Model Support
+- **Native xAI (Grok) provider** with direct API access and model catalog ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372))
+- **Xiaomi MiMo as first-class provider** — setup wizard, model catalog, empty response recovery ([#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
+- **Qwen OAuth provider** with portal request support ([#6282](https://github.com/NousResearch/hermes-agent/pull/6282))
+- **Fast Mode** — `/fast` toggle for OpenAI Priority Processing + Anthropic fast tier ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
+- **Structured API error classification** for smart failover decisions ([#6514](https://github.com/NousResearch/hermes-agent/pull/6514))
+- **Rate limit header capture** shown in `/usage` ([#6541](https://github.com/NousResearch/hermes-agent/pull/6541))
+- **API server model name** derived from profile name ([#6857](https://github.com/NousResearch/hermes-agent/pull/6857))
+- **Custom providers** now included in `/model` listings and resolution ([#7088](https://github.com/NousResearch/hermes-agent/pull/7088))
+- **Fallback provider activation** on repeated empty responses with user-visible status ([#7505](https://github.com/NousResearch/hermes-agent/pull/7505))
+- **OpenRouter variant tags** (`:free`, `:extended`, `:fast`) preserved during model switch ([#6383](https://github.com/NousResearch/hermes-agent/pull/6383))
+- **Credential exhaustion TTL** reduced from 24 hours to 1 hour ([#6504](https://github.com/NousResearch/hermes-agent/pull/6504))
+- **OAuth credential lifecycle** hardening — stale pool keys, auth.json sync, Codex CLI race fixes ([#6874](https://github.com/NousResearch/hermes-agent/pull/6874))
+- Empty response recovery for reasoning models (MiMo, Qwen, GLM) ([#8609](https://github.com/NousResearch/hermes-agent/pull/8609))
+- MiniMax context lengths, thinking guard, endpoint corrections ([#6082](https://github.com/NousResearch/hermes-agent/pull/6082), [#7126](https://github.com/NousResearch/hermes-agent/pull/7126))
+- Z.AI endpoint auto-detect via probe and cache ([#5763](https://github.com/NousResearch/hermes-agent/pull/5763))
+
+### Agent Loop & Conversation
+- **Pluggable context engine slot** via `hermes plugins` ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464))
+- **Background process monitoring** — `watch_patterns` for real-time output alerts ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635)) *(sketch after this list)*
+- **Improved context compression** — higher limits, tool tracking, degradation warnings, token-budget tail protection ([#6395](https://github.com/NousResearch/hermes-agent/pull/6395), [#6453](https://github.com/NousResearch/hermes-agent/pull/6453))
+- **`/compress <topic>`** — guided compression with a focus topic ([#8017](https://github.com/NousResearch/hermes-agent/pull/8017))
+- **Tiered context pressure warnings** with gateway dedup ([#6411](https://github.com/NousResearch/hermes-agent/pull/6411))
+- **Staged inactivity warning** before timeout escalation ([#6387](https://github.com/NousResearch/hermes-agent/pull/6387))
+- **Prevent agent from stopping mid-task** — compression floor, budget overhaul, activity tracking ([#7983](https://github.com/NousResearch/hermes-agent/pull/7983))
+- **Propagate child activity to parent** during `delegate_task` ([#7295](https://github.com/NousResearch/hermes-agent/pull/7295))
+- **Truncated streaming tool call detection** before execution ([#6847](https://github.com/NousResearch/hermes-agent/pull/6847))
+- Empty response retry (3 attempts with nudge) ([#6488](https://github.com/NousResearch/hermes-agent/pull/6488))
+- Adaptive streaming backoff + cursor strip to prevent message truncation ([#7683](https://github.com/NousResearch/hermes-agent/pull/7683))
+- Compression uses live session model instead of stale persisted config ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258))
+- Strip `<thought>` tags from Gemma 4 responses ([#8562](https://github.com/NousResearch/hermes-agent/pull/8562))
+- Prevent `<think>` in prose from suppressing response output ([#6968](https://github.com/NousResearch/hermes-agent/pull/6968))
+- Turn-exit diagnostic logging to agent loop ([#6549](https://github.com/NousResearch/hermes-agent/pull/6549))
+- Scope tool interrupt signal per-thread to prevent cross-session leaks ([#7930](https://github.com/NousResearch/hermes-agent/pull/7930))
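The `watch_patterns` tool schema is not shown in this excerpt. A self-contained sketch of the underlying idea from [#7635](https://github.com/NousResearch/hermes-agent/pull/7635); the `run_with_watches` helper and its signature are hypothetical:

```python
# Illustration of the watch_patterns idea; the real tool schema may differ.
# A background reader scans new output lines against compiled patterns and
# fires a notification on match, with no polling loop.
import re
import subprocess
import threading


def run_with_watches(cmd: list[str], watch_patterns: list[str], notify) -> subprocess.Popen:
    compiled = [re.compile(p) for p in watch_patterns]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, text=True)

    def pump():
        for line in proc.stdout:  # blocks until output arrives
            for pat in compiled:
                if pat.search(line):
                    notify(f"matched {pat.pattern!r}: {line.rstrip()}")

    threading.Thread(target=pump, daemon=True).start()
    return proc


# e.g. run_with_watches(["npm", "run", "dev"],
#                       [r"listening on port \d+", r"ERROR"], print)
```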
+
+### Memory & Sessions
+- **Hindsight memory plugin** — feature parity, setup wizard, config improvements — @nicoloboschi ([#6428](https://github.com/NousResearch/hermes-agent/pull/6428))
+- **Honcho** — opt-in `initOnSessionStart` for tools mode — @Kathie-yu ([#6995](https://github.com/NousResearch/hermes-agent/pull/6995))
+- Orphan children instead of cascade-deleting in prune/delete ([#6513](https://github.com/NousResearch/hermes-agent/pull/6513))
+- Doctor command only checks the active memory provider ([#6285](https://github.com/NousResearch/hermes-agent/pull/6285))
+
+---
+
+## 📱 Messaging Platforms (Gateway)
+
+### New Platforms
+- **BlueBubbles (iMessage)** — full adapter with auto-webhook registration, setup wizard, and crash resilience ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494), [#7107](https://github.com/NousResearch/hermes-agent/pull/7107))
+- **Weixin (WeChat)** — native support via iLink Bot API with streaming, media uploads, markdown links ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#8665](https://github.com/NousResearch/hermes-agent/pull/8665))
+- **WeCom Callback Mode** — self-built enterprise app adapter with atomic state persistence ([#7943](https://github.com/NousResearch/hermes-agent/pull/7943), [#7928](https://github.com/NousResearch/hermes-agent/pull/7928)) *(signature-check sketch after this list)*
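Hermes' WeCom callback adapter code sits outside this excerpt. For orientation, WeCom's documented callback verification signs each request as SHA-1 over the sorted concatenation of the app token, timestamp, nonce, and encrypted payload; a minimal check looks like this (function name is illustrative):

```python
# Sketch of WeCom's documented callback signature scheme, not Hermes'
# actual adapter code.
import hashlib


def wecom_signature_ok(msg_signature: str, token: str, timestamp: str,
                       nonce: str, encrypted: str) -> bool:
    # Sort the four fields lexicographically, concatenate, SHA-1 digest.
    digest = hashlib.sha1(
        "".join(sorted([token, timestamp, nonce, encrypted])).encode("utf-8")
    ).hexdigest()
    return digest == msg_signature
```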
+
+### Discord
+- **Allowed channels whitelist** config — @jarvis-phw ([#7044](https://github.com/NousResearch/hermes-agent/pull/7044))
+- **Forum channel topic inheritance** in thread sessions — @hermes-agent-dhabibi ([#6377](https://github.com/NousResearch/hermes-agent/pull/6377))
+- **DISCORD_REPLY_TO_MODE** setting ([#6333](https://github.com/NousResearch/hermes-agent/pull/6333))
+- Accept `.log` attachments, raise document size limit — @kira-ariaki ([#6467](https://github.com/NousResearch/hermes-agent/pull/6467))
+- Decouple readiness from slash sync ([#8016](https://github.com/NousResearch/hermes-agent/pull/8016))
+
+### Slack
+- **Consolidated Slack improvements** — 7 community PRs salvaged into one ([#6809](https://github.com/NousResearch/hermes-agent/pull/6809))
+- Handle assistant thread lifecycle events ([#6433](https://github.com/NousResearch/hermes-agent/pull/6433))
+
+### Matrix
+- **Migrated from matrix-nio to mautrix-python** ([#7518](https://github.com/NousResearch/hermes-agent/pull/7518))
+- SQLite crypto store replacing pickle (fixes E2EE decryption) — @alt-glitch ([#7981](https://github.com/NousResearch/hermes-agent/pull/7981))
+- Cross-signing recovery key verification for E2EE migration ([#8282](https://github.com/NousResearch/hermes-agent/pull/8282))
+- DM mention threads + group chat events for Feishu ([#7423](https://github.com/NousResearch/hermes-agent/pull/7423))
+
+### Gateway Core
+- **Unified proxy support** — SOCKS, DISCORD_PROXY, multi-platform with macOS auto-detection ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
+- **Inbound text batching** for Discord, Matrix, WeCom + adaptive delay ([#6979](https://github.com/NousResearch/hermes-agent/pull/6979))
+- **Surface natural mid-turn assistant messages** in chat platforms ([#7978](https://github.com/NousResearch/hermes-agent/pull/7978))
+- **WSL-aware gateway** with smart systemd detection ([#7510](https://github.com/NousResearch/hermes-agent/pull/7510))
+- **All missing platforms added to setup wizard** ([#7949](https://github.com/NousResearch/hermes-agent/pull/7949))
+- **Per-platform `tool_progress` overrides** ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
+- **Configurable 'still working' notification interval** ([#8572](https://github.com/NousResearch/hermes-agent/pull/8572))
+- `/model` switch persists across messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
+- `/usage` shows rate limits, cost, and token details between turns ([#7038](https://github.com/NousResearch/hermes-agent/pull/7038))
+- Drain in-flight work before restart ([#7503](https://github.com/NousResearch/hermes-agent/pull/7503))
+- Don't evict cached agent on failed runs — prevents MCP restart loop ([#7539](https://github.com/NousResearch/hermes-agent/pull/7539))
+- Replace `os.environ` session state with `contextvars` ([#7454](https://github.com/NousResearch/hermes-agent/pull/7454)) *(sketch after this list)*
+- Derive channel directory platforms from enum instead of hardcoded list ([#7450](https://github.com/NousResearch/hermes-agent/pull/7450))
+- Validate image downloads before caching (cross-platform) ([#7125](https://github.com/NousResearch/hermes-agent/pull/7125))
+- Cross-platform webhook delivery for all platforms ([#7095](https://github.com/NousResearch/hermes-agent/pull/7095))
+- Cron Discord thread_id delivery support ([#7106](https://github.com/NousResearch/hermes-agent/pull/7106))
+- Feishu QR-based bot onboarding ([#8570](https://github.com/NousResearch/hermes-agent/pull/8570))
+- Gateway status scoped to active profile ([#7951](https://github.com/NousResearch/hermes-agent/pull/7951))
+- Prevent background process notifications from triggering false pairing requests ([#6434](https://github.com/NousResearch/hermes-agent/pull/6434))
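On the `contextvars` migration ([#7454](https://github.com/NousResearch/hermes-agent/pull/7454)): `os.environ` is a single process-global table, so concurrent sessions that stash per-session state there can observe each other's values, whereas a `ContextVar` is copied per asyncio task. A minimal demonstration, with illustrative names:

```python
# Why the contextvars swap matters: each task created by asyncio.gather
# gets its own copy of the context, so set() in one session never leaks
# into another. Names here are illustrative, not Hermes internals.
import asyncio
import contextvars

current_session_id: contextvars.ContextVar[str] = contextvars.ContextVar(
    "current_session_id", default="")


async def handle_message(session_id: str) -> None:
    current_session_id.set(session_id)
    await asyncio.sleep(0)  # another session's task may run here
    assert current_session_id.get() == session_id  # still ours


async def main() -> None:
    await asyncio.gather(handle_message("discord:123"),
                         handle_message("slack:456"))

asyncio.run(main())
```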
+
+---
+
+## 🖥️ CLI & User Experience
+
+### Interactive CLI
+- **Termux / Android support** — adapted install paths, TUI, voice, `/image` ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
+- **Native `/model` picker modal** for provider → model selection ([#8003](https://github.com/NousResearch/hermes-agent/pull/8003))
+- **Live per-tool elapsed timer** restored in TUI spinner ([#7359](https://github.com/NousResearch/hermes-agent/pull/7359))
+- **Stacked tool progress scrollback** in TUI ([#8201](https://github.com/NousResearch/hermes-agent/pull/8201))
+- **Random tips on new session start** (CLI + gateway, 279 tips) ([#8225](https://github.com/NousResearch/hermes-agent/pull/8225), [#8237](https://github.com/NousResearch/hermes-agent/pull/8237))
+- **`hermes dump`** — copy-pasteable setup summary for debugging ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
+- **`hermes backup` / `hermes import`** — full config backup and restore ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
+- **WSL environment hint** in system prompt ([#8285](https://github.com/NousResearch/hermes-agent/pull/8285))
+- **Profile creation UX** — seed SOUL.md + credential warning ([#8553](https://github.com/NousResearch/hermes-agent/pull/8553))
+- Shell-aware sudo detection, empty password support ([#6517](https://github.com/NousResearch/hermes-agent/pull/6517))
+- Flush stdin after curses/terminal menus to prevent escape sequence leakage ([#7167](https://github.com/NousResearch/hermes-agent/pull/7167))
+- Handle broken stdin in prompt_toolkit startup ([#8560](https://github.com/NousResearch/hermes-agent/pull/8560))
+
+### Setup & Configuration
+- **Per-platform display verbosity** configuration ([#8006](https://github.com/NousResearch/hermes-agent/pull/8006))
+- **Component-separated logging** with session context and filtering ([#7991](https://github.com/NousResearch/hermes-agent/pull/7991))
+- **`network.force_ipv4`** config to fix IPv6 timeout issues ([#8196](https://github.com/NousResearch/hermes-agent/pull/8196))
+- **Standardize message whitespace and JSON formatting** ([#7988](https://github.com/NousResearch/hermes-agent/pull/7988))
+- **Rebrand OpenClaw → Hermes** during migration ([#8210](https://github.com/NousResearch/hermes-agent/pull/8210))
+- Config.yaml takes priority over env vars for auxiliary settings ([#7889](https://github.com/NousResearch/hermes-agent/pull/7889))
+- Harden setup provider flows + live OpenRouter catalog refresh ([#7078](https://github.com/NousResearch/hermes-agent/pull/7078))
+- Normalize reasoning effort ordering across all surfaces ([#6804](https://github.com/NousResearch/hermes-agent/pull/6804))
+- Remove dead `LLM_MODEL` env var + migration to clear stale entries ([#6543](https://github.com/NousResearch/hermes-agent/pull/6543))
+- Remove `/prompt` slash command — prefix expansion footgun ([#6752](https://github.com/NousResearch/hermes-agent/pull/6752))
+- `HERMES_HOME_MODE` env var to override permissions — @ygd58 ([#6993](https://github.com/NousResearch/hermes-agent/pull/6993))
+- Fall back to default model when model config is empty ([#8303](https://github.com/NousResearch/hermes-agent/pull/8303))
+- Warn when compression model context is too small ([#7894](https://github.com/NousResearch/hermes-agent/pull/7894))
+
+---
+
+## 🔧 Tool System
+
+### Environments & Execution
+- **Unified spawn-per-call execution layer** for environments ([#6343](https://github.com/NousResearch/hermes-agent/pull/6343))
+- **Unified file sync** with mtime tracking, deletion, and transactional state ([#7087](https://github.com/NousResearch/hermes-agent/pull/7087))
+- **Persistent sandbox envs** survive between turns ([#6412](https://github.com/NousResearch/hermes-agent/pull/6412))
+- **Bulk file sync** via tar pipe for SSH/Modal backends — @alt-glitch ([#8014](https://github.com/NousResearch/hermes-agent/pull/8014)) *(sketch after this list)*
+- **Daytona** — bulk upload, config bridge, silent disk cap ([#7538](https://github.com/NousResearch/hermes-agent/pull/7538))
+- Foreground timeout cap to prevent session deadlocks ([#7082](https://github.com/NousResearch/hermes-agent/pull/7082))
+- Guard invalid command values ([#6417](https://github.com/NousResearch/hermes-agent/pull/6417))
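The tar-pipe idea behind [#8014](https://github.com/NousResearch/hermes-agent/pull/8014) replaces per-file transfers with one archive streamed through a single `ssh` process. A sketch of the technique (not the actual Hermes sync code; `push_tree` is illustrative):

```python
# Stream one tar archive through a single ssh process instead of copying
# files one at a time; one round-trip regardless of file count.
import subprocess


def push_tree(local_dir: str, host: str, remote_dir: str) -> None:
    tar = subprocess.Popen(
        ["tar", "-C", local_dir, "-cf", "-", "."],
        stdout=subprocess.PIPE,
    )
    subprocess.run(
        ["ssh", host, f"mkdir -p {remote_dir} && tar -C {remote_dir} -xf -"],
        stdin=tar.stdout,
        check=True,
    )
    tar.stdout.close()
    if tar.wait() != 0:
        raise RuntimeError("local tar failed")
```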
+
+### MCP
+- **`hermes mcp add --env` and `--preset`** support ([#7970](https://github.com/NousResearch/hermes-agent/pull/7970))
+- Combine `content` and `structuredContent` when both present ([#7118](https://github.com/NousResearch/hermes-agent/pull/7118))
+- MCP tool name deconfliction fixes ([#7654](https://github.com/NousResearch/hermes-agent/pull/7654))
+
+### Browser
+- Browser hardening — dead code removal, caching, scroll perf, security, thread safety ([#7354](https://github.com/NousResearch/hermes-agent/pull/7354))
+- `/browser connect` auto-launch uses dedicated Chrome profile dir ([#6821](https://github.com/NousResearch/hermes-agent/pull/6821))
+- Reap orphaned browser sessions on startup ([#7931](https://github.com/NousResearch/hermes-agent/pull/7931))
+
+### Voice & Vision
+- **Voxtral TTS provider** (Mistral AI) ([#7653](https://github.com/NousResearch/hermes-agent/pull/7653))
+- **TTS speed support** for Edge TTS, OpenAI TTS, MiniMax ([#8666](https://github.com/NousResearch/hermes-agent/pull/8666))
+- **Vision auto-resize** for oversized images, raise limit to 20 MB, retry-on-failure ([#7883](https://github.com/NousResearch/hermes-agent/pull/7883), [#7902](https://github.com/NousResearch/hermes-agent/pull/7902))
+- STT provider-model mismatch fix (whisper-1 vs faster-whisper) ([#7113](https://github.com/NousResearch/hermes-agent/pull/7113))
+
+### Other Tools
+- **`hermes dump`** command for setup summary ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
+- TODO store enforces ID uniqueness during replace operations ([#7986](https://github.com/NousResearch/hermes-agent/pull/7986))
+- List all available toolsets in `delegate_task` schema description ([#8231](https://github.com/NousResearch/hermes-agent/pull/8231))
+- API server: tool progress as custom SSE event to prevent model corruption ([#7500](https://github.com/NousResearch/hermes-agent/pull/7500))
+- API server: share one Docker container across all conversations ([#7127](https://github.com/NousResearch/hermes-agent/pull/7127))
+
+---
+
+## 🧩 Skills Ecosystem
+
+- **Centralized skills index + tree cache** — eliminates rate-limit failures on install ([#8575](https://github.com/NousResearch/hermes-agent/pull/8575))
+- **More aggressive skill loading instructions** in system prompt (v3) ([#8209](https://github.com/NousResearch/hermes-agent/pull/8209), [#8286](https://github.com/NousResearch/hermes-agent/pull/8286))
+- **Google Workspace skill** migrated to GWS CLI backend ([#6788](https://github.com/NousResearch/hermes-agent/pull/6788))
+- **Creative divergence strategies** skill — @SHL0MS ([#6882](https://github.com/NousResearch/hermes-agent/pull/6882))
+- **Creative ideation** — constraint-driven project generation — @SHL0MS ([#7555](https://github.com/NousResearch/hermes-agent/pull/7555))
+- Parallelize skills browse/search to prevent hanging ([#7301](https://github.com/NousResearch/hermes-agent/pull/7301))
+- Read name from SKILL.md frontmatter in skills_sync ([#7623](https://github.com/NousResearch/hermes-agent/pull/7623))
+
+---
+
+## 🔒 Security & Reliability
+
+### Security Hardening
+- **Twilio webhook signature validation** — SMS RCE fix ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933)) *(sketch after this list)*
+- **Shell injection neutralization** in `_write_to_sandbox` via path quoting ([#7940](https://github.com/NousResearch/hermes-agent/pull/7940))
+- **Git argument injection** and path traversal prevention in checkpoint manager ([#7944](https://github.com/NousResearch/hermes-agent/pull/7944))
+- **SSRF redirect bypass** in Slack image uploads + base.py cache helpers ([#7151](https://github.com/NousResearch/hermes-agent/pull/7151))
+- **Path traversal, credential gate, DANGEROUS_PATTERNS gaps** ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
+- **API bind guard** — enforce `API_SERVER_KEY` for non-loopback binding ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
+- **Approval button authorization** — require auth for session continuation — @Cafexss ([#6930](https://github.com/NousResearch/hermes-agent/pull/6930))
+- Path boundary enforcement in skill manager operations ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
+- DingTalk/API webhook URL origin validation, header injection rejection ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
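The webhook handler changed by [#7933](https://github.com/NousResearch/hermes-agent/pull/7933) is not shown here, but the gist of Twilio signature validation with the official SDK helper is:

```python
# Reject any inbound SMS webhook whose X-Twilio-Signature header does not
# verify against the account's auth token. Uses Twilio's official helper;
# the Hermes-side wiring is an assumption.
from twilio.request_validator import RequestValidator


def sms_webhook_allowed(auth_token: str, url: str, form: dict,
                        signature_header: str) -> bool:
    validator = RequestValidator(auth_token)
    return validator.validate(url, form, signature_header)
```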
+
+### Reliability
+- **Contextual error diagnostics** for invalid API responses ([#8565](https://github.com/NousResearch/hermes-agent/pull/8565))
+- **Prevent 400 format errors** from triggering compression loop on Codex ([#6751](https://github.com/NousResearch/hermes-agent/pull/6751))
+- **Don't halve context_length** on output-cap-too-large errors — @KUSH42 ([#6664](https://github.com/NousResearch/hermes-agent/pull/6664))
+- **Recover primary client** on OpenAI transport errors ([#7108](https://github.com/NousResearch/hermes-agent/pull/7108))
+- **Credential pool rotation** on billing-classified 400s ([#7112](https://github.com/NousResearch/hermes-agent/pull/7112))
+- **Auto-increase stream read timeout** for local LLM providers ([#6967](https://github.com/NousResearch/hermes-agent/pull/6967))
+- **Fall back to default certs** when CA bundle path doesn't exist ([#7352](https://github.com/NousResearch/hermes-agent/pull/7352)) *(sketch after this list)*
+- **Disambiguate usage-limit patterns** in error classifier — @sprmn24 ([#6836](https://github.com/NousResearch/hermes-agent/pull/6836))
+- Harden cron script timeout and provider recovery ([#7079](https://github.com/NousResearch/hermes-agent/pull/7079))
+- Gateway interrupt detection resilient to monitor task failures ([#8208](https://github.com/NousResearch/hermes-agent/pull/8208))
+- Prevent unwanted session auto-reset after graceful gateway restarts ([#8299](https://github.com/NousResearch/hermes-agent/pull/8299))
+- Prevent duplicate update prompt spam in gateway watcher ([#8343](https://github.com/NousResearch/hermes-agent/pull/8343))
+- Deduplicate reasoning items in Responses API input ([#7946](https://github.com/NousResearch/hermes-agent/pull/7946))
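One way the [#7352](https://github.com/NousResearch/hermes-agent/pull/7352) behavior can be implemented (illustrative, not the actual Hermes code): honor a configured CA bundle only when the file actually exists, otherwise fall back to `certifi`'s default bundle instead of failing every TLS handshake:

```python
# Fall back to default certs when the configured CA bundle is missing.
import os

import certifi
import httpx


def make_client(ca_bundle: str | None) -> httpx.Client:
    verify = ca_bundle if ca_bundle and os.path.exists(ca_bundle) else certifi.where()
    return httpx.Client(verify=verify)
```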
+
+### Infrastructure
+- **Multi-arch Docker image** — amd64 + arm64 ([#6124](https://github.com/NousResearch/hermes-agent/pull/6124))
+- **Docker runs as non-root user** with virtualenv — @benbarclay ([#8226](https://github.com/NousResearch/hermes-agent/pull/8226))
+- **Use `uv`** for Docker dependency resolution to fix resolution-too-deep ([#6965](https://github.com/NousResearch/hermes-agent/pull/6965))
+- **Container-aware Nix CLI** — auto-route into managed container — @alt-glitch ([#7543](https://github.com/NousResearch/hermes-agent/pull/7543))
+- **Nix shared-state permission model** for interactive CLI users — @alt-glitch ([#6796](https://github.com/NousResearch/hermes-agent/pull/6796))
+- **Per-profile subprocess HOME isolation** ([#7357](https://github.com/NousResearch/hermes-agent/pull/7357))
+- Profile paths fixed in Docker — profiles go to mounted volume ([#7170](https://github.com/NousResearch/hermes-agent/pull/7170))
+- Docker container gateway pathway hardened ([#8614](https://github.com/NousResearch/hermes-agent/pull/8614))
+- Enable unbuffered stdout for live Docker logs ([#6749](https://github.com/NousResearch/hermes-agent/pull/6749))
+- Install procps in Docker image — @HiddenPuppy ([#7032](https://github.com/NousResearch/hermes-agent/pull/7032))
+- Shallow git clone for faster installation — @sosyz ([#8396](https://github.com/NousResearch/hermes-agent/pull/8396))
+- `hermes update` always reset on stash conflict ([#7010](https://github.com/NousResearch/hermes-agent/pull/7010))
+- Write update exit code before gateway restart (cgroup kill race) ([#8288](https://github.com/NousResearch/hermes-agent/pull/8288))
+- Nix: `setupSecrets` optional, tirith runtime dep — @devorun, @ethernet8023 ([#6261](https://github.com/NousResearch/hermes-agent/pull/6261), [#6721](https://github.com/NousResearch/hermes-agent/pull/6721))
+- launchd stop uses `bootout` so `KeepAlive` doesn't respawn ([#7119](https://github.com/NousResearch/hermes-agent/pull/7119))
+
+---
+
+## 🐛 Notable Bug Fixes
+
+- Fix: `/model` switch not persisting across gateway messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
+- Fix: session-scoped gateway model overrides ignored — @Hygaard ([#7662](https://github.com/NousResearch/hermes-agent/pull/7662))
+- Fix: compaction model context length ignoring config — 3 related issues ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258), [#8107](https://github.com/NousResearch/hermes-agent/pull/8107))
+- Fix: OpenCode.ai context window resolved to 128K instead of 1M ([#6472](https://github.com/NousResearch/hermes-agent/pull/6472))
+- Fix: Codex fallback auth-store lookup — @cherifya ([#6462](https://github.com/NousResearch/hermes-agent/pull/6462))
+- Fix: duplicate completion notifications when process killed ([#7124](https://github.com/NousResearch/hermes-agent/pull/7124))
+- Fix: agent daemon thread prevents orphan CLI processes on tab close ([#8557](https://github.com/NousResearch/hermes-agent/pull/8557))
+- Fix: stale image attachment on text paste and voice input ([#7077](https://github.com/NousResearch/hermes-agent/pull/7077))
+- Fix: DM thread session seeding causing cross-thread contamination ([#7084](https://github.com/NousResearch/hermes-agent/pull/7084))
+- Fix: OpenClaw migration shows dry-run preview before executing ([#6769](https://github.com/NousResearch/hermes-agent/pull/6769))
+- Fix: auth errors misclassified as retryable — @kuishou68 ([#7027](https://github.com/NousResearch/hermes-agent/pull/7027))
+- Fix: Copilot-Integration-Id header missing ([#7083](https://github.com/NousResearch/hermes-agent/pull/7083))
+- Fix: ACP session capabilities — @luyao618 ([#6985](https://github.com/NousResearch/hermes-agent/pull/6985))
+- Fix: ACP PromptResponse usage from top-level fields ([#7086](https://github.com/NousResearch/hermes-agent/pull/7086))
+- Fix: several failing/flaky tests on main — @dsocolobsky ([#6777](https://github.com/NousResearch/hermes-agent/pull/6777))
+- Fix: backup marker filenames — @sprmn24 ([#8600](https://github.com/NousResearch/hermes-agent/pull/8600))
+- Fix: `NoneType` in fast_mode check — @0xbyt4 ([#7350](https://github.com/NousResearch/hermes-agent/pull/7350))
+- Fix: missing imports in uninstall.py — @JiayuuWang ([#7034](https://github.com/NousResearch/hermes-agent/pull/7034))
+
+---
+
+## 📚 Documentation
+
+- Platform adapter developer guide + WeCom Callback docs ([#7969](https://github.com/NousResearch/hermes-agent/pull/7969))
+- Cron troubleshooting guide ([#7122](https://github.com/NousResearch/hermes-agent/pull/7122))
+- Streaming timeout auto-detection for local LLMs ([#6990](https://github.com/NousResearch/hermes-agent/pull/6990))
+- Tool-use enforcement documentation expanded ([#7984](https://github.com/NousResearch/hermes-agent/pull/7984))
+- BlueBubbles pairing instructions ([#6548](https://github.com/NousResearch/hermes-agent/pull/6548))
+- Telegram proxy support section ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
+- `hermes dump` and `hermes logs` CLI reference ([#6552](https://github.com/NousResearch/hermes-agent/pull/6552))
+- `tool_progress_overrides` configuration reference ([#6364](https://github.com/NousResearch/hermes-agent/pull/6364))
+- Compression model context length warning docs ([#7879](https://github.com/NousResearch/hermes-agent/pull/7879))
+
+---
+
+## 👥 Contributors
+
+**269 merged PRs** from **24 contributors** across **487 commits**.
+
+### Community Contributors
+- **@alt-glitch** (6 PRs) — Nix container-aware CLI, shared-state permissions, Matrix SQLite crypto store, bulk SSH/Modal file sync, Matrix mautrix compat
+- **@SHL0MS** (2 PRs) — Creative divergence strategies skill, creative ideation skill
+- **@sprmn24** (2 PRs) — Error classifier disambiguation, backup marker fix
+- **@nicoloboschi** — Hindsight memory plugin feature parity
+- **@Hygaard** — Session-scoped gateway model override fix
+- **@jarvis-phw** — Discord allowed_channels whitelist
+- **@Kathie-yu** — Honcho initOnSessionStart for tools mode
+- **@hermes-agent-dhabibi** — Discord forum channel topic inheritance
+- **@kira-ariaki** — Discord .log attachments and size limit
+- **@cherifya** — Codex fallback auth-store lookup
+- **@Cafexss** — Security: auth for session continuation
+- **@KUSH42** — Compaction context_length fix
+- **@kuishou68** — Auth error retryable classification fix
+- **@luyao618** — ACP session capabilities
+- **@ygd58** — HERMES_HOME_MODE env var override
+- **@0xbyt4** — Fast mode NoneType fix
+- **@JiayuuWang** — CLI uninstall import fix
+- **@HiddenPuppy** — Docker procps installation
+- **@dsocolobsky** — Test suite fixes
+- **@benbarclay** — Docker image tag simplification
+- **@sosyz** — Shallow git clone for faster install
+- **@devorun** — Nix setupSecrets optional
+- **@ethernet8023** — Nix tirith runtime dep
+
+---
+
+**Full Changelog**: [v2026.4.8...v2026.4.13](https://github.com/NousResearch/hermes-agent/compare/v2026.4.8...v2026.4.13)
diff --git a/agent/auxiliary_client.py b/agent/auxiliary_client.py
index 3dcc78a98..49dea65f9 100644
--- a/agent/auxiliary_client.py
+++ b/agent/auxiliary_client.py
@@ -27,10 +27,6 @@ Per-task overrides are configured in config.yaml under the ``auxiliary:`` sectio
 (e.g. ``auxiliary.vision.provider``, ``auxiliary.compression.model``).
 Default "auto" follows the chains above.

-Legacy env var overrides (AUXILIARY_{TASK}_PROVIDER, AUXILIARY_{TASK}_MODEL,
-AUXILIARY_{TASK}_BASE_URL, etc.) are still read as a backward-compat fallback
-but config.yaml takes priority. New configuration should always use config.yaml.
-
 Payment / credit exhaustion fallback:
 When a resolved provider returns HTTP 402 or a credit-related error,
 call_llm() automatically retries with the next available provider in the
@@ -68,6 +64,8 @@ _PROVIDER_ALIASES = {
     "zhipu": "zai",
     "kimi": "kimi-coding",
     "moonshot": "kimi-coding",
+    "kimi-cn": "kimi-coding-cn",
+    "moonshot-cn": "kimi-coding-cn",
     "minimax-china": "minimax-cn",
     "minimax_cn": "minimax-cn",
     "claude": "anthropic",
@@ -75,13 +73,13 @@
 }


-def _normalize_aux_provider(provider: Optional[str], *, for_vision: bool = False) -> str:
+def _normalize_aux_provider(provider: Optional[str]) -> str:
     normalized = (provider or "auto").strip().lower()
     if normalized.startswith("custom:"):
         suffix = normalized.split(":", 1)[1].strip()
         if not suffix:
             return "custom"
-        normalized = suffix if not for_vision else "custom"
+        normalized = suffix
     if normalized == "codex":
         return "openai-codex"
     if normalized == "main":
@@ -98,6 +96,7 @@
 _API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
     "gemini": "gemini-3-flash-preview",
     "zai": "glm-4.5-flash",
     "kimi-coding": "kimi-k2-turbo-preview",
+    "kimi-coding-cn": "kimi-k2-turbo-preview",
     "minimax": "MiniMax-M2.7",
     "minimax-cn": "MiniMax-M2.7",
     "anthropic": "claude-haiku-4-5-20251001",
@@ -753,30 +752,6 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:

 # ── Provider resolution helpers ─────────────────────────────────────────────

-def _get_auxiliary_provider(task: str = "") -> str:
-    """Read the provider override for a specific auxiliary task.
-
-    Checks AUXILIARY_{TASK}_PROVIDER first (e.g. AUXILIARY_VISION_PROVIDER),
-    then CONTEXT_{TASK}_PROVIDER (for the compression section's summary_provider),
-    then falls back to "auto". Returns one of: "auto", "openrouter", "nous", "main".
-    """
-    if task:
-        for prefix in ("AUXILIARY_", "CONTEXT_"):
-            val = os.getenv(f"{prefix}{task.upper()}_PROVIDER", "").strip().lower()
-            if val and val != "auto":
-                return val
-    return "auto"
-
-
-def _get_auxiliary_env_override(task: str, suffix: str) -> Optional[str]:
-    """Read an auxiliary env override from AUXILIARY_* or CONTEXT_* prefixes."""
-    if not task:
-        return None
-    for prefix in ("AUXILIARY_", "CONTEXT_"):
-        val = os.getenv(f"{prefix}{task.upper()}_{suffix}", "").strip()
-        if val:
-            return val
-    return None


 def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]:
@@ -1248,6 +1223,12 @@ def _to_async_client(sync_client, model: str):
         return AsyncCodexAuxiliaryClient(sync_client), model
     if isinstance(sync_client, AnthropicAuxiliaryClient):
         return AsyncAnthropicAuxiliaryClient(sync_client), model
+    try:
+        from agent.copilot_acp_client import CopilotACPClient
+        if isinstance(sync_client, CopilotACPClient):
+            return sync_client, model
+    except ImportError:
+        pass

     async_kwargs = {
         "api_key": sync_client.api_key,
@@ -1466,10 +1447,14 @@ def resolve_provider_client(
     custom_entry = _get_named_custom_provider(provider)
     if custom_entry:
         custom_base = custom_entry.get("base_url", "").strip()
-        custom_key = custom_entry.get("api_key", "").strip() or "no-key-required"
+        custom_key = custom_entry.get("api_key", "").strip()
+        custom_key_env = custom_entry.get("key_env", "").strip()
+        if not custom_key and custom_key_env:
+            custom_key = os.getenv(custom_key_env, "").strip()
+        custom_key = custom_key or "no-key-required"
         if custom_base:
             final_model = _normalize_resolved_model(
-                model or _read_main_model() or "gpt-4o-mini",
+                model or custom_entry.get("model") or _read_main_model() or "gpt-4o-mini",
                 provider,
             )
             client = OpenAI(api_key=custom_key, base_url=custom_base)
@@ -1488,7 +1473,11 @@ def resolve_provider_client(
     # ── API-key providers from PROVIDER_REGISTRY ─────────────────────
     try:
-        from hermes_cli.auth import PROVIDER_REGISTRY, resolve_api_key_provider_credentials
+        from hermes_cli.auth import (
+            PROVIDER_REGISTRY,
+            resolve_api_key_provider_credentials,
+            resolve_external_process_provider_credentials,
+        )
     except ImportError:
         logger.debug("hermes_cli.auth not available for provider %s", provider)
         return None, None
@@ -1562,6 +1551,41 @@ def resolve_provider_client(
         return (_to_async_client(client, final_model) if async_mode
                 else (client, final_model))

+    if pconfig.auth_type == "external_process":
+        creds = resolve_external_process_provider_credentials(provider)
+        final_model = _normalize_resolved_model(model or _read_main_model(), provider)
+        if provider == "copilot-acp":
+            api_key = str(creds.get("api_key", "")).strip()
+            base_url = str(creds.get("base_url", "")).strip()
+            command = str(creds.get("command", "")).strip() or None
+            args = list(creds.get("args") or [])
+            if not final_model:
+                logger.warning(
+                    "resolve_provider_client: copilot-acp requested but no model "
+                    "was provided or configured"
+                )
+                return None, None
+            if not api_key or not base_url:
+                logger.warning(
+                    "resolve_provider_client: copilot-acp requested but external "
+                    "process credentials are incomplete"
+                )
+                return None, None
+            from agent.copilot_acp_client import CopilotACPClient
+
+            client = CopilotACPClient(
+                api_key=api_key,
+                base_url=base_url,
+                command=command,
+                args=args,
+            )
+            logger.debug("resolve_provider_client: %s (%s)", provider, final_model)
+            return (_to_async_client(client, final_model) if async_mode
+                    else (client, final_model))
+
+        logger.warning("resolve_provider_client: external-process provider %s not "
+                       "directly supported", provider)
+        return None, None
+
     elif pconfig.auth_type in ("oauth_device_code", "oauth_external"):
         # OAuth providers — route through their specific try functions
         if provider == "nous":
@@ -1591,8 +1615,8 @@ def get_text_auxiliary_client(
         task: Optional task name ("compression", "web_extract") to check
             for a task-specific provider override.

-    Callers may override the returned model with a per-task env var
-    (e.g. CONTEXT_COMPRESSION_MODEL, AUXILIARY_WEB_EXTRACT_MODEL).
+    Callers may override the returned model via config.yaml
+    (e.g. auxiliary.compression.model, auxiliary.web_extract.model).
     """
     provider, model, base_url, api_key, api_mode = _resolve_task_provider_model(task or None)
     return resolve_provider_client(
@@ -1631,7 +1655,7 @@ _VISION_AUTO_PROVIDER_ORDER = (

 def _normalize_vision_provider(provider: Optional[str]) -> str:
-    return _normalize_aux_provider(provider, for_vision=True)
+    return _normalize_aux_provider(provider)


 def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]:
@@ -1714,6 +1738,7 @@ def resolve_vision_provider_client(
             async_mode=async_mode,
             explicit_base_url=resolved_base_url,
             explicit_api_key=resolved_api_key,
+            api_mode=resolved_api_mode,
         )
         if client is None:
             return "custom", None, None
@@ -1738,7 +1763,8 @@
         # Use provider-specific vision model if available, otherwise main model.
         vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model)
         rpc_client, rpc_model = resolve_provider_client(
-            main_provider, vision_model)
+            main_provider, vision_model,
+            api_mode=resolved_api_mode)
         if rpc_client is not None:
             logger.info(
                 "Vision auto-detect: using active provider %s (%s)",
@@ -1762,7 +1788,8 @@
         sync_client, default_model = _resolve_strict_vision_backend(requested)
         return _finalize(requested, sync_client, default_model)

-    client, final_model = _get_cached_client(requested, resolved_model, async_mode)
+    client, final_model = _get_cached_client(requested, resolved_model, async_mode,
+                                             api_mode=resolved_api_mode)
     if client is None:
         return requested, None, None
     return requested, client, final_model
@@ -2011,9 +2038,8 @@ def _resolve_task_provider_model(
     Priority:
       1. Explicit provider/model/base_url/api_key args (always win)
-      2. Config file (auxiliary.{task}.* or compression.*)
-      3. Env var overrides (backward-compat: AUXILIARY_{TASK}_*, CONTEXT_{TASK}_*)
-      4. "auto" (full auto-detection chain)
+      2. Config file (auxiliary.{task}.provider/model/base_url)
+      3. "auto" (full auto-detection chain)

     Returns (provider, model, base_url, api_key, api_mode) where model may
     be None (use provider default). When base_url is set, provider is forced
@@ -2044,22 +2070,8 @@
         cfg_api_key = str(task_config.get("api_key", "")).strip() or None
         cfg_api_mode = str(task_config.get("api_mode", "")).strip() or None

-    # Backwards compat: compression section has its own keys.
-    # The auxiliary.compression defaults to provider="auto", so treat
-    # both None and "auto" as "not explicitly configured".
-    if task == "compression" and (not cfg_provider or cfg_provider == "auto"):
-        comp = config.get("compression", {}) if isinstance(config, dict) else {}
-        if isinstance(comp, dict):
-            cfg_provider = comp.get("summary_provider", "").strip() or None
-            cfg_model = cfg_model or comp.get("summary_model", "").strip() or None
-            _sbu = comp.get("summary_base_url") or ""
-            cfg_base_url = cfg_base_url or _sbu.strip() or None
-
-    # Env vars are backward-compat fallback only — config.yaml is primary.
-    env_model = _get_auxiliary_env_override(task, "MODEL") if task else None
-    env_api_mode = _get_auxiliary_env_override(task, "API_MODE") if task else None
-    resolved_model = model or cfg_model or env_model
-    resolved_api_mode = cfg_api_mode or env_api_mode
+    resolved_model = model or cfg_model
+    resolved_api_mode = cfg_api_mode

     if base_url:
         return "custom", resolved_model, base_url, api_key, resolved_api_mode
@@ -2073,17 +2085,6 @@
     if cfg_provider and cfg_provider != "auto":
         return cfg_provider, resolved_model, None, None, resolved_api_mode

-    # Env vars are backward-compat fallback for users who haven't
-    # migrated to config.yaml yet.
-    env_base_url = _get_auxiliary_env_override(task, "BASE_URL")
-    env_api_key = _get_auxiliary_env_override(task, "API_KEY")
-    if env_base_url:
-        return "custom", resolved_model, env_base_url, env_api_key, resolved_api_mode
-
-    env_provider = _get_auxiliary_provider(task)
-    if env_provider != "auto":
-        return env_provider, resolved_model, None, None, resolved_api_mode
-
-    return "auto", resolved_model, None, None, resolved_api_mode

     return "auto", resolved_model, None, None, resolved_api_mode
@@ -2454,9 +2455,9 @@ def extract_content_or_reasoning(response) -> str:
     if content:
         # Strip inline think/reasoning blocks (mirrors _strip_think_blocks)
         cleaned = re.sub(
-            r"<(?:think|thinking|reasoning|REASONING_SCRATCHPAD)>"
+            r"<(?:think|thinking|reasoning|thought|REASONING_SCRATCHPAD)>"
             r".*?"
-            r"</(?:think|thinking|reasoning|REASONING_SCRATCHPAD)>",
+            r"</(?:think|thinking|reasoning|thought|REASONING_SCRATCHPAD)>",
             "", content, flags=re.DOTALL | re.IGNORECASE,
         ).strip()
         if cleaned:
diff --git a/agent/credential_pool.py b/agent/credential_pool.py
index e067fb901..ea9ad9232 100644
--- a/agent/credential_pool.py
+++ b/agent/credential_pool.py
@@ -289,6 +289,14 @@ def _iter_custom_providers(config: Optional[dict] = None):
         return
     custom_providers = config.get("custom_providers")
     if not isinstance(custom_providers, list):
+        # Fall back to the v12+ providers dict via the compatibility layer
+        try:
+            from hermes_cli.config import get_compatible_custom_providers
+
+            custom_providers = get_compatible_custom_providers(config)
+        except Exception:
+            return
+        if not custom_providers:
             return
     for entry in custom_providers:
         if not isinstance(entry, dict):
diff --git a/agent/model_metadata.py b/agent/model_metadata.py
index 03f70b3fe..4c8d678dc 100644
--- a/agent/model_metadata.py
+++ b/agent/model_metadata.py
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 # are preserved so the full model name reaches cache lookups and server queries.
 _PROVIDER_PREFIXES: frozenset[str] = frozenset({
     "openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
-    "gemini", "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek",
+    "gemini", "zai", "kimi-coding", "kimi-coding-cn", "minimax", "minimax-cn", "anthropic", "deepseek",
     "opencode-zen", "opencode-go", "ai-gateway", "kilocode",
     "alibaba", "qwen-oauth", "xiaomi",
@@ -32,7 +32,7 @@
     # Common aliases
     "google", "google-gemini", "google-ai-studio",
     "glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
-    "github-models", "kimi", "moonshot", "claude", "deep-seek",
+    "github-models", "kimi", "moonshot", "kimi-cn", "moonshot-cn", "claude", "deep-seek",
     "opencode", "zen", "go", "vercel", "kilo",
     "dashscope", "aliyun", "qwen", "mimo", "xiaomi-mimo", "qwen-portal",
@@ -211,6 +211,7 @@ _URL_TO_PROVIDER: Dict[str, str] = {
     "api.anthropic.com": "anthropic",
     "api.z.ai": "zai",
     "api.moonshot.ai": "kimi-coding",
+    "api.moonshot.cn": "kimi-coding-cn",
     "api.kimi.com": "kimi-coding",
     "api.minimax": "minimax",
     "dashscope.aliyuncs.com": "alibaba",
@@ -775,12 +776,12 @@ def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
         resp = client.post(f"{server_url}/api/show", json={"name": model})
         if resp.status_code == 200:
             data = resp.json()
-            # Check model_info for context length
-            model_info = data.get("model_info", {})
-            for key, value in model_info.items():
-                if "context_length" in key and isinstance(value, (int, float)):
-                    return int(value)
-            # Check parameters string for num_ctx
+            # Prefer explicit num_ctx from Modelfile parameters: this is
+            # the *runtime* context Ollama will actually allocate KV cache
+            # for. The GGUF model_info.context_length is the training max,
+            # which can be larger than num_ctx — using it here would let
+            # Hermes grow conversations past the runtime limit and Ollama
+            # would silently truncate. Matches query_ollama_num_ctx().
             params = data.get("parameters", "")
             if "num_ctx" in params:
                 for line in params.split("\n"):
@@ -791,6 +792,11 @@
                             return int(parts[-1])
                         except ValueError:
                             pass
+            # Fall back to GGUF model_info context_length (training max)
+            model_info = data.get("model_info", {})
+            for key, value in model_info.items():
+                if "context_length" in key and isinstance(value, (int, float)):
+                    return int(value)

         # LM Studio native API: /api/v1/models returns max_context_length.
         # This is more reliable than the OpenAI-compat /v1/models which
diff --git a/agent/models_dev.py b/agent/models_dev.py
index e20a2d414..1f8cf90c8 100644
--- a/agent/models_dev.py
+++ b/agent/models_dev.py
@@ -148,6 +148,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
     "openai-codex": "openai",
     "zai": "zai",
     "kimi-coding": "kimi-for-coding",
+    "kimi-coding-cn": "kimi-for-coding",
     "minimax": "minimax",
     "minimax-cn": "minimax-cn",
     "deepseek": "deepseek",
diff --git a/agent/prompt_builder.py b/agent/prompt_builder.py
index 6eec0392b..558a57888 100644
--- a/agent/prompt_builder.py
+++ b/agent/prompt_builder.py
@@ -364,6 +364,18 @@ PLATFORM_HINTS = {
         "documents. You can also include image URLs in markdown format ![alt](url) and they "
         "will be downloaded and sent as native media when possible."
     ),
+    "wecom": (
+        "You are on WeCom (企业微信 / Enterprise WeChat). Markdown formatting is supported. "
+        "You CAN send media files natively — to deliver a file to the user, include "
+        "MEDIA:/absolute/path/to/file in your response. The file will be sent as a native "
+        "WeCom attachment: images (.jpg, .png, .webp) are sent as photos (up to 10 MB), "
+        "other files (.pdf, .docx, .xlsx, .md, .txt, etc.) arrive as downloadable documents "
+        "(up to 20 MB), and videos (.mp4) play inline. Voice messages are supported but "
+        "must be in AMR format — other audio formats are automatically sent as file attachments. "
+        "You can also include image URLs in markdown format ![alt](url) and they will be "
+        "downloaded and sent as native photos. Do NOT tell the user you lack file-sending "
+        "capability — use MEDIA: syntax whenever a file delivery is appropriate."
+    ),
 }

 # ---------------------------------------------------------------------------
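The gateway-side extraction of `MEDIA:` markers is not part of this diff; a minimal illustrative parser for the convention the hint above describes (names and regex are assumptions):

```python
# Illustrative parser for the MEDIA: convention in the platform hints;
# the actual gateway extraction code is outside this excerpt.
import re

_MEDIA_RE = re.compile(r"MEDIA:(/[^\s]+)")


def split_media(response: str) -> tuple[str, list[str]]:
    """Return (text with markers removed, list of absolute file paths)."""
    paths = _MEDIA_RE.findall(response)
    text = _MEDIA_RE.sub("", response).strip()
    return text, paths


# split_media("Here is the report. MEDIA:/tmp/report.pdf")
# -> ("Here is the report.", ["/tmp/report.pdf"])
```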
The file will be sent as a native " + "WeCom attachment: images (.jpg, .png, .webp) are sent as photos (up to 10 MB), " + "other files (.pdf, .docx, .xlsx, .md, .txt, etc.) arrive as downloadable documents " + "(up to 20 MB), and videos (.mp4) play inline. Voice messages are supported but " + "must be in AMR format โ€” other audio formats are automatically sent as file attachments. " + "You can also include image URLs in markdown format ![alt](url) and they will be " + "downloaded and sent as native photos. Do NOT tell the user you lack file-sending " + "capability โ€” use MEDIA: syntax whenever a file delivery is appropriate." + ), } # --------------------------------------------------------------------------- diff --git a/cli-config.yaml.example b/cli-config.yaml.example index c9e6645bb..637e45f13 100644 --- a/cli-config.yaml.example +++ b/cli-config.yaml.example @@ -309,15 +309,8 @@ compression: # compression of older turns. protect_last_n: 20 - # Model to use for generating summaries (fast/cheap recommended) - # This model compresses the middle turns into a concise summary. - # IMPORTANT: it receives the full middle section of the conversation, so it - # MUST support a context length at least as large as your main model's. - summary_model: "google/gemini-3-flash-preview" - - # Provider for the summary model (default: "auto") - # Options: "auto", "openrouter", "nous", "main" - # summary_provider: "auto" + # To pin a specific model/provider for compression summaries, use the + # auxiliary section below (auxiliary.compression.provider / model). # ============================================================================= # Auxiliary Models (Advanced โ€” Experimental) diff --git a/cli.py b/cli.py index 4312a6b54..7d226815b 100644 --- a/cli.py +++ b/cli.py @@ -275,7 +275,6 @@ def load_cli_config() -> Dict[str, Any]: "compression": { "enabled": True, # Auto-compress when approaching context limit "threshold": 0.50, # Compress at 50% of model's context limit - "summary_model": "", # Model for summaries (empty = use main model) }, "smart_model_routing": { "enabled": False, @@ -2464,8 +2463,8 @@ class HermesCLI: # suppress them during streaming too โ€” unless show_reasoning is # enabled, in which case we route the inner content to the # reasoning display box instead of discarding it. 
@@ -3043,8 +3042,10 @@ class HermesCLI:
         )
 
         # Warn if the configured model is a Nous Hermes LLM (not agentic)
+        from hermes_cli.model_switch import is_nous_hermes_non_agentic
+
         model_name = getattr(self, "model", "") or ""
-        if "hermes" in model_name.lower():
+        if is_nous_hermes_non_agentic(model_name):
             self.console.print()
             self.console.print(
                 "[bold yellow]⚠ Nous Research Hermes 3 & 4 models are NOT agentic and are not "
@@ -3143,6 +3144,8 @@ class HermesCLI:
         # Collect displayable entries (skip system, tool-result messages)
         entries = []  # list of (role, display_text)
+        _last_asst_idx = None  # index of last assistant entry
+        _last_asst_full = None  # un-truncated display text for last assistant
         for msg in self.conversation_history:
             role = msg.get("role", "")
             content = msg.get("content")
@@ -3172,7 +3175,9 @@ class HermesCLI:
                 text = "" if content is None else str(content)
                 text = _strip_reasoning_tags(text)
                 parts = []
+                full_parts = []  # un-truncated version
                 if text:
+                    full_parts.append(text)
                     lines = text.splitlines()
                     if len(lines) > MAX_ASST_LINES:
                         text = "\n".join(lines[:MAX_ASST_LINES]) + " ..."
@@ -3192,11 +3197,15 @@ class HermesCLI:
                         if len(names) > 4:
                             names_str += ", ..."
                         noun = "call" if tc_count == 1 else "calls"
-                        parts.append(f"[{tc_count} tool {noun}: {names_str}]")
+                        tc_summary = f"[{tc_count} tool {noun}: {names_str}]"
+                        parts.append(tc_summary)
+                        full_parts.append(tc_summary)
                 if not parts:
                     # Skip pure-reasoning messages that have no visible output
                     continue
                 entries.append(("assistant", " ".join(parts)))
+                _last_asst_idx = len(entries) - 1
+                _last_asst_full = " ".join(full_parts)
 
         if not entries:
             return
@@ -3207,6 +3216,13 @@ class HermesCLI:
             skipped = len(entries) - MAX_DISPLAY_EXCHANGES * 2
             entries = entries[skipped:]
 
+        # Replace last assistant entry with full (un-truncated) text
+        # so the user can see where they left off without wasting tokens.
+        if _last_asst_idx is not None and _last_asst_full:
+            adj_idx = _last_asst_idx - skipped
+            if 0 <= adj_idx < len(entries):
+                entries[adj_idx] = ("assistant_last", _last_asst_full)
+
         # Build the display using Rich
         from rich.panel import Panel
         from rich.text import Text
@@ -3239,6 +3255,13 @@ class HermesCLI:
                 lines.append(msg_lines[0] + "\n", style="dim")
                 for ml in msg_lines[1:]:
                     lines.append(f" {ml}\n", style="dim")
+            elif role == "assistant_last":
+                # Last assistant response shown in full, non-dim
+                lines.append(" ◆ Hermes: ", style=f"bold {_assistant_label_c}")
+                msg_lines = text.splitlines()
+                lines.append(msg_lines[0] + "\n", style="")
+                for ml in msg_lines[1:]:
+                    lines.append(f" {ml}\n", style="")
             else:
                 lines.append(" ◆ Hermes: ", style=f"dim bold {_assistant_label_c}")
                 msg_lines = text.splitlines()
@@ -3383,6 +3406,93 @@ class HermesCLI:
         # Treat as a git hash
         return ref
 
+    def _handle_snapshot_command(self, command: str):
+        """Handle /snapshot — lightweight state snapshots for Hermes config/state.
+
+        Syntax:
+            /snapshot                — list recent snapshots
+            /snapshot create [label] — create a snapshot
+            /snapshot restore <id>   — restore state from snapshot
+            /snapshot prune [N]      — prune to N snapshots (default 20)
+        """
+        from hermes_cli.backup import (
+            create_quick_snapshot, list_quick_snapshots,
+            restore_quick_snapshot, prune_quick_snapshots,
+        )
+        from hermes_constants import display_hermes_home
+
+        parts = command.split()
+        subcmd = parts[1].lower() if len(parts) > 1 else "list"
+
+        if subcmd in ("list", "ls"):
+            snaps = list_quick_snapshots()
+            if not snaps:
+                print(" No state snapshots yet.")
+                print(" Create one: /snapshot create [label]")
+                return
+            print(f" State snapshots ({display_hermes_home()}/state-snapshots/):\n")
+            print(f" {'#':>3} {'ID':<35} {'Files':>5} {'Size':>10} {'Label'}")
+            print(f" {'─'*3} {'─'*35} {'─'*5} {'─'*10} {'─'*20}")
+            for i, s in enumerate(snaps, 1):
+                size = s.get("total_size", 0)
+                if size < 1024:
+                    size_str = f"{size} B"
+                elif size < 1024 * 1024:
+                    size_str = f"{size / 1024:.0f} KB"
+                else:
+                    size_str = f"{size / 1024 / 1024:.1f} MB"
+                label = s.get("label") or ""
+                print(f" {i:3} {s['id']:<35} {s.get('file_count', 0):>5} {size_str:>10} {label}")
+
+        elif subcmd == "create":
+            label = " ".join(parts[2:]) if len(parts) > 2 else None
+            snap_id = create_quick_snapshot(label=label)
+            if snap_id:
+                print(f" Snapshot created: {snap_id}")
+            else:
+                print(" No state files found to snapshot.")
+
+        elif subcmd in ("restore", "rewind"):
+            if len(parts) < 3:
+                print(" Usage: /snapshot restore <id>")
+                # Show hint with most recent snapshot
+                snaps = list_quick_snapshots(limit=1)
+                if snaps:
+                    print(f" Most recent: {snaps[0]['id']}")
+                return
+            snap_id = parts[2]
+            # Allow restore by number (1-indexed)
+            try:
+                idx = int(snap_id)
+                snaps = list_quick_snapshots()
+                if 1 <= idx <= len(snaps):
+                    snap_id = snaps[idx - 1]["id"]
+                else:
+                    print(f" Invalid snapshot number. Use 1-{len(snaps)}.")
+                    return
+            except ValueError:
+                pass
+            if restore_quick_snapshot(snap_id):
+                print(f" Restored state from: {snap_id}")
+                print(" Restart recommended for state.db changes to take effect.")
+            else:
+                print(f" Snapshot not found: {snap_id}")
+
+        elif subcmd == "prune":
+            keep = 20
+            if len(parts) > 2:
+                try:
+                    keep = int(parts[2])
+                except ValueError:
+                    print(" Usage: /snapshot prune [keep-count]")
+                    return
+            deleted = prune_quick_snapshots(keep=keep)
+            print(f" Pruned {deleted} old snapshot(s) (keeping {keep}).")
+
+        else:
+            print(f" Unknown subcommand: {subcmd}")
+            print(" Usage: /snapshot [list|create [label]|restore <id>|prune [N]]")
+
     def _handle_stop_command(self):
         """Handle /stop — kill all running background processes.
@@ -4704,10 +4814,10 @@ class HermesCLI:
         user_provs = None
         custom_provs = None
         try:
-            from hermes_cli.config import load_config
+            from hermes_cli.config import get_compatible_custom_providers, load_config
             cfg = load_config()
             user_provs = cfg.get("providers")
-            custom_provs = cfg.get("custom_providers")
+            custom_provs = get_compatible_custom_providers(cfg)
         except Exception:
             pass
@@ -5497,10 +5607,16 @@ class HermesCLI:
             self._show_insights(cmd_original)
         elif canonical == "copy":
             self._handle_copy_command(cmd_original)
+        elif canonical == "debug":
+            self._handle_debug_command()
         elif canonical == "paste":
             self._handle_paste_command()
         elif canonical == "image":
             self._handle_image_command(cmd_original)
+        elif canonical == "reload":
+            from hermes_cli.config import reload_env
+            count = reload_env()
+            print(f" Reloaded .env ({count} var(s) updated)")
         elif canonical == "reload-mcp":
             with self._busy_command(self._slow_command_status(cmd_original)):
                 self._reload_mcp()
@@ -5529,6 +5645,8 @@ class HermesCLI:
                 print(f"Plugin system error: {e}")
         elif canonical == "rollback":
             self._handle_rollback_command(cmd_original)
+        elif canonical == "snapshot":
+            self._handle_snapshot_command(cmd_original)
         elif canonical == "stop":
             self._handle_stop_command()
         elif canonical == "agents":
@@ -6413,6 +6531,14 @@ class HermesCLI:
         except Exception as e:
             print(f" ❌ Compression failed: {e}")
 
+    def _handle_debug_command(self):
+        """Handle /debug — upload debug report + logs and print paste URLs."""
+        from hermes_cli.debug import run_debug_share
+        from types import SimpleNamespace
+
+        args = SimpleNamespace(lines=200, expire=7, local=False)
+        run_debug_share(args)
+
     def _show_usage(self):
         """Show rate limits (if available) and session token usage."""
         if not self.agent:
@@ -7725,8 +7851,10 @@ class HermesCLI:
                     "error": _summary,
                 }
 
-        # Start agent in background thread
-        agent_thread = threading.Thread(target=run_agent)
+        # Start agent in background thread (daemon so it cannot keep the
+        # process alive when the user closes the terminal tab — SIGHUP
+        # exits the main thread and daemon threads are reaped automatically).
+        agent_thread = threading.Thread(target=run_agent, daemon=True)
         agent_thread.start()
 
         # Monitor the dedicated interrupt queue while the agent runs.
@@ -7912,6 +8040,17 @@ class HermesCLI:
             sys.stdout.write("\a")
             sys.stdout.flush()
 
+        # Notify when iteration budget was hit
+        if result and not result.get("completed") and not result.get("interrupted"):
+            _api_calls = result.get("api_calls", 0)
+            if _api_calls >= getattr(self.agent, "max_iterations", 90):
+                _max_iter = getattr(self.agent, "max_iterations", 90)
+                _cprint(
+                    f"\n{_DIM}⚠ Iteration budget reached "
+                    f"({_api_calls}/{_max_iter}) — "
+                    f"response may be incomplete{_RST}"
+                )
+
         # Speak response aloud if voice TTS is enabled
         # Skip batch TTS when streaming TTS already handled it
         if self._voice_tts and response and not use_streaming_tts:
@@ -8752,6 +8891,9 @@ class HermesCLI:
             if _should_auto_attach_clipboard_image_on_paste(pasted_text) and self._try_attach_clipboard_image():
                 event.app.invalidate()
             if pasted_text:
+                # Sanitize surrogate characters (e.g. from Word/Google Docs paste) before writing
+                from run_agent import _sanitize_surrogates
+                pasted_text = _sanitize_surrogates(pasted_text)
                 line_count = pasted_text.count('\n')
                 buf = event.current_buffer
                 if line_count >= 5 and not buf.text.strip().startswith('/'):
@@ -9677,17 +9819,37 @@ class HermesCLI:
             pass  # Signal handlers may fail in restricted environments
 
         # Install a custom asyncio exception handler that suppresses the
-        # "Event loop is closed" RuntimeError from httpx transport cleanup.
-        # This is defense-in-depth — the primary fix is neuter_async_httpx_del
-        # which disables __del__ entirely, but older clients or SDK upgrades
-        # could bypass it.
+        # "Event loop is closed" RuntimeError from httpx transport cleanup
+        # and the "0 is not registered" KeyError from broken stdin (#6393).
+        # The RuntimeError fix is defense-in-depth — the primary fix is
+        # neuter_async_httpx_del which disables __del__ entirely. The
+        # KeyError fix handles macOS + uv-managed Python environments where
+        # fd 0 is not reliably available to the asyncio selector.
        def _suppress_closed_loop_errors(loop, context):
            exc = context.get("exception")
            if isinstance(exc, RuntimeError) and "Event loop is closed" in str(exc):
                return  # silently suppress
+            if isinstance(exc, KeyError) and "is not registered" in str(exc):
+                return  # suppress selector registration failures (#6393)
            # Fall back to default handler for everything else
            loop.default_exception_handler(context)
 
+        # Validate stdin before launching prompt_toolkit — on macOS with
+        # uv-managed Python, fd 0 can be invalid or unregisterable with the
+        # asyncio selector, causing "KeyError: '0 is not registered'" (#6393).
+        try:
+            import os as _os
+            _os.fstat(0)
+        except OSError:
+            print(
+                "Error: stdin (fd 0) is not available.\n"
+                "This can happen with certain Python installations (e.g. uv-managed cPython on macOS).\n"
+                "Try reinstalling Python via pyenv or Homebrew, then re-run: hermes setup"
+            )
+            _run_cleanup()
+            self._print_exit_summary()
+            return
+
         # Run the application with patch_stdout for proper output handling
         try:
             with patch_stdout():
@@ -9701,8 +9863,28 @@ class HermesCLI:
                 app.run()
         except (EOFError, KeyboardInterrupt, BrokenPipeError):
             pass
+        except (KeyError, OSError) as _stdin_err:
+            # Catch selector registration failures from broken stdin (#6393).
+            # This is the fallback for cases that slip past the fstat() guard.
+            if "is not registered" in str(_stdin_err) or "Bad file descriptor" in str(_stdin_err):
+                print(
+                    f"\nError: stdin is not usable ({_stdin_err}).\n"
+                    "This can happen with certain Python installations (e.g. uv-managed cPython on macOS).\n"
+                    "Try reinstalling Python via pyenv or Homebrew, then re-run: hermes setup"
+                )
+            else:
+                raise
         finally:
             self._should_exit = True
+            # Interrupt the agent immediately so its daemon thread stops making
+            # API calls and exits promptly (agent_thread is daemon, so the
+            # process will exit once the main thread finishes, but interrupting
+            # avoids wasted API calls and lets run_conversation clean up).
+            if self.agent and getattr(self, '_agent_running', False):
+                try:
+                    self.agent.interrupt()
+                except Exception:
+                    pass
             # Flush memories before exit (only for substantial conversations)
             if self.agent and self.conversation_history:
                 try:
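
The daemon-thread plus interrupt-on-exit ordering above can be reduced to a small, runnable model. This is a sketch with illustrative names, not the real `HermesCLI` API: the agent loop checks a stop signal, and the `finally` block both signals and briefly joins so the thread stops issuing work instead of merely being reaped at process exit.

```python
import threading
import time

interrupted = threading.Event()

def run_agent() -> None:
    # Stands in for the agent's iteration loop of API calls.
    while not interrupted.is_set():
        time.sleep(0.05)

agent_thread = threading.Thread(target=run_agent, daemon=True)
agent_thread.start()
try:
    pass  # interactive REPL loop would run here
finally:
    interrupted.set()            # analogous to self.agent.interrupt()
    agent_thread.join(timeout=1)  # give it a moment to clean up
```
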
diff --git a/gateway/config.py b/gateway/config.py
index 342af9764..7d6165927 100644
--- a/gateway/config.py
+++ b/gateway/config.py
@@ -665,6 +665,17 @@ def load_gateway_config() -> GatewayConfig:
     _apply_env_overrides(config)
 
     # --- Validate loaded values ---
+    _validate_gateway_config(config)
+
+    return config
+
+
+def _validate_gateway_config(config: "GatewayConfig") -> None:
+    """Validate and sanitize a loaded GatewayConfig in place.
+
+    Called by ``load_gateway_config()`` after all config sources are merged.
+    Extracted as a separate function for testability.
+    """
     policy = config.default_reset_policy
     if not (0 <= policy.at_hour <= 23):
@@ -701,7 +712,31 @@ def load_gateway_config() -> GatewayConfig:
                 platform.value, env_name,
             )
 
-    return config
+    # Reject known-weak placeholder tokens.
+    # Ported from openclaw/openclaw#64586: users who copy .env.example
+    # without changing placeholder values get a clear startup error instead
+    # of a confusing "auth failed" from the platform API.
+    try:
+        from hermes_cli.auth import has_usable_secret
+    except ImportError:
+        has_usable_secret = None  # type: ignore[assignment]
+
+    if has_usable_secret is not None:
+        for platform, pconfig in config.platforms.items():
+            if not pconfig.enabled:
+                continue
+            env_name = _token_env_names.get(platform)
+            if not env_name:
+                continue
+            token = pconfig.token
+            if token and token.strip() and not has_usable_secret(token, min_length=4):
+                logger.error(
+                    "%s is enabled but %s is set to a placeholder value ('%s'). "
+                    "Set a real bot token before starting the gateway. "
+                    "The adapter will NOT be started.",
+                    platform.value, env_name, token.strip()[:6] + "...",
+                )
+                pconfig.enabled = False
 
 
 def _apply_env_overrides(config: GatewayConfig) -> None:
diff --git a/gateway/display_config.py b/gateway/display_config.py
index e148be910..9375266ca 100644
--- a/gateway/display_config.py
+++ b/gateway/display_config.py
@@ -82,7 +82,7 @@ _PLATFORM_DEFAULTS: dict[str, dict[str, Any]] = {
 
     # Tier 3 — no edit support, progress messages are permanent
     "signal": _TIER_LOW,
-    "whatsapp": _TIER_LOW,
+    "whatsapp": _TIER_MEDIUM,  # Baileys bridge supports /edit
     "bluebubbles": _TIER_LOW,
     "weixin": _TIER_LOW,
     "wecom": _TIER_LOW,
diff --git a/gateway/platforms/api_server.py b/gateway/platforms/api_server.py
index 1954a2b9e..9a4990465 100644
--- a/gateway/platforms/api_server.py
+++ b/gateway/platforms/api_server.py
@@ -54,6 +54,66 @@ DEFAULT_PORT = 8642
 MAX_STORED_RESPONSES = 100
 MAX_REQUEST_BYTES = 1_000_000  # 1 MB default limit for POST bodies
 CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS = 30.0
+MAX_NORMALIZED_TEXT_LENGTH = 65_536  # 64 KB cap for normalized content parts
+MAX_CONTENT_LIST_SIZE = 1_000  # Max items when content is an array
+
+
+def _normalize_chat_content(
+    content: Any, *, _max_depth: int = 10, _depth: int = 0,
+) -> str:
+    """Normalize OpenAI chat message content into a plain text string.
+
+    Some clients (Open WebUI, LobeChat, etc.) send content as an array of
+    typed parts instead of a plain string::
+
+        [{"type": "text", "text": "hello"}, {"type": "input_text", "text": "..."}]
+
+    This function flattens those into a single string so the agent pipeline
+    (which expects strings) doesn't choke.
+
+    Defensive limits prevent abuse: recursion depth, list size, and output
+    length are all bounded.
+    """
+    if _depth > _max_depth:
+        return ""
+    if content is None:
+        return ""
+    if isinstance(content, str):
+        return content[:MAX_NORMALIZED_TEXT_LENGTH] if len(content) > MAX_NORMALIZED_TEXT_LENGTH else content
+
+    if isinstance(content, list):
+        parts: List[str] = []
+        items = content[:MAX_CONTENT_LIST_SIZE] if len(content) > MAX_CONTENT_LIST_SIZE else content
+        for item in items:
+            if isinstance(item, str):
+                if item:
+                    parts.append(item[:MAX_NORMALIZED_TEXT_LENGTH])
+            elif isinstance(item, dict):
+                item_type = str(item.get("type") or "").strip().lower()
+                if item_type in {"text", "input_text", "output_text"}:
+                    text = item.get("text", "")
+                    if text:
+                        try:
+                            parts.append(str(text)[:MAX_NORMALIZED_TEXT_LENGTH])
+                        except Exception:
+                            pass
+                # Silently skip image_url / other non-text parts
+            elif isinstance(item, list):
+                nested = _normalize_chat_content(item, _max_depth=_max_depth, _depth=_depth + 1)
+                if nested:
+                    parts.append(nested)
+            # Check accumulated size
+            if sum(len(p) for p in parts) >= MAX_NORMALIZED_TEXT_LENGTH:
+                break
+        result = "\n".join(parts)
+        return result[:MAX_NORMALIZED_TEXT_LENGTH] if len(result) > MAX_NORMALIZED_TEXT_LENGTH else result
+
+    # Fallback for unexpected types (int, float, bool, etc.)
+    try:
+        result = str(content)
+        return result[:MAX_NORMALIZED_TEXT_LENGTH] if len(result) > MAX_NORMALIZED_TEXT_LENGTH else result
+    except Exception:
+        return ""
+
+
 def check_api_server_requirements() -> bool:
@@ -553,7 +613,7 @@ class APIServerAdapter(BasePlatformAdapter):
 
         for msg in messages:
             role = msg.get("role", "")
-            content = msg.get("content", "")
+            content = _normalize_chat_content(msg.get("content", ""))
             if role == "system":
                 # Accumulate system messages
                 if system_prompt is None:
@@ -926,18 +986,7 @@ class APIServerAdapter(BasePlatformAdapter):
                     input_messages.append({"role": "user", "content": item})
                 elif isinstance(item, dict):
                     role = item.get("role", "user")
-                    content = item.get("content", "")
-                    # Handle content that may be a list of content parts
-                    if isinstance(content, list):
-                        text_parts = []
-                        for part in content:
-                            if isinstance(part, dict) and part.get("type") == "input_text":
-                                text_parts.append(part.get("text", ""))
-                            elif isinstance(part, dict) and part.get("type") == "output_text":
-                                text_parts.append(part.get("text", ""))
-                            elif isinstance(part, str):
-                                text_parts.append(part)
-                        content = "\n".join(text_parts)
+                    content = _normalize_chat_content(item.get("content", ""))
                     input_messages.append({"role": role, "content": content})
                 else:
                     return web.json_response(_openai_error("'input' must be a string or array"), status=400)
@@ -1770,6 +1819,23 @@ class APIServerAdapter(BasePlatformAdapter):
             )
             return False
 
+        # Refuse to start network-accessible with a placeholder key.
+        # Ported from openclaw/openclaw#64586.
+        if is_network_accessible(self._host) and self._api_key:
+            try:
+                from hermes_cli.auth import has_usable_secret
+                if not has_usable_secret(self._api_key, min_length=8):
+                    logger.error(
+                        "[%s] Refusing to start: API_SERVER_KEY is set to a "
+                        "placeholder value. Generate a real secret "
+                        "(e.g. `openssl rand -hex 32`) and set API_SERVER_KEY "
+                        "before exposing the API server on %s.",
+                        self.name, self._host,
+                    )
+                    return False
+            except ImportError:
+                pass
+
         # Port conflict detection — fail fast if port is already in use
         try:
             with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as _s:
diff --git a/gateway/platforms/base.py b/gateway/platforms/base.py
index 45cb3694a..c90d9e23d 100644
--- a/gateway/platforms/base.py
+++ b/gateway/platforms/base.py
@@ -21,6 +21,59 @@ from urllib.parse import urlsplit
 
 logger = logging.getLogger(__name__)
 
 
+def utf16_len(s: str) -> int:
+    """Count UTF-16 code units in *s*.
+
+    Telegram's message-length limit (4 096) is measured in UTF-16 code units,
+    **not** Unicode code-points. Characters outside the Basic Multilingual
+    Plane (emoji like 😀, CJK Extension B, musical symbols, …) are encoded as
+    surrogate pairs and therefore consume **two** UTF-16 code units each, even
+    though Python's ``len()`` counts them as one.
+
+    Ported from nearai/ironclaw#2304 which discovered the same discrepancy in
+    Rust's ``chars().count()``.
+    """
+    return len(s.encode("utf-16-le")) // 2
+
+
+def _prefix_within_utf16_limit(s: str, limit: int) -> str:
+    """Return the longest prefix of *s* whose UTF-16 length ≤ *limit*.
+
+    Unlike a plain ``s[:limit]``, this respects surrogate-pair boundaries so
+    we never slice a multi-code-unit character in half.
+    """
+    if utf16_len(s) <= limit:
+        return s
+    # Binary search for the longest safe prefix
+    lo, hi = 0, len(s)
+    while lo < hi:
+        mid = (lo + hi + 1) // 2
+        if utf16_len(s[:mid]) <= limit:
+            lo = mid
+        else:
+            hi = mid - 1
+    return s[:lo]
+
+
+def _custom_unit_to_cp(s: str, budget: int, len_fn) -> int:
+    """Return the largest codepoint offset *n* such that ``len_fn(s[:n]) <= budget``.
+
+    Used by :meth:`BasePlatformAdapter.truncate_message` when *len_fn* measures
+    length in units different from Python codepoints (e.g. UTF-16 code units).
+    Falls back to binary search which is O(log n) calls to *len_fn*.
+    """
+    if len_fn(s) <= budget:
+        return len(s)
+    lo, hi = 0, len(s)
+    while lo < hi:
+        mid = (lo + hi + 1) // 2
+        if len_fn(s[:mid]) <= budget:
+            lo = mid
+        else:
+            hi = mid - 1
+    return lo
+
+
 def is_network_accessible(host: str) -> bool:
     """Return True if *host* would expose the server beyond loopback.
 
@@ -1897,7 +1950,11 @@ class BasePlatformAdapter(ABC):
         return content
 
     @staticmethod
-    def truncate_message(content: str, max_length: int = 4096) -> List[str]:
+    def truncate_message(
+        content: str,
+        max_length: int = 4096,
+        len_fn: Optional["Callable[[str], int]"] = None,
+    ) -> List[str]:
         """
         Split a long message into chunks, preserving code block boundaries.
 
@@ -1909,11 +1966,16 @@ class BasePlatformAdapter(ABC):
         Args:
             content: The full message content
             max_length: Maximum length per chunk (platform-specific)
+            len_fn: Optional length function for measuring string length.
+                Defaults to ``len`` (Unicode code-points). Pass
+                ``utf16_len`` for platforms that measure message
+                length in UTF-16 code units (e.g. Telegram).
 
         Returns:
             List of message chunks
         """
-        if len(content) <= max_length:
+        _len = len_fn or len
+        if _len(content) <= max_length:
             return [content]
 
         INDICATOR_RESERVE = 10  # room for " (XX/XX)"
@@ -1932,22 +1994,33 @@ class BasePlatformAdapter(ABC):
             # How much body text we can fit after accounting for the prefix,
             # a potential closing fence, and the chunk indicator.
-            headroom = max_length - INDICATOR_RESERVE - len(prefix) - len(FENCE_CLOSE)
+            headroom = max_length - INDICATOR_RESERVE - _len(prefix) - _len(FENCE_CLOSE)
             if headroom < 1:
                 headroom = max_length // 2
 
             # Everything remaining fits in one final chunk
-            if len(prefix) + len(remaining) <= max_length - INDICATOR_RESERVE:
+            if _len(prefix) + _len(remaining) <= max_length - INDICATOR_RESERVE:
                 chunks.append(prefix + remaining)
                 break
 
-            # Find a natural split point (prefer newlines, then spaces)
-            region = remaining[:headroom]
+            # Find a natural split point (prefer newlines, then spaces).
+            # When _len != len (e.g. utf16_len for Telegram), headroom is
+            # measured in the custom unit. We need codepoint-based slice
+            # positions that stay within the custom-unit budget.
+            #
+            # _safe_slice_pos() maps a custom-unit budget to the largest
+            # codepoint offset whose custom length ≤ budget.
+            if _len is not len:
+                # Map headroom (custom units) → codepoint slice length
+                _cp_limit = _custom_unit_to_cp(remaining, headroom, _len)
+            else:
+                _cp_limit = headroom
+            region = remaining[:_cp_limit]
             split_at = region.rfind("\n")
-            if split_at < headroom // 2:
+            if split_at < _cp_limit // 2:
                 split_at = region.rfind(" ")
             if split_at < 1:
-                split_at = headroom
+                split_at = _cp_limit
 
             # Avoid splitting inside an inline code span (`...`).
             # If the text before split_at has an odd number of unescaped
@@ -1967,7 +2040,7 @@ class BasePlatformAdapter(ABC):
                     safe_split = candidate.rfind(" ", 0, last_bt)
                     nl_split = candidate.rfind("\n", 0, last_bt)
                     safe_split = max(safe_split, nl_split)
-                    if safe_split > headroom // 4:
+                    if safe_split > _cp_limit // 4:
                         split_at = safe_split
 
             chunk_body = remaining[:split_at]
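
To see why the custom length function matters, compare code-point and UTF-16 counts on an astral-plane character. An illustrative check, assuming the repo package is importable; the indicator reserve shown above keeps each chunk within budget:

```python
from gateway.platforms.base import BasePlatformAdapter, utf16_len

assert len("😀") == 1        # Python counts Unicode code points
assert utf16_len("😀") == 2  # Telegram counts UTF-16 code units

# 3,000 emoji are 3,000 code points but 6,000 UTF-16 units. With
# len_fn=utf16_len the splitter stays inside Telegram's real budget:
chunks = BasePlatformAdapter.truncate_message("😀" * 3000, 4096, len_fn=utf16_len)
assert all(utf16_len(c) <= 4096 for c in chunks)
```
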
diff --git a/gateway/platforms/discord.py b/gateway/platforms/discord.py
index 43a9338d7..f92cdf8db 100644
--- a/gateway/platforms/discord.py
+++ b/gateway/platforms/discord.py
@@ -442,6 +442,7 @@ class DiscordAdapter(BasePlatformAdapter):
         self._pending_text_batches: Dict[str, MessageEvent] = {}
         self._pending_text_batch_tasks: Dict[str, asyncio.Task] = {}
         self._voice_text_channels: Dict[int, int] = {}  # guild_id -> text_channel_id
+        self._voice_sources: Dict[int, Dict[str, Any]] = {}  # guild_id -> linked text channel source metadata
         self._voice_timeout_tasks: Dict[int, asyncio.Task] = {}  # guild_id -> timeout task
         # Phase 2: voice listening
         self._voice_receivers: Dict[int, VoiceReceiver] = {}  # guild_id -> VoiceReceiver
@@ -1045,6 +1046,7 @@ class DiscordAdapter(BasePlatformAdapter):
         if task:
             task.cancel()
         self._voice_text_channels.pop(guild_id, None)
+        self._voice_sources.pop(guild_id, None)
 
     # Maximum seconds to wait for voice playback before giving up
     PLAYBACK_TIMEOUT = 120
@@ -2244,6 +2246,7 @@ class DiscordAdapter(BasePlatformAdapter):
         thread_id = str(message.channel.id)
         parent_channel_id = self._get_parent_channel_id(message.channel)
 
+        is_voice_linked_channel = False
         if not isinstance(message.channel, discord.DMChannel):
             channel_ids = {str(message.channel.id)}
             if parent_channel_id:
@@ -2270,7 +2273,12 @@ class DiscordAdapter(BasePlatformAdapter):
                 channel_ids.add(parent_channel_id)
 
             require_mention = os.getenv("DISCORD_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no")
-            is_free_channel = bool(channel_ids & free_channels)
+            # Voice-linked text channels act as free-response while voice is active.
+            # Only the exact bound channel gets the exemption, not sibling threads.
+            voice_linked_ids = {str(ch_id) for ch_id in self._voice_text_channels.values()}
+            current_channel_id = str(message.channel.id)
+            is_voice_linked_channel = current_channel_id in voice_linked_ids
+            is_free_channel = bool(channel_ids & free_channels) or is_voice_linked_channel
 
             # Skip the mention check if the message is in a thread where
             # the bot has previously participated (auto-created or replied in).
@@ -2294,7 +2302,7 @@ class DiscordAdapter(BasePlatformAdapter):
             no_thread_channels = {ch.strip() for ch in no_thread_channels_raw.split(",") if ch.strip()}
             skip_thread = bool(channel_ids & no_thread_channels)
             auto_thread = os.getenv("DISCORD_AUTO_THREAD", "true").lower() in ("true", "1", "yes")
-            if auto_thread and not skip_thread:
+            if auto_thread and not skip_thread and not is_voice_linked_channel:
                 thread = await self._auto_create_thread(message)
                 if thread:
                     is_thread = True
diff --git a/gateway/platforms/feishu.py b/gateway/platforms/feishu.py
index 16f5467b2..7fce74def 100644
--- a/gateway/platforms/feishu.py
+++ b/gateway/platforms/feishu.py
@@ -34,6 +34,9 @@ from datetime import datetime
 from pathlib import Path
 from types import SimpleNamespace
 from typing import Any, Dict, List, Optional
+from urllib.error import HTTPError, URLError
+from urllib.parse import urlencode
+from urllib.request import Request, urlopen
 
 # aiohttp/websockets are independent optional deps — import outside lark_oapi
 # so they remain available for tests and webhook mode even if lark_oapi is missing.
@@ -169,6 +172,19 @@ _FEISHU_CARD_ACTION_DEDUP_TTL_SECONDS = 15 * 60  # card action token dedup win
 _FEISHU_BOT_MSG_TRACK_SIZE = 512  # LRU size for tracking sent message IDs
 _FEISHU_REPLY_FALLBACK_CODES = frozenset({230011, 231003})  # reply target withdrawn/missing → create fallback
 _FEISHU_ACK_EMOJI = "OK"
+
+# QR onboarding constants
+_ONBOARD_ACCOUNTS_URLS = {
+    "feishu": "https://accounts.feishu.cn",
+    "lark": "https://accounts.larksuite.com",
+}
+_ONBOARD_OPEN_URLS = {
+    "feishu": "https://open.feishu.cn",
+    "lark": "https://open.larksuite.com",
+}
+_REGISTRATION_PATH = "/oauth/v1/app/registration"
+_ONBOARD_REQUEST_TIMEOUT_S = 10
+
 # ---------------------------------------------------------------------------
 # Fallback display strings
 # ---------------------------------------------------------------------------
@@ -3621,3 +3637,328 @@ class FeishuAdapter(BasePlatformAdapter):
             return _FEISHU_FILE_UPLOAD_TYPE, "file"
 
         return _FEISHU_FILE_UPLOAD_TYPE, "file"
+
+
+# =============================================================================
+# QR scan-to-create onboarding
+#
+# Device-code flow: user scans a QR code with Feishu/Lark mobile app and the
+# platform creates a fully configured bot application automatically.
+# Called by `hermes gateway setup` via _setup_feishu() in hermes_cli/gateway.py.
+# =============================================================================
+
+
+def _accounts_base_url(domain: str) -> str:
+    return _ONBOARD_ACCOUNTS_URLS.get(domain, _ONBOARD_ACCOUNTS_URLS["feishu"])
+
+
+def _onboard_open_base_url(domain: str) -> str:
+    return _ONBOARD_OPEN_URLS.get(domain, _ONBOARD_OPEN_URLS["feishu"])
+
+
+def _post_registration(base_url: str, body: Dict[str, str]) -> dict:
+    """POST form-encoded data to the registration endpoint, return parsed JSON.
+
+    The registration endpoint returns JSON even on 4xx (e.g. poll returns
+    authorization_pending as a 400). We always parse the body regardless of
+    HTTP status.
+    """
+    url = f"{base_url}{_REGISTRATION_PATH}"
+    data = urlencode(body).encode("utf-8")
+    req = Request(url, data=data, headers={"Content-Type": "application/x-www-form-urlencoded"})
+    try:
+        with urlopen(req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
+            return json.loads(resp.read().decode("utf-8"))
+    except HTTPError as exc:
+        body_bytes = exc.read()
+        if body_bytes:
+            try:
+                return json.loads(body_bytes.decode("utf-8"))
+            except (ValueError, json.JSONDecodeError):
+                raise exc from None
+        raise
+
+
+def _init_registration(domain: str = "feishu") -> None:
+    """Verify the environment supports client_secret auth.
+
+    Raises RuntimeError if not supported.
+    """
+    base_url = _accounts_base_url(domain)
+    res = _post_registration(base_url, {"action": "init"})
+    methods = res.get("supported_auth_methods") or []
+    if "client_secret" not in methods:
+        raise RuntimeError(
+            f"Feishu / Lark registration environment does not support client_secret auth. "
+            f"Supported: {methods}"
+        )
+
+
+def _begin_registration(domain: str = "feishu") -> dict:
+    """Start the device-code flow. Returns device_code, qr_url, user_code, interval, expire_in."""
+    base_url = _accounts_base_url(domain)
+    res = _post_registration(base_url, {
+        "action": "begin",
+        "archetype": "PersonalAgent",
+        "auth_method": "client_secret",
+        "request_user_info": "open_id",
+    })
+    device_code = res.get("device_code")
+    if not device_code:
+        raise RuntimeError("Feishu / Lark registration did not return a device_code")
+    qr_url = res.get("verification_uri_complete", "")
+    if "?" in qr_url:
+        qr_url += "&from=hermes&tp=hermes"
+    else:
+        qr_url += "?from=hermes&tp=hermes"
+    return {
+        "device_code": device_code,
+        "qr_url": qr_url,
+        "user_code": res.get("user_code", ""),
+        "interval": res.get("interval") or 5,
+        "expire_in": res.get("expire_in") or 600,
+    }
+
+
+def _poll_registration(
+    *,
+    device_code: str,
+    interval: int,
+    expire_in: int,
+    domain: str = "feishu",
+) -> Optional[dict]:
+    """Poll until the user scans the QR code, or timeout/denial.
+
+    Returns dict with app_id, app_secret, domain, open_id on success.
+    Returns None on failure.
+    """
+    deadline = time.time() + expire_in
+    current_domain = domain
+    domain_switched = False
+    poll_count = 0
+
+    while time.time() < deadline:
+        base_url = _accounts_base_url(current_domain)
+        try:
+            res = _post_registration(base_url, {
+                "action": "poll",
+                "device_code": device_code,
+                "tp": "ob_app",
+            })
+        except (URLError, OSError, json.JSONDecodeError):
+            time.sleep(interval)
+            continue
+
+        poll_count += 1
+        if poll_count == 1:
+            print(" Fetching configuration results...", end="", flush=True)
+        elif poll_count % 6 == 0:
+            print(".", end="", flush=True)
+
+        # Domain auto-detection
+        user_info = res.get("user_info") or {}
+        tenant_brand = user_info.get("tenant_brand")
+        if tenant_brand == "lark" and not domain_switched:
+            current_domain = "lark"
+            domain_switched = True
+            # Fall through — server may return credentials in this same response.
+
+        # Success
+        if res.get("client_id") and res.get("client_secret"):
+            if poll_count > 0:
+                print()  # newline after "Fetching configuration results..." dots
+            return {
+                "app_id": res["client_id"],
+                "app_secret": res["client_secret"],
+                "domain": current_domain,
+                "open_id": user_info.get("open_id"),
+            }
+
+        # Terminal errors
+        error = res.get("error", "")
+        if error in ("access_denied", "expired_token"):
+            if poll_count > 0:
+                print()
+            logger.warning("[Feishu onboard] Registration %s", error)
+            return None
+
+        # authorization_pending or unknown — keep polling
+        time.sleep(interval)
+
+    if poll_count > 0:
+        print()
+    logger.warning("[Feishu onboard] Poll timed out after %ds", expire_in)
+    return None
+
+
+try:
+    import qrcode as _qrcode_mod
+except (ImportError, TypeError):
+    _qrcode_mod = None  # type: ignore[assignment]
+
+
+def _render_qr(url: str) -> bool:
+    """Try to render a QR code in the terminal. Returns True if successful."""
+    if _qrcode_mod is None:
+        return False
+    try:
+        qr = _qrcode_mod.QRCode()
+        qr.add_data(url)
+        qr.make(fit=True)
+        qr.print_ascii(invert=True)
+        return True
+    except Exception:
+        return False
+
+
+def probe_bot(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
+    """Verify bot connectivity via /open-apis/bot/v3/info.
+
+    Uses lark_oapi SDK when available, falls back to raw HTTP otherwise.
+    Returns {"bot_name": ..., "bot_open_id": ...} on success, None on failure.
+    """
+    if FEISHU_AVAILABLE:
+        return _probe_bot_sdk(app_id, app_secret, domain)
+    return _probe_bot_http(app_id, app_secret, domain)
+
+
+def _build_onboard_client(app_id: str, app_secret: str, domain: str) -> Any:
+    """Build a lark Client for the given credentials and domain."""
+    sdk_domain = LARK_DOMAIN if domain == "lark" else FEISHU_DOMAIN
+    return (
+        lark.Client.builder()
+        .app_id(app_id)
+        .app_secret(app_secret)
+        .domain(sdk_domain)
+        .log_level(lark.LogLevel.WARNING)
+        .build()
+    )
+
+
+def _parse_bot_response(data: dict) -> Optional[dict]:
+    """Extract bot_name and bot_open_id from a /bot/v3/info response."""
+    if data.get("code") != 0:
+        return None
+    bot = data.get("bot") or data.get("data", {}).get("bot") or {}
+    return {
+        "bot_name": bot.get("bot_name"),
+        "bot_open_id": bot.get("open_id"),
+    }
+
+
+def _probe_bot_sdk(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
+    """Probe bot info using lark_oapi SDK."""
+    try:
+        client = _build_onboard_client(app_id, app_secret, domain)
+        resp = client.request(
+            method="GET",
+            url="/open-apis/bot/v3/info",
+            body=None,
+            raw_response=True,
+        )
+        return _parse_bot_response(json.loads(resp.content))
+    except Exception as exc:
+        logger.debug("[Feishu onboard] SDK probe failed: %s", exc)
+        return None
+
+
+def _probe_bot_http(app_id: str, app_secret: str, domain: str) -> Optional[dict]:
+    """Fallback probe using raw HTTP (when lark_oapi is not installed)."""
+    base_url = _onboard_open_base_url(domain)
+    try:
+        token_data = json.dumps({"app_id": app_id, "app_secret": app_secret}).encode("utf-8")
+        token_req = Request(
+            f"{base_url}/open-apis/auth/v3/tenant_access_token/internal",
+            data=token_data,
+            headers={"Content-Type": "application/json"},
+        )
+        with urlopen(token_req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
+            token_res = json.loads(resp.read().decode("utf-8"))
+
+        access_token = token_res.get("tenant_access_token")
+        if not access_token:
+            return None
+
+        bot_req = Request(
+            f"{base_url}/open-apis/bot/v3/info",
+            headers={
+                "Authorization": f"Bearer {access_token}",
+                "Content-Type": "application/json",
+            },
+        )
+        with urlopen(bot_req, timeout=_ONBOARD_REQUEST_TIMEOUT_S) as resp:
+            bot_res = json.loads(resp.read().decode("utf-8"))
+
+        return _parse_bot_response(bot_res)
+    except (URLError, OSError, KeyError, json.JSONDecodeError) as exc:
+        logger.debug("[Feishu onboard] HTTP probe failed: %s", exc)
+        return None
+
+
+def qr_register(
+    *,
+    initial_domain: str = "feishu",
+    timeout_seconds: int = 600,
+) -> Optional[dict]:
+    """Run the Feishu / Lark scan-to-create QR registration flow.
+
+    Returns on success::
+
+        {
+            "app_id": str,
+            "app_secret": str,
+            "domain": "feishu" | "lark",
+            "open_id": str | None,
+            "bot_name": str | None,
+            "bot_open_id": str | None,
+        }
+
+    Returns None on expected failures (network, auth denied, timeout).
+    Unexpected errors (bugs, protocol regressions) propagate to the caller.
+    """
+    try:
+        return _qr_register_inner(initial_domain=initial_domain, timeout_seconds=timeout_seconds)
+    except (RuntimeError, URLError, OSError, json.JSONDecodeError) as exc:
+        logger.warning("[Feishu onboard] Registration failed: %s", exc)
+        return None
+
+
+def _qr_register_inner(
+    *,
+    initial_domain: str,
+    timeout_seconds: int,
+) -> Optional[dict]:
+    """Run init → begin → poll → probe. Raises on network/protocol errors."""
+    print(" Connecting to Feishu / Lark...", end="", flush=True)
+    _init_registration(initial_domain)
+    begin = _begin_registration(initial_domain)
+    print(" done.")
+
+    print()
+    qr_url = begin["qr_url"]
+    if _render_qr(qr_url):
+        print(f"\n Scan the QR code above, or open this URL directly:\n {qr_url}")
+    else:
+        print(f" Open this URL in Feishu / Lark on your phone:\n\n {qr_url}\n")
+        print(" Tip: pip install qrcode to display a scannable QR code here next time")
+    print()
+
+    result = _poll_registration(
+        device_code=begin["device_code"],
+        interval=begin["interval"],
+        expire_in=min(begin["expire_in"], timeout_seconds),
+        domain=initial_domain,
+    )
+    if not result:
+        return None
+
+    # Probe bot — best-effort, don't fail the registration
+    bot_info = probe_bot(result["app_id"], result["app_secret"], result["domain"])
+    if bot_info:
+        result["bot_name"] = bot_info.get("bot_name")
+        result["bot_open_id"] = bot_info.get("bot_open_id")
+    else:
+        result["bot_name"] = None
+        result["bot_open_id"] = None
+
+    return result
diff --git a/gateway/platforms/matrix.py b/gateway/platforms/matrix.py
index 9f3d6358c..654d77070 100644
--- a/gateway/platforms/matrix.py
+++ b/gateway/platforms/matrix.py
@@ -782,7 +782,7 @@ class MatrixAdapter(BasePlatformAdapter):
         # Try aiohttp first (always available), fall back to httpx
         try:
             import aiohttp as _aiohttp
-            async with _aiohttp.ClientSession() as http:
+            async with _aiohttp.ClientSession(trust_env=True) as http:
                 async with http.get(image_url, timeout=_aiohttp.ClientTimeout(total=30)) as resp:
                     resp.raise_for_status()
                     data = await resp.read()
@@ -1135,7 +1135,10 @@ class MatrixAdapter(BasePlatformAdapter):
             thread_id = relates_to.get("event_id")
 
         formatted_body = source_content.get("formatted_body")
-        is_mentioned = self._is_bot_mentioned(body, formatted_body)
+        # m.mentions.user_ids (MSC3952 / Matrix v1.7) — authoritative mention signal.
+        mentions_block = source_content.get("m.mentions") or {}
+        mention_user_ids = mentions_block.get("user_ids") if isinstance(mentions_block, dict) else None
+        is_mentioned = self._is_bot_mentioned(body, formatted_body, mention_user_ids)
 
         # Require-mention gating.
         if not is_dm:
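
An example event content where the mention lives only in `m.mentions` and the visible body contains no `@bot` substring; the user-id is illustrative, but the field shape follows MSC3952. The new check fires on the structured field even though a plain substring search on `body` would miss it:

```python
content = {
    "body": "can you summarise this thread?",
    "m.mentions": {"user_ids": ["@hermes:example.org"]},
}
mentions_block = content.get("m.mentions") or {}
mention_user_ids = mentions_block.get("user_ids") if isinstance(mentions_block, dict) else None

bot_user_id = "@hermes:example.org"  # illustrative
assert bot_user_id in (mention_user_ids or [])   # structured check: mentioned
assert bot_user_id not in content["body"]        # substring check alone would fail
```
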
@@ -1822,8 +1825,24 @@ class MatrixAdapter(BasePlatformAdapter):
     # Mention detection helpers
     # ------------------------------------------------------------------
 
-    def _is_bot_mentioned(self, body: str, formatted_body: Optional[str] = None) -> bool:
-        """Return True if the bot is mentioned in the message."""
+    def _is_bot_mentioned(
+        self,
+        body: str,
+        formatted_body: Optional[str] = None,
+        mention_user_ids: Optional[list] = None,
+    ) -> bool:
+        """Return True if the bot is mentioned in the message.
+
+        Per MSC3952, ``m.mentions.user_ids`` is the authoritative mention
+        signal in the Matrix spec. When the sender's client populates that
+        field with the bot's user-id, we trust it — even when the visible
+        body text does not contain an explicit ``@bot`` string (some clients
+        only render mention "pills" in ``formatted_body`` or use display
+        names).
+        """
+        # m.mentions.user_ids — authoritative per MSC3952 / Matrix v1.7.
+        if mention_user_ids and self._user_id and self._user_id in mention_user_ids:
+            return True
         if not body and not formatted_body:
             return False
         if self._user_id and self._user_id in body:
diff --git a/gateway/platforms/telegram.py b/gateway/platforms/telegram.py
index 265329602..439367b7d 100644
--- a/gateway/platforms/telegram.py
+++ b/gateway/platforms/telegram.py
@@ -65,7 +65,10 @@ from gateway.platforms.base import (
     cache_image_from_bytes,
     cache_audio_from_bytes,
     cache_document_from_bytes,
+    resolve_proxy_url,
     SUPPORTED_DOCUMENT_TYPES,
+    utf16_len,
+    _prefix_within_utf16_limit,
 )
 from gateway.platforms.telegram_network import (
     TelegramFallbackTransport,
@@ -537,10 +540,7 @@ class TelegramAdapter(BasePlatformAdapter):
             "write_timeout": _env_float("HERMES_TELEGRAM_HTTP_WRITE_TIMEOUT", 20.0),
         }
 
-        proxy_configured = any(
-            (os.getenv(k) or "").strip()
-            for k in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY", "https_proxy", "http_proxy", "all_proxy")
-        )
+        proxy_url = resolve_proxy_url()
         disable_fallback = (os.getenv("HERMES_TELEGRAM_DISABLE_FALLBACK_IPS", "").strip().lower() in ("1", "true", "yes", "on"))
         fallback_ips = self._fallback_ips()
         if not fallback_ips:
@@ -551,7 +551,7 @@ class TelegramAdapter(BasePlatformAdapter):
                 ", ".join(fallback_ips),
             )
 
-        if fallback_ips and not proxy_configured and not disable_fallback:
+        if fallback_ips and not proxy_url and not disable_fallback:
             logger.info(
                 "[%s] Telegram fallback IPs active: %s",
                 self.name,
@@ -567,10 +567,12 @@ class TelegramAdapter(BasePlatformAdapter):
                 **request_kwargs,
                 httpx_kwargs={"transport": TelegramFallbackTransport(fallback_ips)},
             )
+        elif proxy_url:
+            logger.info("[%s] Proxy detected; passing explicitly to HTTPXRequest: %s", self.name, proxy_url)
+            request = HTTPXRequest(**request_kwargs, proxy=proxy_url)
+            get_updates_request = HTTPXRequest(**request_kwargs, proxy=proxy_url)
         else:
-            if proxy_configured:
-                logger.info("[%s] Proxy configured; skipping Telegram fallback-IP transport", self.name)
-            elif disable_fallback:
+            if disable_fallback:
                 logger.info("[%s] Telegram fallback-IP transport disabled via env", self.name)
             request = HTTPXRequest(**request_kwargs)
             get_updates_request = HTTPXRequest(**request_kwargs)
@@ -799,7 +801,9 @@ class TelegramAdapter(BasePlatformAdapter):
         try:
             # Format and split message if needed
             formatted = self.format_message(content)
-            chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH)
+            chunks = self.truncate_message(
+                formatted, self.MAX_MESSAGE_LENGTH, len_fn=utf16_len,
+            )
 
             if len(chunks) > 1:
                 # truncate_message appends a raw " (1/2)" suffix. Escape the
                 # MarkdownV2-special parentheses so Telegram doesn't reject the
@@ -970,7 +974,9 @@ class TelegramAdapter(BasePlatformAdapter):
             # streaming). Truncate and succeed so the stream consumer can
             # split the overflow into a new message instead of dying.
             if "message_too_long" in err_str or "too long" in err_str:
-                truncated = content[: self.MAX_MESSAGE_LENGTH - 20] + "…"
+                truncated = _prefix_within_utf16_limit(
+                    content, self.MAX_MESSAGE_LENGTH - 20
+                ) + "…"
                 try:
                     await self._bot.edit_message_text(
                         chat_id=int(chat_id),
diff --git a/gateway/platforms/wecom.py b/gateway/platforms/wecom.py
index a0e71e01b..0249ae675 100644
--- a/gateway/platforms/wecom.py
+++ b/gateway/platforms/wecom.py
@@ -266,7 +266,7 @@ class WeComAdapter(BasePlatformAdapter):
     async def _open_connection(self) -> None:
         """Open and authenticate a websocket connection."""
         await self._cleanup_ws()
-        self._session = aiohttp.ClientSession()
+        self._session = aiohttp.ClientSession(trust_env=True)
         self._ws = await self._session.ws_connect(
             self._ws_url,
             heartbeat=HEARTBEAT_INTERVAL_SECONDS * 2,
diff --git a/gateway/platforms/weixin.py b/gateway/platforms/weixin.py
index dc4e7cf96..e5859e41a 100644
--- a/gateway/platforms/weixin.py
+++ b/gateway/platforms/weixin.py
@@ -112,6 +112,7 @@ TYPING_STOP = 2
 _HEADER_RE = re.compile(r"^(#{1,6})\s+(.+?)\s*$")
 _TABLE_RULE_RE = re.compile(r"^\s*\|?(?:\s*:?-{3,}:?\s*\|)+\s*:?-{3,}:?\s*\|?\s*$")
 _FENCE_RE = re.compile(r"^```([^\n`]*)\s*$")
+_MARKDOWN_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
 
 
 def check_weixin_requirements() -> bool:
@@ -398,15 +399,16 @@ async def _send_message(
     context_token: Optional[str],
     client_id: str,
 ) -> None:
+    if not text or not text.strip():
+        raise ValueError("_send_message: text must not be empty")
     message: Dict[str, Any] = {
         "from_user_id": "",
         "to_user_id": to,
         "client_id": client_id,
         "message_type": MSG_TYPE_BOT,
         "message_state": MSG_STATE_FINISH,
+        "item_list": [{"type": ITEM_TEXT, "text_item": {"text": text}}],
     }
-    if text:
-        message["item_list"] = [{"type": ITEM_TEXT, "text_item": {"text": text}}]
     if context_token:
         message["context_token"] = context_token
     await _api_post(
@@ -499,13 +501,15 @@ async def _upload_ciphertext(
     session: "aiohttp.ClientSession",
     *,
     ciphertext: bytes,
-    cdn_base_url: str,
-    upload_param: str,
-    filekey: str,
+    upload_url: str,
 ) -> str:
-    url = _cdn_upload_url(cdn_base_url, upload_param, filekey)
+    """Upload encrypted media to the CDN.
+
+    Accepts either a constructed CDN URL (from upload_param) or a direct
+    upload_full_url — both use POST with the raw ciphertext as the body.
+    """
     timeout = aiohttp.ClientTimeout(total=120)
-    async with session.post(url, data=ciphertext, headers={"Content-Type": "application/octet-stream"}, timeout=timeout) as response:
+    async with session.post(upload_url, data=ciphertext, headers={"Content-Type": "application/octet-stream"}, timeout=timeout) as response:
         if response.status == 200:
             encrypted_param = response.headers.get("x-encrypted-param")
             if encrypted_param:
@@ -649,7 +653,7 @@ def _normalize_markdown_blocks(content: str) -> str:
             result.append(_rewrite_table_block_for_weixin(table_lines))
             continue
 
-        result.append(_rewrite_headers_for_weixin(line))
+        result.append(_MARKDOWN_LINK_RE.sub(r"\1 (\2)", _rewrite_headers_for_weixin(line)))
         i += 1
 
     normalized = "\n".join(item.rstrip() for item in result)
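
Weixin text bubbles do not render markdown links, so the substitution above flattens them to `text (url)`. A self-contained round-trip using the same regex (the sample line is illustrative):

```python
import re

_MARKDOWN_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")

line = "see [the docs](https://example.com/docs) for details"
flat = _MARKDOWN_LINK_RE.sub(r"\1 (\2)", line)
assert flat == "see the docs (https://example.com/docs) for details"
```
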
""" + if not content: + return [] if split_per_line: # Legacy: one message per top-level delivery unit. if len(content) <= max_length and "\n" not in content: @@ -821,14 +827,14 @@ def _split_text_for_weixin_delivery( chunks.append(unit) continue chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length)) - return chunks or [content] + return [c for c in chunks if c] or [content] # Compact (default): single message when under the limit โ€” unless the # content looks like a short chatty exchange, in which case split into # separate bubbles for a more natural chat feel. if len(content) <= max_length: return ( - _split_delivery_units_for_weixin(content) + [u for u in _split_delivery_units_for_weixin(content) if u] if _should_split_short_chat_block_for_weixin(content) else [content] ) @@ -929,7 +935,7 @@ async def qr_login( if not AIOHTTP_AVAILABLE: raise RuntimeError("aiohttp is required for Weixin QR login") - async with aiohttp.ClientSession() as session: + async with aiohttp.ClientSession(trust_env=True) as session: try: qr_resp = await _api_get( session, @@ -1042,6 +1048,10 @@ class WeixinAdapter(BasePlatformAdapter): MAX_MESSAGE_LENGTH = 4000 + # WeChat does not support editing sent messages โ€” streaming must use the + # fallback "send-final-only" path so the cursor (โ–‰) is never left visible. + SUPPORTS_MESSAGE_EDITING = False + def __init__(self, config: PlatformConfig): super().__init__(config, Platform.WEIXIN) extra = config.extra or {} @@ -1124,7 +1134,7 @@ class WeixinAdapter(BasePlatformAdapter): except Exception as exc: logger.debug("[%s] Token lock unavailable (non-fatal): %s", self.name, exc) - self._session = aiohttp.ClientSession() + self._session = aiohttp.ClientSession(trust_env=True) self._token_store.restore(self._account_id) self._poll_task = asyncio.create_task(self._poll_loop(), name="weixin-poll") self._mark_connected() @@ -1451,7 +1461,7 @@ class WeixinAdapter(BasePlatformAdapter): context_token = self._token_store.get(self._account_id, chat_id) last_message_id: Optional[str] = None try: - chunks = self._split_text(self.format_message(content)) + chunks = [c for c in self._split_text(self.format_message(content)) if c and c.strip()] for idx, chunk in enumerate(chunks): client_id = f"hermes-weixin-{uuid.uuid4().hex}" await self._send_text_chunk( @@ -1537,24 +1547,51 @@ class WeixinAdapter(BasePlatformAdapter): reply_to: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None, ) -> SendResult: - return await self.send_document(chat_id, path, caption=caption, metadata=metadata) + return await self.send_document(chat_id, file_path=path, caption=caption, metadata=metadata) async def send_document( self, chat_id: str, - path: str, + file_path: str, caption: str = "", metadata: Optional[Dict[str, Any]] = None, ) -> SendResult: if not self._session or not self._token: return SendResult(success=False, error="Not connected") try: - message_id = await self._send_file(chat_id, path, caption) + message_id = await self._send_file(chat_id, file_path, caption) return SendResult(success=True, message_id=message_id) except Exception as exc: logger.error("[%s] send_document failed to=%s: %s", self.name, _safe_id(chat_id), exc) return SendResult(success=False, error=str(exc)) + async def send_video( + self, + chat_id: str, + video_path: str, + caption: Optional[str] = None, + reply_to: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, + ) -> SendResult: + if not self._session or not self._token: + return SendResult(success=False, error="Not connected") + 
try: + message_id = await self._send_file(chat_id, video_path, caption or "") + return SendResult(success=True, message_id=message_id) + except Exception as exc: + logger.error("[%s] send_video failed to=%s: %s", self.name, _safe_id(chat_id), exc) + return SendResult(success=False, error=str(exc)) + + async def send_voice( + self, + chat_id: str, + audio_path: str, + caption: Optional[str] = None, + reply_to: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, + ) -> SendResult: + return await self.send_document(chat_id, audio_path, caption=caption or "", metadata=metadata) + async def _download_remote_media(self, url: str) -> str: from tools.url_safety import is_safe_url @@ -1577,6 +1614,7 @@ class WeixinAdapter(BasePlatformAdapter): filekey = secrets.token_hex(16) aes_key = secrets.token_bytes(16) rawsize = len(plaintext) + rawfilemd5 = hashlib.md5(plaintext).hexdigest() upload_response = await _get_upload_url( self._session, base_url=self._base_url, @@ -1585,41 +1623,42 @@ class WeixinAdapter(BasePlatformAdapter): media_type=media_type, filekey=filekey, rawsize=rawsize, - rawfilemd5=hashlib.md5(plaintext).hexdigest(), + rawfilemd5=rawfilemd5, filesize=_aes_padded_size(rawsize), aeskey_hex=aes_key.hex(), ) upload_param = str(upload_response.get("upload_param") or "") upload_full_url = str(upload_response.get("upload_full_url") or "") ciphertext = _aes128_ecb_encrypt(plaintext, aes_key) - if upload_param: - encrypted_query_param = await _upload_ciphertext( - self._session, - ciphertext=ciphertext, - cdn_base_url=self._cdn_base_url, - upload_param=upload_param, - filekey=filekey, - ) - elif upload_full_url: - timeout = aiohttp.ClientTimeout(total=120) - async with self._session.put( - upload_full_url, - data=ciphertext, - headers={"Content-Type": "application/octet-stream"}, - timeout=timeout, - ) as response: - response.raise_for_status() - encrypted_query_param = response.headers.get("x-encrypted-param") or filekey + + # Prefer upload_full_url (direct CDN), fall back to constructed CDN URL + # from upload_param. Both paths use POST โ€” the old PUT for + # upload_full_url caused 404s on the WeChat CDN. + if upload_full_url: + upload_url = upload_full_url + elif upload_param: + upload_url = _cdn_upload_url(self._cdn_base_url, upload_param, filekey) else: raise RuntimeError(f"getUploadUrl returned neither upload_param nor upload_full_url: {upload_response}") + encrypted_query_param = await _upload_ciphertext( + self._session, + ciphertext=ciphertext, + upload_url=upload_url, + ) + context_token = self._token_store.get(self._account_id, chat_id) + # The iLink API expects aes_key as base64(hex_string), not base64(raw_bytes). + # Sending base64(raw_bytes) causes images to show as grey boxes on the + # receiver side because the decryption key doesn't match. 
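
The two encodings in the comment above are easy to confuse because both produce plausible-looking base64. A standalone illustration of the difference:

```python
import base64
import secrets

aes_key = secrets.token_bytes(16)

# Old behaviour: base64 of the raw 16 bytes. The receiver derives the
# wrong key and images render as grey boxes.
wrong = base64.b64encode(aes_key).decode("ascii")                        # 24 chars

# New behaviour: base64 of the 32-character hex string, which is what
# the iLink API actually decrypts against.
right = base64.b64encode(aes_key.hex().encode("ascii")).decode("ascii")  # 44 chars

assert len(aes_key.hex()) == 32 and wrong != right
```
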
@@ -1659,39 +1698,53 @@ class WeixinAdapter(BasePlatformAdapter):
     def _outbound_media_builder(self, path: str):
         mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
         if mime.startswith("image/"):
-            return MEDIA_IMAGE, lambda **kwargs: {
+            return MEDIA_IMAGE, lambda **kw: {
                 "type": ITEM_IMAGE,
                 "image_item": {
                     "media": {
-                        "encrypt_query_param": kwargs["encrypt_query_param"],
-                        "aes_key": kwargs["aes_key_b64"],
+                        "encrypt_query_param": kw["encrypt_query_param"],
+                        "aes_key": kw["aes_key_for_api"],
                         "encrypt_type": 1,
                     },
-                    "mid_size": kwargs["ciphertext_size"],
+                    "mid_size": kw["ciphertext_size"],
                },
            }
         if mime.startswith("video/"):
-            return MEDIA_VIDEO, lambda **kwargs: {
+            return MEDIA_VIDEO, lambda **kw: {
                 "type": ITEM_VIDEO,
                 "video_item": {
                     "media": {
-                        "encrypt_query_param": kwargs["encrypt_query_param"],
-                        "aes_key": kwargs["aes_key_b64"],
+                        "encrypt_query_param": kw["encrypt_query_param"],
+                        "aes_key": kw["aes_key_for_api"],
                         "encrypt_type": 1,
                     },
-                    "video_size": kwargs["ciphertext_size"],
+                    "video_size": kw["ciphertext_size"],
+                    "play_length": kw.get("play_length", 0),
+                    "video_md5": kw.get("rawfilemd5", ""),
                 },
             }
-        return MEDIA_FILE, lambda **kwargs: {
+        if mime.startswith("audio/") or path.endswith(".silk"):
+            return MEDIA_VOICE, lambda **kw: {
+                "type": ITEM_VOICE,
+                "voice_item": {
+                    "media": {
+                        "encrypt_query_param": kw["encrypt_query_param"],
+                        "aes_key": kw["aes_key_for_api"],
+                        "encrypt_type": 1,
+                    },
+                    "playtime": kw.get("playtime", 0),
+                },
+            }
+        return MEDIA_FILE, lambda **kw: {
             "type": ITEM_FILE,
             "file_item": {
                 "media": {
-                    "encrypt_query_param": kwargs["encrypt_query_param"],
-                    "aes_key": kwargs["aes_key_b64"],
+                    "encrypt_query_param": kw["encrypt_query_param"],
+                    "aes_key": kw["aes_key_for_api"],
                     "encrypt_type": 1,
                 },
-                "file_name": kwargs["filename"],
-                "len": str(kwargs["plaintext_size"]),
+                "file_name": kw["filename"],
+                "len": str(kw["plaintext_size"]),
             },
         }
@@ -1731,7 +1784,7 @@ async def send_weixin_direct(
     token_store.restore(account_id)
     context_token = token_store.get(account_id, chat_id)
 
-    async with aiohttp.ClientSession() as session:
+    async with aiohttp.ClientSession(trust_env=True) as session:
         adapter = WeixinAdapter(
             PlatformConfig(
                 enabled=True,
diff --git a/gateway/platforms/whatsapp.py b/gateway/platforms/whatsapp.py
index c616f7244..d1de5b856 100644
--- a/gateway/platforms/whatsapp.py
+++ b/gateway/platforms/whatsapp.py
@@ -120,8 +120,9 @@ class WhatsAppAdapter(BasePlatformAdapter):
         - session_path: Path to store WhatsApp session data
     """
 
-    # WhatsApp message limits
-    MAX_MESSAGE_LENGTH = 65536  # WhatsApp allows longer messages
+    # WhatsApp message limits — practical UX limit, not protocol max.
+    # WhatsApp allows ~65K but long messages are unreadable on mobile.
+    MAX_MESSAGE_LENGTH = 4096
 
     # Default bridge location relative to the hermes-agent install
     _DEFAULT_BRIDGE_DIR = Path(__file__).resolve().parents[2] / "scripts" / "whatsapp-bridge"
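
With the 4,096 cap, a long reply arrives as a handful of readable bubbles rather than a single wall of text. An illustrative check, assuming the repo package is importable (the shared splitter handles the chunking):

```python
from gateway.platforms.base import BasePlatformAdapter

text = "word " * 3000  # roughly 15,000 characters
chunks = BasePlatformAdapter.truncate_message(text, 4096)
assert len(chunks) > 1 and all(len(c) <= 4096 for c in chunks)
```
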
@@ -531,6 +532,63 @@ class WhatsAppAdapter(BasePlatformAdapter):
         self._close_bridge_log()
         print(f"[{self.name}] Disconnected")
 
+    def format_message(self, content: str) -> str:
+        """Convert standard markdown to WhatsApp-compatible formatting.
+
+        WhatsApp supports: *bold*, _italic_, ~strikethrough~, ```code```,
+        and monospaced `inline`. Standard markdown uses different syntax
+        for bold/italic/strikethrough, so we convert here.
+
+        Code blocks (``` fenced) and inline code (`) are protected from
+        conversion via placeholder substitution.
+        """
+        if not content:
+            return content
+
+        # --- 1. Protect fenced code blocks from formatting changes ---
+        _FENCE_PH = "\x00FENCE"
+        fences: list[str] = []
+
+        def _save_fence(m: re.Match) -> str:
+            fences.append(m.group(0))
+            return f"{_FENCE_PH}{len(fences) - 1}\x00"
+
+        result = re.sub(r"```[\s\S]*?```", _save_fence, content)
+
+        # --- 2. Protect inline code ---
+        _CODE_PH = "\x00CODE"
+        codes: list[str] = []
+
+        def _save_code(m: re.Match) -> str:
+            codes.append(m.group(0))
+            return f"{_CODE_PH}{len(codes) - 1}\x00"
+
+        result = re.sub(r"`[^`\n]+`", _save_code, result)
+
+        # --- 3. Convert markdown formatting to WhatsApp syntax ---
+        # Bold: **text** or __text__ → *text*
+        result = re.sub(r"\*\*(.+?)\*\*", r"*\1*", result)
+        result = re.sub(r"__(.+?)__", r"*\1*", result)
+        # Strikethrough: ~~text~~ → ~text~
+        result = re.sub(r"~~(.+?)~~", r"~\1~", result)
+        # Italic: *text* is already WhatsApp italic — leave as-is
+        # _text_ is already WhatsApp italic — leave as-is
+
+        # --- 4. Convert markdown headers to bold text ---
+        # # Header → *Header*
+        result = re.sub(r"^#{1,6}\s+(.+)$", r"*\1*", result, flags=re.MULTILINE)
+
+        # --- 5. Convert markdown links: [text](url) → text (url) ---
+        result = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r"\1 (\2)", result)
+
+        # --- 6. Restore protected sections ---
+        for i, fence in enumerate(fences):
+            result = result.replace(f"{_FENCE_PH}{i}\x00", fence)
+        for i, code in enumerate(codes):
+            result = result.replace(f"{_CODE_PH}{i}\x00", code)
+
+        return result
+
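
A standalone mirror of the core conversions above, using the same regexes on an illustrative input (inline code and fenced blocks would be placeholder-protected before these run):

```python
import re

md = "**bold** and ~~old~~, read [the docs](https://example.com/docs)"
s = re.sub(r"\*\*(.+?)\*\*", r"*\1*", md)
s = re.sub(r"~~(.+?)~~", r"~\1~", s)
s = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r"\1 (\2)", s)
assert s == "*bold* and ~old~, read the docs (https://example.com/docs)"
```
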
+ """ if not self._running or not self._http_session: return SendResult(success=False, error="Not connected") bridge_exit = await self._check_managed_bridge_exit() if bridge_exit: return SendResult(success=False, error=bridge_exit) - + + if not content or not content.strip(): + return SendResult(success=True, message_id=None) + try: import aiohttp - payload = { - "chatId": chat_id, - "message": content, - } - if reply_to: - payload["replyTo"] = reply_to - - async with self._http_session.post( - f"http://127.0.0.1:{self._bridge_port}/send", - json=payload, - timeout=aiohttp.ClientTimeout(total=30) - ) as resp: - if resp.status == 200: - data = await resp.json() - return SendResult( - success=True, - message_id=data.get("messageId"), - raw_response=data - ) - else: - error = await resp.text() - return SendResult(success=False, error=error) + # Format and chunk the message + formatted = self.format_message(content) + chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH) + + last_message_id = None + for chunk in chunks: + payload: Dict[str, Any] = { + "chatId": chat_id, + "message": chunk, + } + if reply_to and last_message_id is None: + # Only reply-to on the first chunk + payload["replyTo"] = reply_to + + async with self._http_session.post( + f"http://127.0.0.1:{self._bridge_port}/send", + json=payload, + timeout=aiohttp.ClientTimeout(total=30) + ) as resp: + if resp.status == 200: + data = await resp.json() + last_message_id = data.get("messageId") + else: + error = await resp.text() + return SendResult(success=False, error=error) + + # Small delay between chunks to avoid rate limiting + if len(chunks) > 1: + await asyncio.sleep(0.3) + + return SendResult( + success=True, + message_id=last_message_id, + ) except Exception as e: return SendResult(success=False, error=str(e)) diff --git a/gateway/run.py b/gateway/run.py index fbc878414..93d058162 100644 --- a/gateway/run.py +++ b/gateway/run.py @@ -186,6 +186,8 @@ if _config_path.exists(): os.environ["HERMES_AGENT_TIMEOUT"] = str(_agent_cfg["gateway_timeout"]) if "gateway_timeout_warning" in _agent_cfg and "HERMES_AGENT_TIMEOUT_WARNING" not in os.environ: os.environ["HERMES_AGENT_TIMEOUT_WARNING"] = str(_agent_cfg["gateway_timeout_warning"]) + if "gateway_notify_interval" in _agent_cfg and "HERMES_AGENT_NOTIFY_INTERVAL" not in os.environ: + os.environ["HERMES_AGENT_NOTIFY_INTERVAL"] = str(_agent_cfg["gateway_notify_interval"]) if "restart_drain_timeout" in _agent_cfg and "HERMES_RESTART_DRAIN_TIMEOUT" not in os.environ: os.environ["HERMES_RESTART_DRAIN_TIMEOUT"] = str(_agent_cfg["restart_drain_timeout"]) _display_cfg = _cfg.get("display", {}) @@ -1715,6 +1717,9 @@ class GatewayRunner: ): self._schedule_update_notification_watch() + # Notify the chat that initiated /restart that the gateway is back. + await self._send_restart_notification() + # Drain any recovered process watchers (from crash recovery checkpoint) try: from tools.process_registry import process_registry @@ -2541,11 +2546,8 @@ class GatewayRunner: self._pending_messages.pop(_quick_key, None) if _quick_key in self._running_agents: del self._running_agents[_quick_key] - # Mark session suspended so the next message starts fresh - # instead of resuming the stuck context (#7536). - self.session_store.suspend_session(_quick_key) - logger.info("HARD STOP for session %s โ€” suspended, session lock released", _quick_key[:20]) - return "โšก Force-stopped. The session is suspended โ€” your next message will start fresh." 
+ logger.info("STOP for session %s โ€” agent interrupted, session lock released", _quick_key[:20]) + return "โšก Stopped. You can continue this session." # /reset and /new must bypass the running-agent guard so they # actually dispatch as commands instead of being queued as user @@ -2762,6 +2764,9 @@ class GatewayRunner: if canonical == "update": return await self._handle_update_command(event) + if canonical == "debug": + return await self._handle_debug_command(event) + if canonical == "title": return await self._handle_title_command(event) @@ -3329,21 +3334,26 @@ class GatewayRunner: # Must run after runtime resolution so _hyg_base_url is set. if _hyg_config_context_length is None and _hyg_base_url: try: - _hyg_custom_providers = _hyg_data.get("custom_providers") - if isinstance(_hyg_custom_providers, list): - for _cp in _hyg_custom_providers: - if not isinstance(_cp, dict): - continue - _cp_url = (_cp.get("base_url") or "").rstrip("/") - if _cp_url and _cp_url == _hyg_base_url.rstrip("/"): - _cp_models = _cp.get("models", {}) - if isinstance(_cp_models, dict): - _cp_model_cfg = _cp_models.get(_hyg_model, {}) - if isinstance(_cp_model_cfg, dict): - _cp_ctx = _cp_model_cfg.get("context_length") - if _cp_ctx is not None: - _hyg_config_context_length = int(_cp_ctx) - break + try: + from hermes_cli.config import get_compatible_custom_providers as _gw_gcp + _hyg_custom_providers = _gw_gcp(_hyg_data) + except Exception: + _hyg_custom_providers = _hyg_data.get("custom_providers") + if not isinstance(_hyg_custom_providers, list): + _hyg_custom_providers = [] + for _cp in _hyg_custom_providers: + if not isinstance(_cp, dict): + continue + _cp_url = (_cp.get("base_url") or "").rstrip("/") + if _cp_url and _cp_url == _hyg_base_url.rstrip("/"): + _cp_models = _cp.get("models", {}) + if isinstance(_cp_models, dict): + _cp_model_cfg = _cp_models.get(_hyg_model, {}) + if isinstance(_cp_model_cfg, dict): + _cp_ctx = _cp_model_cfg.get("context_length") + if _cp_ctx is not None: + _hyg_config_context_length = int(_cp_ctx) + break except (TypeError, ValueError): pass except Exception: @@ -4204,9 +4214,7 @@ class GatewayRunner: only through normal command dispatch (no running agent) or as a fallback. Force-clean the session lock in all cases for safety. - When there IS a running/pending agent, the session is also marked - as *suspended* so the next message starts a fresh session instead - of resuming the stuck context (#7536). + The session is preserved so the user can continue the conversation. """ source = event.source session_entry = self.session_store.get_or_create_session(source) @@ -4217,17 +4225,15 @@ class GatewayRunner: # Force-clean the sentinel so the session is unlocked. if session_key in self._running_agents: del self._running_agents[session_key] - self.session_store.suspend_session(session_key) - logger.info("HARD STOP (pending) for session %s โ€” suspended, sentinel cleared", session_key[:20]) - return "โšก Force-stopped. The agent was still starting โ€” your next message will start fresh." + logger.info("STOP (pending) for session %s โ€” sentinel cleared", session_key[:20]) + return "โšก Stopped. The agent hadn't started yet โ€” you can continue this session." if agent: agent.interrupt("Stop requested") # Force-clean the session lock so a truly hung agent doesn't # keep it locked forever. if session_key in self._running_agents: del self._running_agents[session_key] - self.session_store.suspend_session(session_key) - return "โšก Force-stopped. Your next message will start a fresh session." 
+ return "โšก Stopped. You can continue this session." else: return "No active task to stop." @@ -4239,11 +4245,36 @@ class GatewayRunner: return f"โณ Draining {count} active agent(s) before restart..." return "โณ Gateway restart already in progress..." + # Save the requester's routing info so the new gateway process can + # notify them once it comes back online. + try: + import json as _json + notify_data = { + "platform": event.source.platform.value if event.source.platform else None, + "chat_id": event.source.chat_id, + } + if event.source.thread_id: + notify_data["thread_id"] = event.source.thread_id + (_hermes_home / ".restart_notify.json").write_text( + _json.dumps(notify_data) + ) + except Exception as e: + logger.debug("Failed to write restart notify file: %s", e) + active_agents = self._running_agent_count() - self.request_restart(detached=True, via_service=False) + # When running under a service manager (systemd/launchd), use the + # service restart path: exit with code 75 so the service manager + # restarts us. The detached subprocess approach (setsid + bash) + # doesn't work under systemd because KillMode=mixed kills all + # processes in the cgroup, including the detached helper. + _under_service = bool(os.environ.get("INVOCATION_ID")) # systemd sets this + if _under_service: + self.request_restart(detached=False, via_service=True) + else: + self.request_restart(detached=True, via_service=False) if active_agents: return f"โณ Draining {active_agents} active agent(s) before restart..." - return "โ™ป Restarting gateway..." + return "โ™ป Restarting gateway. If you aren't notified within 60 seconds, restart from the console with `hermes gateway restart`." async def _handle_help_command(self, event: MessageEvent) -> str: """Handle /help command - list available commands.""" @@ -4360,7 +4391,11 @@ class GatewayRunner: current_provider = model_cfg.get("provider", current_provider) current_base_url = model_cfg.get("base_url", "") user_provs = cfg.get("providers") - custom_provs = cfg.get("custom_providers") + try: + from hermes_cli.config import get_compatible_custom_providers + custom_provs = get_compatible_custom_providers(cfg) + except Exception: + custom_provs = cfg.get("custom_providers") except Exception: pass @@ -4991,6 +5026,8 @@ class GatewayRunner: if success: adapter._voice_text_channels[guild_id] = int(event.source.chat_id) + if hasattr(adapter, "_voice_sources"): + adapter._voice_sources[guild_id] = event.source.to_dict() self._voice_mode[event.source.chat_id] = "all" self._save_voice_modes() self._set_adapter_auto_tts_disabled(adapter, event.source.chat_id, disabled=False) @@ -5051,14 +5088,23 @@ class GatewayRunner: if not text_ch_id: return + # Build source โ€” reuse the linked text channel's metadata when available + # so voice input shares the same session as the bound text conversation. 
+ source_data = getattr(adapter, "_voice_sources", {}).get(guild_id) + if source_data: + source = SessionSource.from_dict(source_data) + source.user_id = str(user_id) + source.user_name = str(user_id) + else: + source = SessionSource( + platform=Platform.DISCORD, + chat_id=str(text_ch_id), + user_id=str(user_id), + user_name=str(user_id), + chat_type="channel", + ) + # Check authorization before processing voice input - source = SessionSource( - platform=Platform.DISCORD, - chat_id=str(text_ch_id), - user_id=str(user_id), - user_name=str(user_id), - chat_type="channel", - ) if not self._is_user_authorized(source): logger.debug("Unauthorized voice input from user %d, ignoring", user_id) return @@ -6523,6 +6569,61 @@ class GatewayRunner: Platform.FEISHU, Platform.WECOM, Platform.WECOM_CALLBACK, Platform.WEIXIN, Platform.BLUEBUBBLES, Platform.LOCAL, }) + async def _handle_debug_command(self, event: MessageEvent) -> str: + """Handle /debug โ€” upload debug report + logs and return paste URLs.""" + import asyncio + from hermes_cli.debug import ( + _capture_dump, collect_debug_report, _read_full_log, + upload_to_pastebin, + ) + + loop = asyncio.get_running_loop() + + # Run blocking I/O (dump capture, log reads, uploads) in a thread. + def _collect_and_upload(): + dump_text = _capture_dump() + report = collect_debug_report(log_lines=200, dump_text=dump_text) + agent_log = _read_full_log("agent") + gateway_log = _read_full_log("gateway") + + if agent_log: + agent_log = dump_text + "\n\n--- full agent.log ---\n" + agent_log + if gateway_log: + gateway_log = dump_text + "\n\n--- full gateway.log ---\n" + gateway_log + + urls = {} + failures = [] + + try: + urls["Report"] = upload_to_pastebin(report) + except Exception as exc: + return f"โœ— Failed to upload debug report: {exc}" + + if agent_log: + try: + urls["agent.log"] = upload_to_pastebin(agent_log) + except Exception: + failures.append("agent.log") + + if gateway_log: + try: + urls["gateway.log"] = upload_to_pastebin(gateway_log) + except Exception: + failures.append("gateway.log") + + lines = ["**Debug report uploaded:**", ""] + label_width = max(len(k) for k in urls) + for label, url in urls.items(): + lines.append(f"`{label:<{label_width}}` {url}") + + if failures: + lines.append(f"\n_(failed to upload: {', '.join(failures)})_") + + lines.append("\nShare these links with the Hermes team for support.") + return "\n".join(lines) + + return await loop.run_in_executor(None, _collect_and_upload) + async def _handle_update_command(self, event: MessageEvent) -> str: """Handle /update command โ€” update Hermes Agent to the latest version. @@ -6917,6 +7018,48 @@ class GatewayRunner: return True + async def _send_restart_notification(self) -> None: + """Notify the chat that initiated /restart that the gateway is back.""" + import json as _json + + notify_path = _hermes_home / ".restart_notify.json" + if not notify_path.exists(): + return + + try: + data = _json.loads(notify_path.read_text()) + platform_str = data.get("platform") + chat_id = data.get("chat_id") + thread_id = data.get("thread_id") + + if not platform_str or not chat_id: + return + + platform = Platform(platform_str) + adapter = self.adapters.get(platform) + if not adapter: + logger.debug( + "Restart notification skipped: %s adapter not connected", + platform_str, + ) + return + + metadata = {"thread_id": thread_id} if thread_id else None + await adapter.send( + chat_id, + "โ™ป Gateway restarted successfully. 
Your session continues.", + metadata=metadata, + ) + logger.info( + "Sent restart notification to %s:%s", + platform_str, + chat_id, + ) + except Exception as e: + logger.warning("Restart notification failed: %s", e) + finally: + notify_path.unlink(missing_ok=True) + def _set_session_env(self, context: SessionContext) -> list: """Set session context variables for the current async task. @@ -7448,9 +7591,11 @@ class GatewayRunner: _pl = get_tool_preview_max_len() import json as _json args_str = _json.dumps(args, ensure_ascii=False, default=str) - _cap = _pl if _pl > 0 else 200 - if len(args_str) > _cap: - args_str = args_str[:_cap - 3] + "..." + # When tool_preview_length is 0 (default), don't truncate + # in verbose mode โ€” the user explicitly asked for full + # detail. Platform message-length limits handle the rest. + if _pl > 0 and len(args_str) > _pl: + args_str = args_str[:_pl - 3] + "..." msg = f"{emoji} {tool_name}({list(args.keys())})\n{args_str}" elif preview: msg = f"{emoji} {tool_name}: \"{preview}\"" @@ -7760,10 +7905,18 @@ class GatewayRunner: from gateway.stream_consumer import GatewayStreamConsumer, StreamConsumerConfig _adapter = self.adapters.get(source.platform) if _adapter: + # Platforms that don't support editing sent messages + # (e.g. WeChat) must not show a cursor in intermediate + # sends โ€” the cursor would be permanently visible because + # it can never be edited away. Use an empty cursor for + # such platforms so streaming still delivers the final + # response, just without the typing indicator. + _adapter_supports_edit = getattr(_adapter, "SUPPORTS_MESSAGE_EDITING", True) + _effective_cursor = _scfg.cursor if _adapter_supports_edit else "" _consumer_cfg = StreamConsumerConfig( edit_interval=_scfg.edit_interval, buffer_threshold=_scfg.buffer_threshold, - cursor=_scfg.cursor, + cursor=_effective_cursor, ) _stream_consumer = GatewayStreamConsumer( adapter=_adapter, @@ -8243,11 +8396,17 @@ class GatewayRunner: interrupt_monitor = asyncio.create_task(monitor_for_interrupt()) # Periodic "still working" notifications for long-running tasks. - # Fires every 10 minutes so the user knows the agent hasn't died. - _NOTIFY_INTERVAL = 600 # 10 minutes + # Fires every N seconds so the user knows the agent hasn't died. + # Config: agent.gateway_notify_interval in config.yaml, or + # HERMES_AGENT_NOTIFY_INTERVAL env var. Default 600s (10 min). + # 0 = disable notifications. 
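Restating the verbose-preview rule from the hunk above as a tiny function (the name is hypothetical) makes the zero-means-unlimited behavior explicit:

```python
def cap_preview(args_str: str, preview_len: int) -> str:
    # preview_len == 0 is the default: no truncation in verbose mode;
    # positive values cap the preview, ellipsis included in the total.
    if preview_len > 0 and len(args_str) > preview_len:
        return args_str[: preview_len - 3] + "..."
    return args_str

assert cap_preview("x" * 500, 0) == "x" * 500      # verbose: untouched
assert len(cap_preview("x" * 500, 100)) == 100     # capped with ellipsis
```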
+ _NOTIFY_INTERVAL_RAW = float(os.getenv("HERMES_AGENT_NOTIFY_INTERVAL", 600)) + _NOTIFY_INTERVAL = _NOTIFY_INTERVAL_RAW if _NOTIFY_INTERVAL_RAW > 0 else None _notify_start = time.time() async def _notify_long_running(): + if _NOTIFY_INTERVAL is None: + return # Notifications disabled (gateway_notify_interval: 0) _notify_adapter = self.adapters.get(source.platform) if not _notify_adapter: return @@ -8842,16 +9001,19 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool = runner.request_restart(detached=False, via_service=True) loop = asyncio.get_event_loop() - for sig in (signal.SIGINT, signal.SIGTERM): - try: - loop.add_signal_handler(sig, shutdown_signal_handler) - except NotImplementedError: - pass - if hasattr(signal, "SIGUSR1"): - try: - loop.add_signal_handler(signal.SIGUSR1, restart_signal_handler) - except NotImplementedError: - pass + if threading.current_thread() is threading.main_thread(): + for sig in (signal.SIGINT, signal.SIGTERM): + try: + loop.add_signal_handler(sig, shutdown_signal_handler) + except NotImplementedError: + pass + if hasattr(signal, "SIGUSR1"): + try: + loop.add_signal_handler(signal.SIGUSR1, restart_signal_handler) + except NotImplementedError: + pass + else: + logger.info("Skipping signal handlers (not running in main thread).") # Start the gateway success = await runner.start() diff --git a/gateway/session.py b/gateway/session.py index a11ade898..62beeffa8 100644 --- a/gateway/session.py +++ b/gateway/session.py @@ -878,7 +878,8 @@ class SessionStore: Used by ``/resume`` to restore a previously-named session. Ends the current session in SQLite (like reset), but instead of generating a fresh session ID, re-uses ``target_session_id`` so the - old transcript is loaded on the next message. + old transcript is loaded on the next message. If the target session was + previously ended, re-open it so gateway resume semantics match the CLI. """ db_end_session_id = None new_entry = None @@ -918,6 +919,12 @@ class SessionStore: except Exception as e: logger.debug("Session DB end_session failed: %s", e) + if self._db: + try: + self._db.reopen_session(target_session_id) + except Exception as e: + logger.debug("Session DB reopen_session failed: %s", e) + return new_entry def list_sessions(self, active_minutes: Optional[int] = None) -> List[SessionEntry]: diff --git a/gateway/status.py b/gateway/status.py index d7f357b36..a801cfe5b 100644 --- a/gateway/status.py +++ b/gateway/status.py @@ -290,6 +290,15 @@ def acquire_scoped_lock(scope: str, identity: str, metadata: Optional[dict[str, } existing = _read_json_file(lock_path) + if existing is None and lock_path.exists(): + # Lock file exists but is empty or contains invalid JSON โ€” treat as + # stale. This happens when a previous process was killed between + # O_CREAT|O_EXCL and the subsequent json.dump() (e.g. DNS failure + # during rapid Slack reconnect retries). 
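Concretely, the stale-lock rule that the `unlink` just below implements: a lock file that exists but holds no parseable JSON means the writer died between creating the file and filling it in. A minimal sketch (paths illustrative):

```python
import json
from pathlib import Path

def read_lock(path: Path):
    try:
        return json.loads(path.read_text())
    except (OSError, json.JSONDecodeError):
        return None

lock = Path("/tmp/demo.lock")
lock.write_text("")                 # crash between O_EXCL and json.dump()
if read_lock(lock) is None and lock.exists():
    lock.unlink(missing_ok=True)    # empty or garbled lock: reclaim it
```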
+ try: + lock_path.unlink(missing_ok=True) + except OSError: + pass if existing: try: existing_pid = int(existing["pid"]) diff --git a/hermes_cli/__init__.py b/hermes_cli/__init__.py index 959332e81..632aa5bae 100644 --- a/hermes_cli/__init__.py +++ b/hermes_cli/__init__.py @@ -11,5 +11,5 @@ Provides subcommands for: - hermes cron - Manage cron jobs """ -__version__ = "0.8.0" -__release_date__ = "2026.4.8" +__version__ = "0.9.0" +__release_date__ = "2026.4.13" diff --git a/hermes_cli/auth.py b/hermes_cli/auth.py index 04a7d0c13..795e5ea09 100644 --- a/hermes_cli/auth.py +++ b/hermes_cli/auth.py @@ -127,6 +127,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = { auth_type="api_key", inference_base_url=DEFAULT_GITHUB_MODELS_BASE_URL, api_key_env_vars=("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"), + base_url_env_var="COPILOT_API_BASE_URL", ), "copilot-acp": ProviderConfig( id="copilot-acp", @@ -159,6 +160,13 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = { api_key_env_vars=("KIMI_API_KEY",), base_url_env_var="KIMI_BASE_URL", ), + "kimi-coding-cn": ProviderConfig( + id="kimi-coding-cn", + name="Kimi / Moonshot (China)", + auth_type="api_key", + inference_base_url="https://api.moonshot.cn/v1", + api_key_env_vars=("KIMI_CN_API_KEY",), + ), "minimax": ProviderConfig( id="minimax", name="MiniMax", @@ -307,44 +315,6 @@ def _resolve_kimi_base_url(api_key: str, default_url: str, env_override: str) -> return default_url -def _gh_cli_candidates() -> list[str]: - """Return candidate ``gh`` binary paths, including common Homebrew installs.""" - candidates: list[str] = [] - - resolved = shutil.which("gh") - if resolved: - candidates.append(resolved) - - for candidate in ( - "/opt/homebrew/bin/gh", - "/usr/local/bin/gh", - str(Path.home() / ".local" / "bin" / "gh"), - ): - if candidate in candidates: - continue - if os.path.isfile(candidate) and os.access(candidate, os.X_OK): - candidates.append(candidate) - - return candidates - - -def _try_gh_cli_token() -> Optional[str]: - """Return a token from ``gh auth token`` when the GitHub CLI is available.""" - for gh_path in _gh_cli_candidates(): - try: - result = subprocess.run( - [gh_path, "auth", "token"], - capture_output=True, - text=True, - timeout=5, - ) - except (FileNotFoundError, subprocess.TimeoutExpired) as exc: - logger.debug("gh CLI token lookup failed (%s): %s", gh_path, exc) - continue - if result.returncode == 0 and result.stdout.strip(): - return result.stdout.strip() - return None - _PLACEHOLDER_SECRET_VALUES = { "*", @@ -929,6 +899,7 @@ def resolve_provider( "glm": "zai", "z-ai": "zai", "z.ai": "zai", "zhipu": "zai", "google": "gemini", "google-gemini": "gemini", "google-ai-studio": "gemini", "kimi": "kimi-coding", "kimi-for-coding": "kimi-coding", "moonshot": "kimi-coding", + "kimi-cn": "kimi-coding-cn", "moonshot-cn": "kimi-coding-cn", "minimax-china": "minimax-cn", "minimax_cn": "minimax-cn", "claude": "anthropic", "claude-code": "anthropic", "github": "copilot", "github-copilot": "copilot", diff --git a/hermes_cli/auth_commands.py b/hermes_cli/auth_commands.py index 0532faa77..c1cf0ff61 100644 --- a/hermes_cli/auth_commands.py +++ b/hermes_cli/auth_commands.py @@ -36,25 +36,23 @@ _OAUTH_CAPABLE_PROVIDERS = {"anthropic", "nous", "openai-codex", "qwen-oauth"} def _get_custom_provider_names() -> list: - """Return list of (display_name, pool_key) tuples for custom_providers in config.""" + """Return list of (display_name, pool_key, provider_key) tuples.""" try: - from hermes_cli.config import load_config + from hermes_cli.config 
import get_compatible_custom_providers, load_config config = load_config() except Exception: return [] - custom_providers = config.get("custom_providers") - if not isinstance(custom_providers, list): - return [] result = [] - for entry in custom_providers: + for entry in get_compatible_custom_providers(config): if not isinstance(entry, dict): continue name = entry.get("name") if not isinstance(name, str) or not name.strip(): continue pool_key = f"{CUSTOM_POOL_PREFIX}{_normalize_custom_pool_name(name)}" - result.append((name.strip(), pool_key)) + provider_key = str(entry.get("provider_key", "") or "").strip() + result.append((name.strip(), pool_key, provider_key)) return result @@ -66,9 +64,11 @@ def _resolve_custom_provider_input(raw: str) -> str | None: # Direct match on 'custom:name' format if normalized.startswith(CUSTOM_POOL_PREFIX): return normalized - for display_name, pool_key in _get_custom_provider_names(): + for display_name, pool_key, provider_key in _get_custom_provider_names(): if _normalize_custom_pool_name(display_name) == normalized: return pool_key + if provider_key and provider_key.strip().lower() == normalized: + return pool_key return None @@ -405,7 +405,7 @@ def _pick_provider(prompt: str = "Provider") -> str: known = sorted(set(list(PROVIDER_REGISTRY.keys()) + ["openrouter"])) custom_names = _get_custom_provider_names() if custom_names: - custom_display = [name for name, _key in custom_names] + custom_display = [name for name, _key, _provider_key in custom_names] print(f"\nKnown providers: {', '.join(known)}") print(f"Custom endpoints: {', '.join(custom_display)}") else: diff --git a/hermes_cli/backup.py b/hermes_cli/backup.py index 9aca0f822..667b8915a 100644 --- a/hermes_cli/backup.py +++ b/hermes_cli/backup.py @@ -8,14 +8,22 @@ Backup and import commands for hermes CLI. HERMES_HOME root. """ +import json +import logging import os +import shutil +import sqlite3 import sys +import tempfile import time import zipfile -from datetime import datetime +from datetime import datetime, timezone from pathlib import Path +from typing import Any, Dict, List, Optional -from hermes_constants import get_default_hermes_root, display_hermes_home +from hermes_constants import get_default_hermes_root, get_hermes_home, display_hermes_home + +logger = logging.getLogger(__name__) # --------------------------------------------------------------------------- @@ -63,6 +71,33 @@ def _should_exclude(rel_path: Path) -> bool: return False +# --------------------------------------------------------------------------- +# SQLite safe copy +# --------------------------------------------------------------------------- + +def _safe_copy_db(src: Path, dst: Path) -> bool: + """Copy a SQLite database safely using the backup() API. + + Handles WAL mode โ€” produces a consistent snapshot even while + the DB is being written to. Falls back to raw copy on failure. 
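For context on the stdlib call `_safe_copy_db` leans on: `sqlite3.Connection.backup()` copies the database page by page under SQLite's own locking, so even a WAL database that is being written to yields a consistent snapshot. Minimal usage, assuming `state.db` exists (paths illustrative):

```python
import sqlite3
from pathlib import Path

src, dst = Path("state.db"), Path("state.backup.db")
conn = sqlite3.connect(f"file:{src}?mode=ro", uri=True)   # read-only source
out = sqlite3.connect(dst)
try:
    conn.backup(out)        # WAL-safe, page-by-page snapshot
finally:
    out.close()
    conn.close()
```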
+ """ + try: + conn = sqlite3.connect(f"file:{src}?mode=ro", uri=True) + backup_conn = sqlite3.connect(str(dst)) + conn.backup(backup_conn) + backup_conn.close() + conn.close() + return True + except Exception as exc: + logger.warning("SQLite safe copy failed for %s: %s", src, exc) + try: + shutil.copy2(src, dst) + return True + except Exception as exc2: + logger.error("Raw copy also failed for %s: %s", src, exc2) + return False + + # --------------------------------------------------------------------------- # Backup # --------------------------------------------------------------------------- @@ -151,8 +186,21 @@ def run_backup(args) -> None: with zipfile.ZipFile(out_path, "w", zipfile.ZIP_DEFLATED, compresslevel=6) as zf: for i, (abs_path, rel_path) in enumerate(files_to_add, 1): try: - zf.write(abs_path, arcname=str(rel_path)) - total_bytes += abs_path.stat().st_size + # Safe copy for SQLite databases (handles WAL mode) + if abs_path.suffix == ".db": + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp: + tmp_db = Path(tmp.name) + if _safe_copy_db(abs_path, tmp_db): + zf.write(tmp_db, arcname=str(rel_path)) + total_bytes += tmp_db.stat().st_size + tmp_db.unlink(missing_ok=True) + else: + tmp_db.unlink(missing_ok=True) + errors.append(f" {rel_path}: SQLite safe copy failed") + continue + else: + zf.write(abs_path, arcname=str(rel_path)) + total_bytes += abs_path.stat().st_size except (PermissionError, OSError) as exc: errors.append(f" {rel_path}: {exc}") continue @@ -201,7 +249,7 @@ def _validate_backup_zip(zf: zipfile.ZipFile) -> tuple[bool, str]: return False, "zip archive is empty" # Look for telltale files that a hermes home would have - markers = {"config.yaml", ".env", "hermes_state.db", "memory_store.db"} + markers = {"config.yaml", ".env", "state.db"} found = set() for n in names: # Could be at the root or one level deep (if someone zipped the directory) @@ -397,3 +445,211 @@ def run_import(args) -> None: print(f" hermes -p {pname} gateway install") print("Done. Your Hermes configuration has been restored.") + + +# --------------------------------------------------------------------------- +# Quick state snapshots (used by /snapshot slash command and hermes backup --quick) +# --------------------------------------------------------------------------- + +# Critical state files to include in quick snapshots (relative to HERMES_HOME). +# Everything else is either regeneratable (logs, cache) or managed separately +# (skills, repo, sessions/). +_QUICK_STATE_FILES = ( + "state.db", + "config.yaml", + ".env", + "auth.json", + "cron/jobs.json", + "gateway_state.json", + "channel_directory.json", + "processes.json", +) + +_QUICK_SNAPSHOTS_DIR = "state-snapshots" +_QUICK_DEFAULT_KEEP = 20 + + +def _quick_snapshot_root(hermes_home: Optional[Path] = None) -> Path: + home = hermes_home or get_hermes_home() + return home / _QUICK_SNAPSHOTS_DIR + + +def create_quick_snapshot( + label: Optional[str] = None, + hermes_home: Optional[Path] = None, +) -> Optional[str]: + """Create a quick state snapshot of critical files. + + Copies STATE_FILES to a timestamped directory under state-snapshots/. + Auto-prunes old snapshots beyond the keep limit. + + Returns: + Snapshot ID (timestamp-based), or None if no files found. 
+ """ + home = hermes_home or get_hermes_home() + root = _quick_snapshot_root(home) + + ts = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S") + snap_id = f"{ts}-{label}" if label else ts + snap_dir = root / snap_id + snap_dir.mkdir(parents=True, exist_ok=True) + + manifest: Dict[str, int] = {} # rel_path -> file size + + for rel in _QUICK_STATE_FILES: + src = home / rel + if not src.exists() or not src.is_file(): + continue + + dst = snap_dir / rel + dst.parent.mkdir(parents=True, exist_ok=True) + + try: + if src.suffix == ".db": + if not _safe_copy_db(src, dst): + continue + else: + shutil.copy2(src, dst) + manifest[rel] = dst.stat().st_size + except (OSError, PermissionError) as exc: + logger.warning("Could not snapshot %s: %s", rel, exc) + + if not manifest: + shutil.rmtree(snap_dir, ignore_errors=True) + return None + + # Write manifest + meta = { + "id": snap_id, + "timestamp": ts, + "label": label, + "file_count": len(manifest), + "total_size": sum(manifest.values()), + "files": manifest, + } + with open(snap_dir / "manifest.json", "w") as f: + json.dump(meta, f, indent=2) + + # Auto-prune + _prune_quick_snapshots(root, keep=_QUICK_DEFAULT_KEEP) + + logger.info("State snapshot created: %s (%d files)", snap_id, len(manifest)) + return snap_id + + +def list_quick_snapshots( + limit: int = 20, + hermes_home: Optional[Path] = None, +) -> List[Dict[str, Any]]: + """List existing quick state snapshots, most recent first.""" + root = _quick_snapshot_root(hermes_home) + if not root.exists(): + return [] + + results = [] + for d in sorted(root.iterdir(), reverse=True): + if not d.is_dir(): + continue + manifest_path = d / "manifest.json" + if manifest_path.exists(): + try: + with open(manifest_path) as f: + results.append(json.load(f)) + except (json.JSONDecodeError, OSError): + results.append({"id": d.name, "file_count": 0, "total_size": 0}) + if len(results) >= limit: + break + + return results + + +def restore_quick_snapshot( + snapshot_id: str, + hermes_home: Optional[Path] = None, +) -> bool: + """Restore state from a quick snapshot. + + Overwrites current state files with the snapshot's copies. + Returns True if at least one file was restored. + """ + home = hermes_home or get_hermes_home() + root = _quick_snapshot_root(home) + snap_dir = root / snapshot_id + + if not snap_dir.is_dir(): + return False + + manifest_path = snap_dir / "manifest.json" + if not manifest_path.exists(): + return False + + with open(manifest_path) as f: + meta = json.load(f) + + restored = 0 + for rel in meta.get("files", {}): + src = snap_dir / rel + if not src.exists(): + continue + + dst = home / rel + dst.parent.mkdir(parents=True, exist_ok=True) + + try: + if dst.suffix == ".db": + # Atomic-ish replace for databases + tmp = dst.parent / f".{dst.name}.snap_restore" + shutil.copy2(src, tmp) + dst.unlink(missing_ok=True) + shutil.move(str(tmp), str(dst)) + else: + shutil.copy2(src, dst) + restored += 1 + except (OSError, PermissionError) as exc: + logger.error("Failed to restore %s: %s", rel, exc) + + logger.info("Restored %d files from snapshot %s", restored, snapshot_id) + return restored > 0 + + +def _prune_quick_snapshots(root: Path, keep: int = _QUICK_DEFAULT_KEEP) -> int: + """Remove oldest quick snapshots beyond the keep limit. 
Returns count deleted.""" + if not root.exists(): + return 0 + + dirs = sorted( + (d for d in root.iterdir() if d.is_dir()), + key=lambda d: d.name, + reverse=True, + ) + + deleted = 0 + for d in dirs[keep:]: + try: + shutil.rmtree(d) + deleted += 1 + except OSError as exc: + logger.warning("Failed to prune snapshot %s: %s", d.name, exc) + + return deleted + + +def prune_quick_snapshots( + keep: int = _QUICK_DEFAULT_KEEP, + hermes_home: Optional[Path] = None, +) -> int: + """Manually prune quick snapshots. Returns count deleted.""" + return _prune_quick_snapshots(_quick_snapshot_root(hermes_home), keep=keep) + + +def run_quick_backup(args) -> None: + """CLI entry point for hermes backup --quick.""" + label = getattr(args, "label", None) + snap_id = create_quick_snapshot(label=label) + if snap_id: + print(f"State snapshot created: {snap_id}") + snaps = list_quick_snapshots() + print(f" {len(snaps)} snapshot(s) stored in {display_hermes_home()}/state-snapshots/") + print(f" Restore with: /snapshot restore {snap_id}") + else: + print("No state files found to snapshot.") diff --git a/hermes_cli/claw.py b/hermes_cli/claw.py index 0f9e28cbc..e62efe47e 100644 --- a/hermes_cli/claw.py +++ b/hermes_cli/claw.py @@ -11,6 +11,7 @@ Usage: import importlib.util import logging +import subprocess import sys from datetime import datetime from pathlib import Path @@ -52,6 +53,99 @@ _OPENCLAW_SCRIPT_INSTALLED = ( # Known OpenClaw directory names (current + legacy) _OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moltbot") +def _detect_openclaw_processes() -> list[str]: + """Detect running OpenClaw processes and services. + + Returns a list of human-readable descriptions of what was found. + An empty list means nothing was detected. + """ + found: list[str] = [] + + # -- systemd service (Linux) ------------------------------------------ + if sys.platform != "win32": + try: + result = subprocess.run( + ["systemctl", "--user", "is-active", "openclaw-gateway.service"], + capture_output=True, text=True, timeout=5, + ) + if result.stdout.strip() == "active": + found.append("systemd service: openclaw-gateway.service") + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + # -- process scan ------------------------------------------------------ + if sys.platform == "win32": + try: + for exe in ("openclaw.exe", "clawd.exe"): + result = subprocess.run( + ["tasklist", "/FI", f"IMAGENAME eq {exe}"], + capture_output=True, text=True, timeout=5, + ) + if exe in result.stdout.lower(): + found.append(f"process: {exe}") + + # Node.js-hosted OpenClaw โ€” tasklist doesn't show command lines, + # so fall back to PowerShell. 
+ ps_cmd = ( + 'Get-CimInstance Win32_Process -Filter "Name = \'node.exe\'" | ' + 'Where-Object { $_.CommandLine -match "openclaw|clawd" } | ' + 'Select-Object -First 1 ProcessId' + ) + result = subprocess.run( + ["powershell", "-NoProfile", "-Command", ps_cmd], + capture_output=True, text=True, timeout=5, + ) + if result.stdout.strip(): + found.append(f"node.exe process with openclaw in command line (PID {result.stdout.strip()})") + except Exception: + pass + else: + try: + result = subprocess.run( + ["pgrep", "-f", "openclaw"], + capture_output=True, text=True, timeout=3, + ) + if result.returncode == 0: + pids = result.stdout.strip().split() + found.append(f"openclaw process(es) (PIDs: {', '.join(pids)})") + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + + return found + + +def _warn_if_openclaw_running(auto_yes: bool) -> None: + """Warn if OpenClaw is still running before migration. + + Telegram, Discord, and Slack only allow one active connection per bot + token. Migrating while OpenClaw is running causes both to fight for the + same token. + """ + running = _detect_openclaw_processes() + if not running: + return + + print() + print_error("OpenClaw appears to be running:") + for detail in running: + print_info(f" * {detail}") + print_info( + "Messaging platforms (Telegram, Discord, Slack) only allow one " + "active session per bot token. If you continue, both OpenClaw and " + "Hermes may try to use the same token, causing disconnects." + ) + print_info("Recommendation: stop OpenClaw before migrating.") + print() + if auto_yes: + return + if not sys.stdin.isatty(): + print_info("Non-interactive session โ€” continuing to preview only.") + return + if not prompt_yes_no("Continue anyway?", default=False): + print_info("Migration cancelled. Stop OpenClaw and try again.") + sys.exit(0) + + def _warn_if_gateway_running(auto_yes: bool) -> None: """Check if a Hermes gateway is running with connected platforms. @@ -287,8 +381,11 @@ def _cmd_migrate(args): print_info(f"Workspace: {workspace_target}") print() - # Check if a gateway is running with connected platforms โ€” migrating tokens - # while the gateway is active will cause conflicts (e.g. Telegram 409). + # Check if OpenClaw is still running โ€” migrating tokens while both are + # active will cause conflicts (e.g. Telegram 409). + _warn_if_openclaw_running(auto_yes) + + # Check if a Hermes gateway is running with connected platforms. _warn_if_gateway_running(auto_yes) # Ensure config.yaml exists before migration tries to read it @@ -430,6 +527,28 @@ def _cmd_cleanup(args): print_success("No OpenClaw directories found. Nothing to clean up.") return + # Warn if OpenClaw is still running โ€” archiving while the service is + # active causes it to recreate an empty skeleton directory (#8502). + running = _detect_openclaw_processes() + if running: + print() + print_error("OpenClaw appears to be still running:") + for detail in running: + print_info(f" * {detail}") + print_info( + "Archiving .openclaw/ while the service is active may cause it to " + "immediately recreate an empty skeleton directory, destroying your config." + ) + print_info("Stop OpenClaw first: systemctl --user stop openclaw-gateway.service") + print() + if not auto_yes: + if not sys.stdin.isatty(): + print_info("Non-interactive session โ€” aborting. Stop OpenClaw and re-run.") + return + if not prompt_yes_no("Proceed anyway?", default=False): + print_info("Aborted. 
Stop OpenClaw first, then re-run: hermes claw cleanup") + return + total_archived = 0 for source_dir in dirs_to_check: diff --git a/hermes_cli/commands.py b/hermes_cli/commands.py index 3d1f37035..a607b7f47 100644 --- a/hermes_cli/commands.py +++ b/hermes_cli/commands.py @@ -73,7 +73,9 @@ COMMAND_REGISTRY: list[CommandDef] = [ args_hint="[focus topic]"), CommandDef("rollback", "List or restore filesystem checkpoints", "Session", args_hint="[number]"), - CommandDef("stop", "Kill all running registered subprocesses", "Session"), + CommandDef("snapshot", "Create or restore state snapshots of Hermes config/state", "Session", + aliases=("snap",), args_hint="[create|restore |prune]"), + CommandDef("stop", "Kill all running background processes", "Session"), CommandDef("approve", "Approve a pending dangerous command", "Session", gateway_only=True, args_hint="[session|always]"), CommandDef("deny", "Deny a pending dangerous command", "Session", @@ -131,6 +133,7 @@ COMMAND_REGISTRY: list[CommandDef] = [ CommandDef("cron", "Manage scheduled tasks", "Tools & Skills", cli_only=True, args_hint="[subcommand]", subcommands=("list", "add", "create", "edit", "pause", "resume", "run", "remove")), + CommandDef("reload", "Reload .env variables into the running session", "Tools & Skills"), CommandDef("reload-mcp", "Reload MCP servers from config", "Tools & Skills", aliases=("reload_mcp",)), CommandDef("browser", "Connect browser tools to your live Chrome via CDP", "Tools & Skills", @@ -158,6 +161,7 @@ COMMAND_REGISTRY: list[CommandDef] = [ cli_only=True, args_hint=""), CommandDef("update", "Update Hermes Agent to the latest version", "Info", gateway_only=True), + CommandDef("debug", "Upload debug report (system info + logs) and get shareable links", "Info"), # Exit CommandDef("quit", "Exit the CLI", "Exit", diff --git a/hermes_cli/config.py b/hermes_cli/config.py index 8c46f8bba..738960bb4 100644 --- a/hermes_cli/config.py +++ b/hermes_cli/config.py @@ -148,25 +148,6 @@ def managed_error(action: str = "modify configuration"): # Container-aware CLI (NixOS container mode) # ============================================================================= -def _is_inside_container() -> bool: - """Detect if we're already running inside a Docker/Podman container.""" - # Standard Docker/Podman indicators - if os.path.exists("/.dockerenv"): - return True - # Podman uses /run/.containerenv - if os.path.exists("/run/.containerenv"): - return True - # Check cgroup for container runtime evidence (works for both Docker & Podman) - try: - with open("/proc/1/cgroup", "r") as f: - cgroup = f.read() - if "docker" in cgroup or "podman" in cgroup or "/lxc/" in cgroup: - return True - except OSError: - pass - return False - - def get_container_exec_info() -> Optional[dict]: """Read container mode metadata from HERMES_HOME/.container-mode. @@ -181,7 +162,8 @@ def get_container_exec_info() -> Optional[dict]: if os.environ.get("HERMES_DEV") == "1": return None - if _is_inside_container(): + from hermes_constants import is_container + if is_container(): return None container_mode_file = get_hermes_home() / ".container-mode" @@ -355,6 +337,10 @@ DEFAULT_CONFIG = { # threshold before escalating to a full timeout. The warning fires # once per run and does not interrupt the agent. 0 = disable warning. "gateway_timeout_warning": 900, + # Periodic "still working" notification interval (seconds). + # Sends a status message every N seconds so the user knows the + # agent hasn't died during long tasks. 0 = disable notifications. 
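Taken together with the env plumbing in gateway/run.py and the zero-disables check in the notify loop, the effective resolution of the default just below works out like this (the helper name is hypothetical):

```python
import os

os.environ.pop("HERMES_AGENT_NOTIFY_INTERVAL", None)   # demo determinism

def resolve_notify_interval(cfg_value=None):
    # Env var (seeded from config.yaml at startup) wins, then the config
    # value, then the 600 s default; any non-positive value disables.
    raw = os.environ.get("HERMES_AGENT_NOTIFY_INTERVAL", cfg_value)
    interval = float(raw) if raw is not None else 600.0
    return interval if interval > 0 else None

assert resolve_notify_interval() == 600.0
assert resolve_notify_interval(0) is None    # gateway_notify_interval: 0
```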
+ "gateway_notify_interval": 600, }, "terminal": { @@ -428,9 +414,7 @@ DEFAULT_CONFIG = { "threshold": 0.50, # compress when context usage exceeds this ratio "target_ratio": 0.20, # fraction of threshold to preserve as recent tail "protect_last_n": 20, # minimum recent messages to keep uncompressed - "summary_model": "", # empty = use main configured model - "summary_provider": "auto", - "summary_base_url": None, + }, "smart_model_routing": { "enabled": False, @@ -716,7 +700,7 @@ DEFAULT_CONFIG = { }, # Config schema version - bump this when adding new required fields - "_config_version": 16, + "_config_version": 17, } # ============================================================================= @@ -832,6 +816,14 @@ OPTIONAL_ENV_VARS = { "category": "provider", "advanced": True, }, + "KIMI_CN_API_KEY": { + "description": "Kimi / Moonshot China API key", + "prompt": "Kimi (China) API key", + "url": "https://platform.moonshot.cn/", + "password": True, + "category": "provider", + "advanced": True, + }, "MINIMAX_API_KEY": { "description": "MiniMax API key (international)", "prompt": "MiniMax API key", @@ -1560,6 +1552,136 @@ def get_missing_skill_config_vars() -> List[Dict[str, Any]]: return missing +def _normalize_custom_provider_entry( + entry: Any, + *, + provider_key: str = "", +) -> Optional[Dict[str, Any]]: + """Return a runtime-compatible custom provider entry or ``None``.""" + if not isinstance(entry, dict): + return None + + base_url = "" + for url_key in ("api", "url", "base_url"): + raw_url = entry.get(url_key) + if isinstance(raw_url, str) and raw_url.strip(): + base_url = raw_url.strip() + break + if not base_url: + return None + + name = "" + raw_name = entry.get("name") + if isinstance(raw_name, str) and raw_name.strip(): + name = raw_name.strip() + elif provider_key.strip(): + name = provider_key.strip() + if not name: + return None + + normalized: Dict[str, Any] = { + "name": name, + "base_url": base_url, + } + + provider_key = provider_key.strip() + if provider_key: + normalized["provider_key"] = provider_key + + api_key = entry.get("api_key") + if isinstance(api_key, str) and api_key.strip(): + normalized["api_key"] = api_key.strip() + + key_env = entry.get("key_env") + if isinstance(key_env, str) and key_env.strip(): + normalized["key_env"] = key_env.strip() + + api_mode = entry.get("api_mode") or entry.get("transport") + if isinstance(api_mode, str) and api_mode.strip(): + normalized["api_mode"] = api_mode.strip() + + model_name = entry.get("model") or entry.get("default_model") + if isinstance(model_name, str) and model_name.strip(): + normalized["model"] = model_name.strip() + + models = entry.get("models") + if isinstance(models, dict) and models: + normalized["models"] = models + + context_length = entry.get("context_length") + if isinstance(context_length, int) and context_length > 0: + normalized["context_length"] = context_length + + rate_limit_delay = entry.get("rate_limit_delay") + if isinstance(rate_limit_delay, (int, float)) and rate_limit_delay >= 0: + normalized["rate_limit_delay"] = rate_limit_delay + + return normalized + + +def providers_dict_to_custom_providers(providers_dict: Any) -> List[Dict[str, Any]]: + """Normalize ``providers`` config entries into the legacy custom-provider shape.""" + if not isinstance(providers_dict, dict): + return [] + + custom_providers: List[Dict[str, Any]] = [] + for key, entry in providers_dict.items(): + normalized = _normalize_custom_provider_entry(entry, provider_key=str(key)) + if normalized is not None: + 
custom_providers.append(normalized) + + return custom_providers + + +def get_compatible_custom_providers( + config: Optional[Dict[str, Any]] = None, +) -> List[Dict[str, Any]]: + """Return a deduplicated custom-provider view across legacy and v12+ config. + + ``custom_providers`` remains the on-disk legacy format, while ``providers`` + is the newer keyed schema. Runtime and picker flows still need a single + list-shaped view, but we should not materialise that compatibility layer + back into config.yaml because it duplicates entries in UIs. + """ + if config is None: + config = load_config() + + compatible: List[Dict[str, Any]] = [] + seen_provider_keys: set = set() + seen_name_url_pairs: set = set() + + def _append_if_new(entry: Optional[Dict[str, Any]]) -> None: + if entry is None: + return + provider_key = str(entry.get("provider_key", "") or "").strip().lower() + name = str(entry.get("name", "") or "").strip().lower() + base_url = str(entry.get("base_url", "") or "").strip().rstrip("/").lower() + pair = (name, base_url) + + if provider_key and provider_key in seen_provider_keys: + return + if name and base_url and pair in seen_name_url_pairs: + return + + compatible.append(entry) + if provider_key: + seen_provider_keys.add(provider_key) + if name and base_url: + seen_name_url_pairs.add(pair) + + custom_providers = config.get("custom_providers") + if custom_providers is not None: + if not isinstance(custom_providers, list): + return [] + for entry in custom_providers: + _append_if_new(_normalize_custom_provider_entry(entry)) + + for entry in providers_dict_to_custom_providers(config.get("providers")): + _append_if_new(entry) + + return compatible + + def check_config_version() -> Tuple[int, int]: """ Check config version. @@ -1877,8 +1999,8 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A if migrated_count > 0: config["providers"] = providers_dict - # Remove the old list - del config["custom_providers"] + # Remove the old list โ€” runtime reads via get_compatible_custom_providers() + config.pop("custom_providers", None) save_config(config) if not quiet: print(f" โœ“ Migrated {migrated_count} custom provider(s) to providers: section") @@ -1989,6 +2111,43 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A print(f" โœ“ Migrated tool_progress_overrides โ†’ display.platforms: {migrated}") results["config_added"].append("display.platforms (migrated from tool_progress_overrides)") + # โ”€โ”€ Version 16 โ†’ 17: remove legacy compression.summary_* keys โ”€โ”€ + if current_ver < 17: + config = read_raw_config() + comp = config.get("compression", {}) + if isinstance(comp, dict): + s_model = comp.pop("summary_model", None) + s_provider = comp.pop("summary_provider", None) + s_base_url = comp.pop("summary_base_url", None) + migrated_keys = [] + # Migrate non-empty, non-default values to auxiliary.compression + if s_model and str(s_model).strip(): + aux = config.setdefault("auxiliary", {}) + aux_comp = aux.setdefault("compression", {}) + if not aux_comp.get("model"): + aux_comp["model"] = str(s_model).strip() + migrated_keys.append(f"model={s_model}") + if s_provider and str(s_provider).strip() not in ("", "auto"): + aux = config.setdefault("auxiliary", {}) + aux_comp = aux.setdefault("compression", {}) + if not aux_comp.get("provider") or aux_comp.get("provider") == "auto": + aux_comp["provider"] = str(s_provider).strip() + migrated_keys.append(f"provider={s_provider}") + if s_base_url and str(s_base_url).strip(): + aux = 
config.setdefault("auxiliary", {}) + aux_comp = aux.setdefault("compression", {}) + if not aux_comp.get("base_url"): + aux_comp["base_url"] = str(s_base_url).strip() + migrated_keys.append(f"base_url={s_base_url}") + if migrated_keys or s_model is not None or s_provider is not None or s_base_url is not None: + config["compression"] = comp + save_config(config) + if not quiet: + if migrated_keys: + print(f" โœ“ Migrated compression.summary_* โ†’ auxiliary.compression: {', '.join(migrated_keys)}") + else: + print(" โœ“ Removed unused compression.summary_* keys") + if current_ver < latest_ver and not quiet: print(f"Config version: {current_ver} โ†’ {latest_ver}") @@ -2301,6 +2460,7 @@ _FALLBACK_COMMENT = """ # nous (OAuth โ€” hermes auth) โ€” Nous Portal # zai (ZAI_API_KEY) โ€” Z.AI / GLM # kimi-coding (KIMI_API_KEY) โ€” Kimi / Moonshot +# kimi-coding-cn (KIMI_CN_API_KEY) โ€” Kimi / Moonshot (China) # minimax (MINIMAX_API_KEY) โ€” MiniMax # minimax-cn (MINIMAX_CN_API_KEY) โ€” MiniMax (China) # @@ -2344,6 +2504,7 @@ _COMMENTED_SECTIONS = """ # nous (OAuth โ€” hermes auth) โ€” Nous Portal # zai (ZAI_API_KEY) โ€” Z.AI / GLM # kimi-coding (KIMI_API_KEY) โ€” Kimi / Moonshot +# kimi-coding-cn (KIMI_CN_API_KEY) โ€” Kimi / Moonshot (China) # minimax (MINIMAX_API_KEY) โ€” MiniMax # minimax-cn (MINIMAX_CN_API_KEY) โ€” MiniMax (China) # @@ -2398,7 +2559,13 @@ def save_config(config: Dict[str, Any]): def load_env() -> Dict[str, str]: - """Load environment variables from ~/.hermes/.env.""" + """Load environment variables from ~/.hermes/.env. + + Sanitizes lines before parsing so that corrupted files (e.g. + concatenated KEY=VALUE pairs on a single line) are handled + gracefully instead of producing mangled values such as duplicated + bot tokens. See #8908. + """ env_path = get_env_path() env_vars = {} @@ -2407,17 +2574,21 @@ def load_env() -> Dict[str, str]: # fail on UTF-8 .env files. Use explicit UTF-8 only on Windows. open_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {} with open(env_path, **open_kw) as f: - for line in f: - line = line.strip() - if line and not line.startswith('#') and '=' in line: - key, _, value = line.partition('=') - env_vars[key.strip()] = value.strip().strip('"\'') + raw_lines = f.readlines() + # Sanitize before parsing: split concatenated lines & drop stale + # placeholders so corrupted .env files don't produce invalid tokens. + lines = _sanitize_env_lines(raw_lines) + for line in lines: + line = line.strip() + if line and not line.startswith('#') and '=' in line: + key, _, value = line.partition('=') + env_vars[key.strip()] = value.strip().strip('"\'') return env_vars def _sanitize_env_lines(lines: list) -> list: - """Fix corrupted .env lines before writing. + """Fix corrupted .env lines before reading or writing. Handles two known corruption patterns: 1. Concatenated KEY=VALUE pairs on a single line (missing newline between @@ -2650,6 +2821,28 @@ def save_env_value_secure(key: str, value: str) -> Dict[str, Any]: +def reload_env() -> int: + """Re-read ~/.hermes/.env into os.environ. Returns count of vars updated. + + Adds/updates vars that changed and removes vars that were deleted from + the .env file (but only vars known to Hermes โ€” OPTIONAL_ENV_VARS and + _EXTRA_ENV_KEYS โ€” to avoid clobbering unrelated environment). 
+ """ + env_vars = load_env() + known_keys = set(OPTIONAL_ENV_VARS.keys()) | _EXTRA_ENV_KEYS + count = 0 + for key, value in env_vars.items(): + if os.environ.get(key) != value: + os.environ[key] = value + count += 1 + # Remove known Hermes vars that are no longer in .env + for key in known_keys: + if key not in env_vars and key in os.environ: + del os.environ[key] + count += 1 + return count + + def get_env_value(key: str) -> Optional[str]: """Get a value from ~/.hermes/.env or environment.""" # Check environment first @@ -2772,10 +2965,11 @@ def show_config(): print(f" Threshold: {compression.get('threshold', 0.50) * 100:.0f}%") print(f" Target ratio: {compression.get('target_ratio', 0.20) * 100:.0f}% of threshold preserved") print(f" Protect last: {compression.get('protect_last_n', 20)} messages") - _sm = compression.get('summary_model', '') or '(main model)' + _aux_comp = config.get('auxiliary', {}).get('compression', {}) + _sm = _aux_comp.get('model', '') or '(auto)' print(f" Model: {_sm}") - comp_provider = compression.get('summary_provider', 'auto') - if comp_provider != 'auto': + comp_provider = _aux_comp.get('provider', 'auto') + if comp_provider and comp_provider != 'auto': print(f" Provider: {comp_provider}") # Auxiliary models diff --git a/hermes_cli/copilot_auth.py b/hermes_cli/copilot_auth.py index 0db863705..24859da1a 100644 --- a/hermes_cli/copilot_auth.py +++ b/hermes_cli/copilot_auth.py @@ -117,14 +117,30 @@ def _gh_cli_candidates() -> list[str]: def _try_gh_cli_token() -> Optional[str]: - """Return a token from ``gh auth token`` when the GitHub CLI is available.""" + """Return a token from ``gh auth token`` when the GitHub CLI is available. + + When COPILOT_GH_HOST is set, passes ``--hostname`` so gh returns the + correct host's token. Also strips GITHUB_TOKEN / GH_TOKEN from the + subprocess environment so ``gh`` reads from its own credential store + (hosts.yml) instead of just echoing the env var back. + """ + hostname = os.getenv("COPILOT_GH_HOST", "").strip() + + # Build a clean env so gh doesn't short-circuit on GITHUB_TOKEN / GH_TOKEN + clean_env = {k: v for k, v in os.environ.items() + if k not in ("GITHUB_TOKEN", "GH_TOKEN")} + for gh_path in _gh_cli_candidates(): + cmd = [gh_path, "auth", "token"] + if hostname: + cmd += ["--hostname", hostname] try: result = subprocess.run( - [gh_path, "auth", "token"], + cmd, capture_output=True, text=True, timeout=5, + env=clean_env, ) except (FileNotFoundError, subprocess.TimeoutExpired) as exc: logger.debug("gh CLI token lookup failed (%s): %s", gh_path, exc) diff --git a/hermes_cli/debug.py b/hermes_cli/debug.py new file mode 100644 index 000000000..3607db923 --- /dev/null +++ b/hermes_cli/debug.py @@ -0,0 +1,336 @@ +"""``hermes debug`` โ€” debug tools for Hermes Agent. + +Currently supports: + hermes debug share Upload debug report (system info + logs) to a + paste service and print a shareable URL. +""" + +import io +import sys +import urllib.error +import urllib.parse +import urllib.request +from pathlib import Path +from typing import Optional + +from hermes_constants import get_hermes_home + + +# --------------------------------------------------------------------------- +# Paste services โ€” try paste.rs first, dpaste.com as fallback. +# --------------------------------------------------------------------------- + +_PASTE_RS_URL = "https://paste.rs/" +_DPASTE_COM_URL = "https://dpaste.com/api/" + +# Maximum bytes to read from a single log file for upload. +# paste.rs caps at ~1 MB; we stay under that with headroom. 
+_MAX_LOG_BYTES = 512_000 + + +def _upload_paste_rs(content: str) -> str: + """Upload to paste.rs. Returns the paste URL. + + paste.rs accepts a plain POST body and returns the URL directly. + """ + data = content.encode("utf-8") + req = urllib.request.Request( + _PASTE_RS_URL, data=data, method="POST", + headers={ + "Content-Type": "text/plain; charset=utf-8", + "User-Agent": "hermes-agent/debug-share", + }, + ) + with urllib.request.urlopen(req, timeout=30) as resp: + url = resp.read().decode("utf-8").strip() + if not url.startswith("http"): + raise ValueError(f"Unexpected response from paste.rs: {url[:200]}") + return url + + +def _upload_dpaste_com(content: str, expiry_days: int = 7) -> str: + """Upload to dpaste.com. Returns the paste URL. + + dpaste.com uses multipart form data. + """ + boundary = "----HermesDebugBoundary9f3c" + + def _field(name: str, value: str) -> str: + return ( + f"--{boundary}\r\n" + f'Content-Disposition: form-data; name="{name}"\r\n' + f"\r\n" + f"{value}\r\n" + ) + + body = ( + _field("content", content) + + _field("syntax", "text") + + _field("expiry_days", str(expiry_days)) + + f"--{boundary}--\r\n" + ).encode("utf-8") + + req = urllib.request.Request( + _DPASTE_COM_URL, data=body, method="POST", + headers={ + "Content-Type": f"multipart/form-data; boundary={boundary}", + "User-Agent": "hermes-agent/debug-share", + }, + ) + with urllib.request.urlopen(req, timeout=30) as resp: + url = resp.read().decode("utf-8").strip() + if not url.startswith("http"): + raise ValueError(f"Unexpected response from dpaste.com: {url[:200]}") + return url + + +def upload_to_pastebin(content: str, expiry_days: int = 7) -> str: + """Upload *content* to a paste service, trying paste.rs then dpaste.com. + + Returns the paste URL on success, raises on total failure. + """ + errors: list[str] = [] + + # Try paste.rs first (simple, fast) + try: + return _upload_paste_rs(content) + except Exception as exc: + errors.append(f"paste.rs: {exc}") + + # Fallback: dpaste.com (supports expiry) + try: + return _upload_dpaste_com(content, expiry_days=expiry_days) + except Exception as exc: + errors.append(f"dpaste.com: {exc}") + + raise RuntimeError( + "Failed to upload to any paste service:\n " + "\n ".join(errors) + ) + + +# --------------------------------------------------------------------------- +# Log file reading +# --------------------------------------------------------------------------- + +def _resolve_log_path(log_name: str) -> Optional[Path]: + """Find the log file for *log_name*, falling back to the .1 rotation. + + Returns the path if found, or None. + """ + from hermes_cli.logs import LOG_FILES + + filename = LOG_FILES.get(log_name) + if not filename: + return None + + log_dir = get_hermes_home() / "logs" + primary = log_dir / filename + if primary.exists() and primary.stat().st_size > 0: + return primary + + # Fall back to the most recent rotated file (.1). 
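On the size cap handled a little further down: `_read_full_log` below keeps uploads under the paste limit by seeking near the end of the file and discarding the first, probably partial, line. The core move in isolation (the function name is hypothetical):

```python
from pathlib import Path

def read_tail(path: Path, max_bytes: int = 512_000) -> str:
    size = path.stat().st_size
    with open(path, "rb") as f:
        if size > max_bytes:
            f.seek(size - max_bytes)
            f.readline()        # drop the partial line at the seek point
        return f.read().decode("utf-8", errors="replace")
```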
+    rotated = log_dir / f"{filename}.1"
+    if rotated.exists() and rotated.stat().st_size > 0:
+        return rotated
+
+    return None
+
+
+def _read_log_tail(log_name: str, num_lines: int) -> str:
+    """Read the last *num_lines* from a log file, or return a placeholder."""
+    from hermes_cli.logs import _read_last_n_lines
+
+    log_path = _resolve_log_path(log_name)
+    if log_path is None:
+        return "(file not found)"
+
+    try:
+        lines = _read_last_n_lines(log_path, num_lines)
+        return "".join(lines).rstrip("\n")
+    except Exception as exc:
+        return f"(error reading: {exc})"
+
+
+def _read_full_log(log_name: str, max_bytes: int = _MAX_LOG_BYTES) -> Optional[str]:
+    """Read a log file for standalone upload.
+
+    Returns the file content (last *max_bytes* if truncated), or None if the
+    file doesn't exist or is empty.
+    """
+    log_path = _resolve_log_path(log_name)
+    if log_path is None:
+        return None
+
+    try:
+        size = log_path.stat().st_size
+        if size == 0:
+            return None
+
+        if size <= max_bytes:
+            return log_path.read_text(encoding="utf-8", errors="replace")
+
+        # File is larger than max_bytes — read the tail.
+        with open(log_path, "rb") as f:
+            f.seek(size - max_bytes)
+            # Skip partial line at the seek point.
+            f.readline()
+            content = f.read().decode("utf-8", errors="replace")
+        return f"[... truncated — showing last ~{max_bytes // 1024}KB ...]\n{content}"
+    except Exception:
+        return None
+
+
+# ---------------------------------------------------------------------------
+# Debug report collection
+# ---------------------------------------------------------------------------
+
+def _capture_dump() -> str:
+    """Run ``hermes dump`` and return its stdout as a string."""
+    from hermes_cli.dump import run_dump
+
+    class _FakeArgs:
+        show_keys = False
+
+    old_stdout = sys.stdout
+    sys.stdout = capture = io.StringIO()
+    try:
+        run_dump(_FakeArgs())
+    except SystemExit:
+        pass
+    finally:
+        sys.stdout = old_stdout
+
+    return capture.getvalue()
+
+
+def collect_debug_report(*, log_lines: int = 200, dump_text: str = "") -> str:
+    """Build the summary debug report: system dump + log tails.
+
+    Parameters
+    ----------
+    log_lines
+        Number of recent lines to include per log file.
+    dump_text
+        Pre-captured dump output. If empty, ``hermes dump`` is run
+        internally.
+
+    Returns the report as a plain-text string ready for upload.
+ """ + buf = io.StringIO() + + if not dump_text: + dump_text = _capture_dump() + buf.write(dump_text) + + # โ”€โ”€ Recent log tails (summary only) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + buf.write("\n\n") + buf.write(f"--- agent.log (last {log_lines} lines) ---\n") + buf.write(_read_log_tail("agent", log_lines)) + buf.write("\n\n") + + errors_lines = min(log_lines, 100) + buf.write(f"--- errors.log (last {errors_lines} lines) ---\n") + buf.write(_read_log_tail("errors", errors_lines)) + buf.write("\n\n") + + buf.write(f"--- gateway.log (last {errors_lines} lines) ---\n") + buf.write(_read_log_tail("gateway", errors_lines)) + buf.write("\n") + + return buf.getvalue() + + +# --------------------------------------------------------------------------- +# CLI entry points +# --------------------------------------------------------------------------- + +def run_debug_share(args): + """Collect debug report + full logs, upload each, print URLs.""" + log_lines = getattr(args, "lines", 200) + expiry = getattr(args, "expire", 7) + local_only = getattr(args, "local", False) + + print("Collecting debug report...") + + # Capture dump once โ€” prepended to every paste for context. + dump_text = _capture_dump() + + report = collect_debug_report(log_lines=log_lines, dump_text=dump_text) + agent_log = _read_full_log("agent") + gateway_log = _read_full_log("gateway") + + # Prepend dump header to each full log so every paste is self-contained. + if agent_log: + agent_log = dump_text + "\n\n--- full agent.log ---\n" + agent_log + if gateway_log: + gateway_log = dump_text + "\n\n--- full gateway.log ---\n" + gateway_log + + if local_only: + print(report) + if agent_log: + print(f"\n\n{'=' * 60}") + print("FULL agent.log") + print(f"{'=' * 60}\n") + print(agent_log) + if gateway_log: + print(f"\n\n{'=' * 60}") + print("FULL gateway.log") + print(f"{'=' * 60}\n") + print(gateway_log) + return + + print("Uploading...") + urls: dict[str, str] = {} + failures: list[str] = [] + + # 1. Summary report (required) + try: + urls["Report"] = upload_to_pastebin(report, expiry_days=expiry) + except RuntimeError as exc: + print(f"\nUpload failed: {exc}", file=sys.stderr) + print("\nFull report printed below โ€” copy-paste it manually:\n") + print(report) + sys.exit(1) + + # 2. Full agent.log (optional) + if agent_log: + try: + urls["agent.log"] = upload_to_pastebin(agent_log, expiry_days=expiry) + except Exception as exc: + failures.append(f"agent.log: {exc}") + + # 3. 
diff --git a/hermes_cli/doctor.py b/hermes_cli/doctor.py
index 13c904692..a01690cba 100644
--- a/hermes_cli/doctor.py
+++ b/hermes_cli/doctor.py
@@ -721,6 +721,7 @@ def run_doctor(args):
     _apikey_providers = [
         ("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
         ("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
+        ("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
         ("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
         ("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True),
         ("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True),
diff --git a/hermes_cli/dump.py b/hermes_cli/dump.py
index caa6b7e8c..491bf6e2c 100644
--- a/hermes_cli/dump.py
+++ b/hermes_cli/dump.py
@@ -44,6 +44,16 @@ def _redact(value: str) -> str:
 def _gateway_status() -> str:
     """Return a short gateway status string."""
     if sys.platform.startswith("linux"):
+        from hermes_constants import is_container
+        if is_container():
+            try:
+                from hermes_cli.gateway import find_gateway_pids
+                pids = find_gateway_pids()
+                if pids:
+                    return f"running (docker, pid {pids[0]})"
+                return "stopped (docker)"
+            except Exception:
+                return "stopped (docker)"
         try:
             from hermes_cli.gateway import get_service_name
             svc = get_service_name()
diff --git a/hermes_cli/env_loader.py b/hermes_cli/env_loader.py
index 0066d25b0..8d6a1449d 100644
--- a/hermes_cli/env_loader.py
+++ b/hermes_cli/env_loader.py
@@ -15,6 +15,51 @@ def _load_dotenv_with_fallback(path: Path, *, override: bool) -> None:
         load_dotenv(dotenv_path=path, override=override, encoding="latin-1")


+def _sanitize_env_file_if_needed(path: Path) -> None:
+    """Pre-sanitize a .env file before python-dotenv reads it.
+
+    python-dotenv does not handle corrupted lines where multiple
+    KEY=VALUE pairs are concatenated on a single line (missing newline).
+    This produces mangled values — e.g. a bot token duplicated 8×
+    (see #8908).
+
+    We delegate to ``hermes_cli.config._sanitize_env_lines`` which
+    already knows all valid Hermes env-var names and can split
+    concatenated lines correctly.
+    """
+    if not path.exists():
+        return
+    try:
+        from hermes_cli.config import _sanitize_env_lines
+    except ImportError:
+        return  # early bootstrap — config module not available yet
+
+    read_kw = {"encoding": "utf-8", "errors": "replace"}
+    try:
+        with open(path, **read_kw) as f:
+            original = f.readlines()
+        sanitized = _sanitize_env_lines(original)
+        if sanitized != original:
+            import tempfile
+            fd, tmp = tempfile.mkstemp(
+                dir=str(path.parent), suffix=".tmp", prefix=".env_"
+            )
+            try:
+                with os.fdopen(fd, "w", encoding="utf-8") as f:
+                    f.writelines(sanitized)
+                    f.flush()
+                    os.fsync(f.fileno())
+                os.replace(tmp, path)
+            except BaseException:
+                try:
+                    os.unlink(tmp)
+                except OSError:
+                    pass
+                raise
+    except Exception:
+        pass  # best-effort — don't block gateway startup
+
+
 def load_hermes_dotenv(
     *,
     hermes_home: str | os.PathLike | None = None,
@@ -34,6 +79,10 @@ def load_hermes_dotenv(
     user_env = home_path / ".env"
     project_env_path = Path(project_env) if project_env else None

+    # Fix corrupted .env files before python-dotenv parses them (#8908).
+    if user_env.exists():
+        _sanitize_env_file_if_needed(user_env)
+
     if user_env.exists():
         _load_dotenv_with_fallback(user_env, override=True)
         loaded.append(user_env)
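`_sanitize_env_lines` itself is not part of this diff, but the repair it performs can be sketched. A hypothetical splitter under the same assumption (an allowlist of known variable names decides where a new pair starts; the real implementation in `hermes_cli/config.py` may differ):

```python
import re

# Illustrative subset; the real allowlist covers all Hermes env vars.
KNOWN_KEYS = {"DISCORD_BOT_TOKEN", "OPENROUTER_API_KEY", "KIMI_API_KEY"}

def split_concatenated_pairs(line: str) -> list[str]:
    """Split 'A=1B=2' into ['A=1', 'B=2'] using the key allowlist.

    Only a token that is a *known* key starts a new pair, so values
    that merely look key-like are left alone.
    """
    out: list[str] = []
    start = 0
    for m in re.finditer(r"[A-Z][A-Z0-9_]*=", line):
        if m.group()[:-1] in KNOWN_KEYS and m.start() > start:
            out.append(line[start:m.start()])
            start = m.start()
    out.append(line[start:])
    return out

# split_concatenated_pairs("DISCORD_BOT_TOKEN=abc123KIMI_API_KEY=sk-x")
# -> ["DISCORD_BOT_TOKEN=abc123", "KIMI_API_KEY=sk-x"]
# Limitation: an all-uppercase value fused directly onto the next key
# defeats this regex; the real sanitizer handles more cases.
```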
+ """ + if not path.exists(): + return + try: + from hermes_cli.config import _sanitize_env_lines + except ImportError: + return # early bootstrap โ€” config module not available yet + + read_kw = {"encoding": "utf-8", "errors": "replace"} + try: + with open(path, **read_kw) as f: + original = f.readlines() + sanitized = _sanitize_env_lines(original) + if sanitized != original: + import tempfile + fd, tmp = tempfile.mkstemp( + dir=str(path.parent), suffix=".tmp", prefix=".env_" + ) + try: + with os.fdopen(fd, "w", encoding="utf-8") as f: + f.writelines(sanitized) + f.flush() + os.fsync(f.fileno()) + os.replace(tmp, path) + except BaseException: + try: + os.unlink(tmp) + except OSError: + pass + raise + except Exception: + pass # best-effort โ€” don't block gateway startup + + def load_hermes_dotenv( *, hermes_home: str | os.PathLike | None = None, @@ -34,6 +79,10 @@ def load_hermes_dotenv( user_env = home_path / ".env" project_env_path = Path(project_env) if project_env else None + # Fix corrupted .env files before python-dotenv parses them (#8908). + if user_env.exists(): + _sanitize_env_file_if_needed(user_env) + if user_env.exists(): _load_dotenv_with_fallback(user_env, override=True) loaded.append(user_env) diff --git a/hermes_cli/gateway.py b/hermes_cli/gateway.py index 908d8992a..c049c0f96 100644 --- a/hermes_cli/gateway.py +++ b/hermes_cli/gateway.py @@ -331,7 +331,7 @@ def is_linux() -> bool: return sys.platform.startswith('linux') -from hermes_constants import is_termux, is_wsl +from hermes_constants import is_container, is_termux, is_wsl def _wsl_systemd_operational() -> bool: @@ -353,7 +353,9 @@ def _wsl_systemd_operational() -> bool: def supports_systemd_services() -> bool: - if not is_linux() or is_termux(): + if not is_linux() or is_termux() or is_container(): + return False + if shutil.which("systemctl") is None: return False if is_wsl(): return _wsl_systemd_operational() @@ -483,6 +485,21 @@ def _journalctl_cmd(system: bool = False) -> list[str]: return ["journalctl"] if system else ["journalctl", "--user"] +def _run_systemctl(args: list[str], *, system: bool = False, **kwargs) -> subprocess.CompletedProcess: + """Run a systemctl command, raising RuntimeError if systemctl is missing. + + Defense-in-depth: callers are gated by ``supports_systemd_services()``, + but this ensures any future caller that bypasses the gate still gets a + clear error instead of a raw ``FileNotFoundError`` traceback. + """ + try: + return subprocess.run(_systemctl_cmd(system) + args, **kwargs) + except FileNotFoundError: + raise RuntimeError( + "systemctl is not available on this system" + ) from None + + def _service_scope_label(system: bool = False) -> str: return "system" if system else "user" @@ -751,14 +768,22 @@ def _remap_path_for_user(path: str, target_home_dir: str) -> str: /root/.hermes/hermes-agent -> /home/alice/.hermes/hermes-agent /opt/hermes -> /opt/hermes (kept as-is) + + Note: this function intentionally does NOT resolve symlinks. A venv's + ``bin/python`` is typically a symlink to the base interpreter (e.g. a + uv-managed CPython at ``~/.local/share/uv/python/.../python3.11``); + resolving that symlink swaps the unit's ``ExecStart`` to a bare Python + that has none of the venv's site-packages, so the service crashes on + the first ``import``. Keep the symlinked path so the venv activates + its own environment. Lexical expansion only via ``expanduser``. 
""" - current_home = Path.home().resolve() - resolved = Path(path).resolve() + current_home = Path.home() + p = Path(path).expanduser() try: - relative = resolved.relative_to(current_home) + relative = p.relative_to(current_home) return str(Path(target_home_dir) / relative) except ValueError: - return str(resolved) + return str(p) def _hermes_home_for_target_user(target_home_dir: str) -> str: @@ -929,7 +954,7 @@ def refresh_systemd_unit_if_needed(system: bool = False) -> bool: expected_user = _read_systemd_user_from_unit(unit_path) if system else None unit_path.write_text(generate_systemd_unit(system=system, run_as_user=expected_user), encoding="utf-8") - subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) + _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30) print(f"โ†ป Updated gateway {_service_scope_label(system)} service definition to match the current Hermes install") return True @@ -1025,7 +1050,7 @@ def systemd_install(force: bool = False, system: bool = False, run_as_user: str if not systemd_unit_is_current(system=system): print(f"โ†ป Repairing outdated {_service_scope_label(system)} systemd service at: {unit_path}") refresh_systemd_unit_if_needed(system=system) - subprocess.run(_systemctl_cmd(system) + ["enable", get_service_name()], check=True, timeout=30) + _run_systemctl(["enable", get_service_name()], system=system, check=True, timeout=30) print(f"โœ“ {_service_scope_label(system).capitalize()} service definition updated") return print(f"Service already installed at: {unit_path}") @@ -1036,8 +1061,8 @@ def systemd_install(force: bool = False, system: bool = False, run_as_user: str print(f"Installing {_service_scope_label(system)} systemd service to: {unit_path}") unit_path.write_text(generate_systemd_unit(system=system, run_as_user=run_as_user), encoding="utf-8") - subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) - subprocess.run(_systemctl_cmd(system) + ["enable", get_service_name()], check=True, timeout=30) + _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30) + _run_systemctl(["enable", get_service_name()], system=system, check=True, timeout=30) print() print(f"โœ“ {_service_scope_label(system).capitalize()} service installed and enabled!") @@ -1063,15 +1088,15 @@ def systemd_uninstall(system: bool = False): if system: _require_root_for_system_service("uninstall") - subprocess.run(_systemctl_cmd(system) + ["stop", get_service_name()], check=False, timeout=90) - subprocess.run(_systemctl_cmd(system) + ["disable", get_service_name()], check=False, timeout=30) + _run_systemctl(["stop", get_service_name()], system=system, check=False, timeout=90) + _run_systemctl(["disable", get_service_name()], system=system, check=False, timeout=30) unit_path = get_systemd_unit_path(system=system) if unit_path.exists(): unit_path.unlink() print(f"โœ“ Removed {unit_path}") - subprocess.run(_systemctl_cmd(system) + ["daemon-reload"], check=True, timeout=30) + _run_systemctl(["daemon-reload"], system=system, check=True, timeout=30) print(f"โœ“ {_service_scope_label(system).capitalize()} service uninstalled") @@ -1080,7 +1105,7 @@ def systemd_start(system: bool = False): if system: _require_root_for_system_service("start") refresh_systemd_unit_if_needed(system=system) - subprocess.run(_systemctl_cmd(system) + ["start", get_service_name()], check=True, timeout=30) + _run_systemctl(["start", get_service_name()], system=system, check=True, timeout=30) print(f"โœ“ 
@@ -1089,7 +1114,7 @@ def systemd_stop(system: bool = False):
     system = _select_systemd_scope(system)
     if system:
         _require_root_for_system_service("stop")
-    subprocess.run(_systemctl_cmd(system) + ["stop", get_service_name()], check=True, timeout=90)
+    _run_systemctl(["stop", get_service_name()], system=system, check=True, timeout=90)
     print(f"✓ {_service_scope_label(system).capitalize()} service stopped")


@@ -1105,7 +1130,7 @@ def systemd_restart(system: bool = False):
     if pid is not None and _request_gateway_self_restart(pid):
         print(f"✓ {_service_scope_label(system).capitalize()} service restart requested")
         return
-    subprocess.run(_systemctl_cmd(system) + ["reload-or-restart", get_service_name()], check=True, timeout=90)
+    _run_systemctl(["reload-or-restart", get_service_name()], system=system, check=True, timeout=90)
     print(f"✓ {_service_scope_label(system).capitalize()} service restarted")


@@ -1129,14 +1154,16 @@ def systemd_status(deep: bool = False, system: bool = False):
         print(f"   Run: {'sudo ' if system else ''}hermes gateway restart{scope_flag}   # auto-refreshes the unit")
         print()

-    subprocess.run(
-        _systemctl_cmd(system) + ["status", get_service_name(), "--no-pager"],
+    _run_systemctl(
+        ["status", get_service_name(), "--no-pager"],
+        system=system,
         capture_output=False,
         timeout=10,
     )

-    result = subprocess.run(
-        _systemctl_cmd(system) + ["is-active", get_service_name()],
+    result = _run_systemctl(
+        ["is-active", get_service_name()],
+        system=system,
         capture_output=True,
         text=True,
         timeout=10,
@@ -2100,12 +2127,6 @@ def _setup_dingtalk():
     _setup_standard_platform(dingtalk_platform)


-def _setup_feishu():
-    """Configure Feishu / Lark via the standard platform setup."""
-    feishu_platform = next(p for p in _PLATFORMS if p["key"] == "feishu")
-    _setup_standard_platform(feishu_platform)
-
-
 def _setup_wecom():
     """Configure WeCom (Enterprise WeChat) via the standard platform setup."""
     wecom_platform = next(p for p in _PLATFORMS if p["key"] == "wecom")
@@ -2129,24 +2150,24 @@ def _is_service_running() -> bool:

     if user_unit_exists:
         try:
-            result = subprocess.run(
-                _systemctl_cmd(False) + ["is-active", get_service_name()],
-                capture_output=True, text=True, timeout=10,
+            result = _run_systemctl(
+                ["is-active", get_service_name()],
+                system=False, capture_output=True, text=True, timeout=10,
             )
             if result.stdout.strip() == "active":
                 return True
-        except subprocess.TimeoutExpired:
+        except (RuntimeError, subprocess.TimeoutExpired):
             pass

     if system_unit_exists:
         try:
-            result = subprocess.run(
-                _systemctl_cmd(True) + ["is-active", get_service_name()],
-                capture_output=True, text=True, timeout=10,
+            result = _run_systemctl(
+                ["is-active", get_service_name()],
+                system=True, capture_output=True, text=True, timeout=10,
             )
             if result.stdout.strip() == "active":
                 return True
-        except subprocess.TimeoutExpired:
+        except (RuntimeError, subprocess.TimeoutExpired):
             pass

     return False
@@ -2290,6 +2311,178 @@ def _setup_weixin():
         print_info(f"   User ID: {user_id}")


+def _setup_feishu():
+    """Interactive setup for Feishu / Lark — scan-to-create or manual credentials."""
+    print()
+    print(color(" ─── 🪽 Feishu / Lark Setup ───", Colors.CYAN))
+
+    existing_app_id = get_env_value("FEISHU_APP_ID")
+    existing_secret = get_env_value("FEISHU_APP_SECRET")
+    if existing_app_id and existing_secret:
+        print()
+        print_success("Feishu / Lark is already configured.")
+        if not prompt_yes_no("   Reconfigure Feishu / Lark?", False):
+            return
+
+    # ── Choose setup method ──
+    print()
+    method_choices = [
+        "Scan QR code to create a new bot automatically (recommended)",
+        "Enter existing App ID and App Secret manually",
+    ]
+    method_idx = prompt_choice("   How would you like to set up Feishu / Lark?", method_choices, 0)
+
+    credentials = None
+    used_qr = False
+
+    if method_idx == 0:
+        # ── QR scan-to-create ──
+        try:
+            from gateway.platforms.feishu import qr_register
+        except Exception as exc:
+            print_error(f"   Feishu / Lark onboard import failed: {exc}")
+            qr_register = None
+
+        if qr_register is not None:
+            try:
+                credentials = qr_register()
+            except KeyboardInterrupt:
+                print()
+                print_warning("   Feishu / Lark setup cancelled.")
+                return
+            except Exception as exc:
+                print_warning(f"   QR registration failed: {exc}")
+            if credentials:
+                used_qr = True
+        if not credentials:
+            print_info("   QR setup did not complete. Continuing with manual input.")
+
+    # ── Manual credential input ──
+    if not credentials:
+        print()
+        print_info("   Go to https://open.feishu.cn/ (or https://open.larksuite.com/ for Lark)")
+        print_info("   Create an app, enable the Bot capability, and copy the credentials.")
+        print()
+        app_id = prompt("   App ID", password=False)
+        if not app_id:
+            print_warning("   Skipped — Feishu / Lark won't work without an App ID.")
+            return
+        app_secret = prompt("   App Secret", password=True)
+        if not app_secret:
+            print_warning("   Skipped — Feishu / Lark won't work without an App Secret.")
+            return
+
+        domain_choices = ["feishu (China)", "lark (International)"]
+        domain_idx = prompt_choice("   Domain", domain_choices, 0)
+        domain = "lark" if domain_idx == 1 else "feishu"
+
+        # Try to probe the bot with manual credentials
+        bot_name = None
+        try:
+            from gateway.platforms.feishu import probe_bot
+            bot_info = probe_bot(app_id, app_secret, domain)
+            if bot_info:
+                bot_name = bot_info.get("bot_name")
+                print_success(f"   Credentials verified — bot: {bot_name or 'unnamed'}")
+            else:
+                print_warning("   Could not verify bot connection. Credentials saved anyway.")
+        except Exception as exc:
+            print_warning(f"   Credential verification skipped: {exc}")
+
+        credentials = {
+            "app_id": app_id,
+            "app_secret": app_secret,
+            "domain": domain,
+            "open_id": None,
+            "bot_name": bot_name,
+        }
+
+    # ── Save core credentials ──
+    app_id = credentials["app_id"]
+    app_secret = credentials["app_secret"]
+    domain = credentials.get("domain", "feishu")
+    open_id = credentials.get("open_id")
+    bot_name = credentials.get("bot_name")
+
+    save_env_value("FEISHU_APP_ID", app_id)
+    save_env_value("FEISHU_APP_SECRET", app_secret)
+    save_env_value("FEISHU_DOMAIN", domain)
+    # Bot identity is resolved at runtime via _hydrate_bot_identity().
+
+    # ── Connection mode ──
+    if used_qr:
+        connection_mode = "websocket"
+    else:
+        print()
+        mode_choices = [
+            "WebSocket (recommended — no public URL needed)",
+            "Webhook (requires a reachable HTTP endpoint)",
+        ]
+        mode_idx = prompt_choice("   Connection mode", mode_choices, 0)
+        connection_mode = "webhook" if mode_idx == 1 else "websocket"
+        if connection_mode == "webhook":
+            print_info("   Webhook defaults: 127.0.0.1:8765/feishu/webhook")
+            print_info("   Override with FEISHU_WEBHOOK_HOST / FEISHU_WEBHOOK_PORT / FEISHU_WEBHOOK_PATH")
+            print_info("   For signature verification, set FEISHU_ENCRYPT_KEY and FEISHU_VERIFICATION_TOKEN")
+    save_env_value("FEISHU_CONNECTION_MODE", connection_mode)
+
+    if bot_name:
+        print()
+        print_success(f"   Bot created: {bot_name}")
+
+    # ── DM security policy ──
+    print()
+    access_choices = [
+        "Use DM pairing approval (recommended)",
+        "Allow all direct messages",
+        "Only allow listed user IDs",
+    ]
+    access_idx = prompt_choice("   How should direct messages be authorized?", access_choices, 0)
+    if access_idx == 0:
+        save_env_value("FEISHU_ALLOW_ALL_USERS", "false")
+        save_env_value("FEISHU_ALLOWED_USERS", "")
+        print_success("   DM pairing enabled.")
+        print_info("   Unknown users can request access; approve with `hermes pairing approve`.")
+    elif access_idx == 1:
+        save_env_value("FEISHU_ALLOW_ALL_USERS", "true")
+        save_env_value("FEISHU_ALLOWED_USERS", "")
+        print_warning("   Open DM access enabled for Feishu / Lark.")
+    else:
+        save_env_value("FEISHU_ALLOW_ALL_USERS", "false")
+        default_allow = open_id or ""
+        allowlist = prompt("   Allowed user IDs (comma-separated)", default_allow, password=False).replace(" ", "")
+        save_env_value("FEISHU_ALLOWED_USERS", allowlist)
+        print_success("   Allowlist saved.")
+
+    # ── Group policy ──
+    print()
+    group_choices = [
+        "Respond only when @mentioned in groups (recommended)",
+        "Disable group chats",
+    ]
+    group_idx = prompt_choice("   How should group chats be handled?", group_choices, 0)
+    if group_idx == 0:
+        save_env_value("FEISHU_GROUP_POLICY", "open")
+        print_info("   Group chats enabled (bot must be @mentioned).")
+    else:
+        save_env_value("FEISHU_GROUP_POLICY", "disabled")
+        print_info("   Group chats disabled.")
+
+    # ── Home channel ──
+    print()
+    home_channel = prompt("   Home chat ID (optional, for cron/notifications)", password=False)
+    if home_channel:
+        save_env_value("FEISHU_HOME_CHANNEL", home_channel)
+        print_success(f"   Home channel set to {home_channel}")
+
+    print()
+    print_success("🪽 Feishu / Lark configured!")
+    print_info(f"   App ID: {app_id}")
+    print_info(f"   Domain: {domain}")
+    if bot_name:
+        print_info(f"   Bot: {bot_name}")
+
+
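Everything `_setup_feishu` persists lands in `~/.hermes/.env`. A sketch of how a consumer might read the keys back (the defaults shown are assumptions inferred from the setup flow above; the authoritative reader lives in `gateway/platforms/feishu.py`, which is not part of this diff):

```python
import os

feishu = {
    "app_id": os.getenv("FEISHU_APP_ID", ""),
    "app_secret": os.getenv("FEISHU_APP_SECRET", ""),
    "domain": os.getenv("FEISHU_DOMAIN", "feishu"),            # or "lark"
    "mode": os.getenv("FEISHU_CONNECTION_MODE", "websocket"),  # or "webhook"
    "allow_all": os.getenv("FEISHU_ALLOW_ALL_USERS", "false") == "true",
    # Empty allowlist with allow_all=false means DM pairing approval.
    "allowed_users": [u for u in os.getenv("FEISHU_ALLOWED_USERS", "").split(",") if u],
    "group_policy": os.getenv("FEISHU_GROUP_POLICY", "open"),  # or "disabled"
    "home_channel": os.getenv("FEISHU_HOME_CHANNEL", ""),
}
```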
 def _setup_signal():
     """Interactive setup for Signal messenger."""
     import shutil
@@ -2467,6 +2660,8 @@ def gateway_setup():
             _setup_signal()
         elif platform["key"] == "weixin":
             _setup_weixin()
+        elif platform["key"] == "feishu":
+            _setup_feishu()
         else:
             _setup_standard_platform(platform)
@@ -2606,6 +2801,15 @@ def gateway_command(args):
             print("  tmux new -s hermes 'hermes gateway run'   # persistent via tmux")
             print("  nohup hermes gateway run > ~/.hermes/logs/gateway.log 2>&1 &   # background")
             sys.exit(1)
+        elif is_container():
+            print("Service installation is not needed inside a Docker container.")
+            print("The container runtime is your service manager — use Docker restart policies instead:")
+            print()
+            print("  docker run --restart unless-stopped ...   # auto-restart on crash/reboot")
+            print("  docker restart <container>                # manual restart")
+            print()
+            print("To run the gateway: hermes gateway run")
+            sys.exit(0)
         else:
             print("Service installation not supported on this platform.")
             print("Run manually: hermes gateway run")
@@ -2624,10 +2828,17 @@ def gateway_command(args):
             systemd_uninstall(system=system)
         elif is_macos():
             launchd_uninstall()
+        elif is_container():
+            print("Service uninstall is not applicable inside a Docker container.")
+            print("To stop the gateway, stop or remove the container:")
+            print()
+            print("  docker stop <container>")
+            print("  docker rm <container>")
+            sys.exit(0)
         else:
             print("Not supported on this platform.")
             sys.exit(1)
-
+
     elif subcmd == "start":
         system = getattr(args, 'system', False)
         if is_termux():
@@ -2648,10 +2859,19 @@ def gateway_command(args):
             print()
             print("To enable systemd: add systemd=true to /etc/wsl.conf and run 'wsl --shutdown' from PowerShell.")
             sys.exit(1)
+        elif is_container():
+            print("Service start is not applicable inside a Docker container.")
+            print("The gateway runs as the container's main process.")
+            print()
+            print("  docker start <container>     # start a stopped container")
+            print("  docker restart <container>   # restart a running container")
+            print()
+            print("Or run the gateway directly: hermes gateway run")
+            sys.exit(0)
         else:
             print("Not supported on this platform.")
             sys.exit(1)
-
+
     elif subcmd == "stop":
         stop_all = getattr(args, 'all', False)
         system = getattr(args, 'system', False)
diff --git a/hermes_cli/main.py b/hermes_cli/main.py
index 8bef611b0..18826eaaa 100644
--- a/hermes_cli/main.py
+++ b/hermes_cli/main.py
@@ -1213,7 +1213,7 @@ def select_provider_and_model(args=None):
     from hermes_cli.auth import (
         resolve_provider, AuthError, format_auth_error,
     )
-    from hermes_cli.config import load_config, get_env_value
+    from hermes_cli.config import get_compatible_custom_providers, load_config, get_env_value

     config = load_config()
     current_model = config.get("model")
@@ -1248,28 +1248,9 @@ def select_provider_and_model(args=None):
     if active == "openrouter" and get_env_value("OPENAI_BASE_URL"):
         active = "custom"

-    provider_labels = {
-        "openrouter": "OpenRouter",
-        "nous": "Nous Portal",
-        "openai-codex": "OpenAI Codex",
-        "qwen-oauth": "Qwen OAuth",
-        "copilot-acp": "GitHub Copilot ACP",
-        "copilot": "GitHub Copilot",
-        "anthropic": "Anthropic",
-        "gemini": "Google AI Studio",
-        "zai": "Z.AI / GLM",
-        "kimi-coding": "Kimi / Moonshot",
-        "minimax": "MiniMax",
-        "minimax-cn": "MiniMax (China)",
-        "opencode-zen": "OpenCode Zen",
-        "opencode-go": "OpenCode Go",
-        "ai-gateway": "AI Gateway",
-        "kilocode": "Kilo Code",
-        "alibaba": "Alibaba Cloud (DashScope)",
-        "huggingface": "Hugging Face",
-        "xiaomi": "Xiaomi MiMo",
-        "custom": "Custom endpoint",
-    }
+    from hermes_cli.models import CANONICAL_PROVIDERS, _PROVIDER_LABELS
+
+    provider_labels = dict(_PROVIDER_LABELS)  # derive from canonical list

     active_label = provider_labels.get(active, active) if active else "none"
     print()
@@ -1278,37 +1259,13 @@ def select_provider_and_model(args=None):
     print()

     # Step 1: Provider selection — top providers shown first, rest behind "More..."
-    top_providers = [
-        ("nous", "Nous Portal (Nous Research subscription)"),
-        ("openrouter", "OpenRouter (100+ models, pay-per-use)"),
-        ("anthropic", "Anthropic (Claude models — API key or Claude Code)"),
-        ("openai-codex", "OpenAI Codex"),
-        ("qwen-oauth", "Qwen OAuth (reuses local Qwen CLI login)"),
-        ("copilot", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),
-        ("huggingface", "Hugging Face Inference Providers (20+ open models)"),
-    ]
-
-    extended_providers = [
-        ("copilot-acp", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
-        ("gemini", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
-        ("zai", "Z.AI / GLM (Zhipu AI direct API)"),
-        ("kimi-coding", "Kimi / Moonshot (Moonshot AI direct API)"),
-        ("minimax", "MiniMax (global direct API)"),
-        ("minimax-cn", "MiniMax China (domestic direct API)"),
-        ("kilocode", "Kilo Code (Kilo Gateway API)"),
-        ("opencode-zen", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
-        ("opencode-go", "OpenCode Go (open models, $10/month subscription)"),
-        ("ai-gateway", "AI Gateway (Vercel — 200+ models, pay-per-use)"),
-        ("alibaba", "Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"),
-        ("xiaomi", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
-    ]
+    # Derived from CANONICAL_PROVIDERS (single source of truth)
+    top_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "top"]
+    extended_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "extended"]

     def _named_custom_provider_map(cfg) -> dict[str, dict[str, str]]:
-        custom_providers_cfg = cfg.get("custom_providers") or []
         custom_provider_map = {}
-        if not isinstance(custom_providers_cfg, list):
-            return custom_provider_map
-        for entry in custom_providers_cfg:
+        for entry in get_compatible_custom_providers(cfg):
             if not isinstance(entry, dict):
                 continue
             name = (entry.get("name") or "").strip()
@@ -1316,12 +1273,20 @@ def select_provider_and_model(args=None):
             if not name or not base_url:
                 continue
             key = "custom:" + name.lower().replace(" ", "-")
+            provider_key = (entry.get("provider_key") or "").strip()
+            if provider_key:
+                try:
+                    resolve_provider(provider_key)
+                except AuthError:
+                    key = provider_key
             custom_provider_map[key] = {
                 "name": name,
                 "base_url": base_url,
                 "api_key": entry.get("api_key", ""),
+                "key_env": entry.get("key_env", ""),
                 "model": entry.get("model", ""),
                 "api_mode": entry.get("api_mode", ""),
+                "provider_key": provider_key,
             }
         return custom_provider_map

@@ -1371,7 +1336,8 @@ def select_provider_and_model(args=None):
     if selected_provider == "more":
         ext_ordered = list(extended_providers)
         ext_ordered.append(("custom", "Custom endpoint (enter URL manually)"))
-        if _custom_provider_map:
+        _has_saved_custom_list = isinstance(config.get("custom_providers"), list) and bool(config.get("custom_providers"))
+        if _has_saved_custom_list:
             ext_ordered.append(("remove-custom", "Remove a saved custom provider"))
         ext_ordered.append(("cancel", "Cancel"))

@@ -1398,7 +1364,7 @@ def select_provider_and_model(args=None):
         _model_flow_copilot(config, current_model)
     elif selected_provider == "custom":
         _model_flow_custom(config)
-    elif selected_provider.startswith("custom:"):
+    elif selected_provider.startswith("custom:") or selected_provider in _custom_provider_map:
         provider_info = _named_custom_provider_map(load_config()).get(selected_provider)
         if provider_info is None:
             print(
@@ -1413,7 +1379,7 @@ def select_provider_and_model(args=None):
         _model_flow_anthropic(config, current_model)
     elif selected_provider == "kimi-coding":
"kimi-coding": _model_flow_kimi(config, current_model) - elif selected_provider in ("gemini", "zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface", "xiaomi"): + elif selected_provider in ("gemini", "deepseek", "xai", "zai", "kimi-coding-cn", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface", "xiaomi"): _model_flow_api_key_provider(config, selected_provider, current_model) # โ”€โ”€ Post-switch cleanup: clear stale OPENAI_BASE_URL โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ @@ -2083,7 +2049,9 @@ def _model_flow_named_custom(config, provider_info): name = provider_info["name"] base_url = provider_info["base_url"] api_key = provider_info.get("api_key", "") + key_env = provider_info.get("key_env", "") saved_model = provider_info.get("model", "") + provider_key = (provider_info.get("provider_key") or "").strip() print(f" Provider: {name}") print(f" URL: {base_url}") @@ -2166,10 +2134,15 @@ def _model_flow_named_custom(config, provider_info): if not isinstance(model, dict): model = {"default": model} if model else {} cfg["model"] = model - model["provider"] = "custom" - model["base_url"] = base_url - if api_key: - model["api_key"] = api_key + if provider_key: + model["provider"] = provider_key + model.pop("base_url", None) + model.pop("api_key", None) + else: + model["provider"] = "custom" + model["base_url"] = base_url + if api_key: + model["api_key"] = api_key # Apply api_mode from custom_providers entry, or clear stale value custom_api_mode = provider_info.get("api_mode", "") if custom_api_mode: @@ -2179,8 +2152,23 @@ def _model_flow_named_custom(config, provider_info): save_config(cfg) deactivate_provider() - # Save model name to the custom_providers entry for next time - _save_custom_provider(base_url, api_key, model_name) + # Persist the selected model back to whichever schema owns this endpoint. 
+    if provider_key:
+        cfg = load_config()
+        providers_cfg = cfg.get("providers")
+        if isinstance(providers_cfg, dict):
+            provider_entry = providers_cfg.get(provider_key)
+            if isinstance(provider_entry, dict):
+                provider_entry["default_model"] = model_name
+                if api_key and not str(provider_entry.get("api_key", "") or "").strip():
+                    provider_entry["api_key"] = api_key
+                if key_env and not str(provider_entry.get("key_env", "") or "").strip():
+                    provider_entry["key_env"] = key_env
+                cfg["providers"] = providers_cfg
+                save_config(cfg)
+    else:
+        # Save model name to the custom_providers entry for next time
+        _save_custom_provider(base_url, api_key, model_name)

     print(f"\n✅ Model set to: {model_name}")
     print(f"   Provider: {name} ({base_url})")
@@ -3048,6 +3036,12 @@ def cmd_dump(args):
     run_dump(args)


+def cmd_debug(args):
+    """Debug tools (share report, etc.)."""
+    from hermes_cli.debug import run_debug
+    run_debug(args)
+
+
 def cmd_config(args):
     """Configuration management."""
     from hermes_cli.config import config_command
@@ -3056,8 +3050,12 @@ def cmd_config(args):

 def cmd_backup(args):
     """Back up Hermes home directory to a zip file."""
-    from hermes_cli.backup import run_backup
-    run_backup(args)
+    if getattr(args, "quick", False):
+        from hermes_cli.backup import run_quick_backup
+        run_quick_backup(args)
+    else:
+        from hermes_cli.backup import run_backup
+        run_backup(args)


 def cmd_import(args):
@@ -3184,6 +3182,44 @@ def _gateway_prompt(prompt_text: str, default: str = "", timeout: float = 300.0)
     return default


+def _build_web_ui(web_dir: Path, *, fatal: bool = False) -> bool:
+    """Build the web UI frontend if npm is available.
+
+    Args:
+        web_dir: Path to the ``web/`` source directory.
+        fatal: If True, print error guidance and return False on failure
+               instead of a soft warning (used by ``hermes web``).
+
+    Returns True if the build succeeded or was skipped (no package.json).
+    """
+    if not (web_dir / "package.json").exists():
+        return True
+    import shutil
+    npm = shutil.which("npm")
+    if not npm:
+        if fatal:
+            print("Web UI frontend not built and npm is not available.")
+            print("Install Node.js, then run: cd web && npm install && npm run build")
+        return not fatal
+    print("→ Building web UI...")
+    r1 = subprocess.run([npm, "install", "--silent"], cwd=web_dir, capture_output=True)
+    if r1.returncode != 0:
+        print(f"  {'✗' if fatal else '⚠'} Web UI npm install failed"
+              + ("" if fatal else " (hermes web will not be available)"))
+        if fatal:
+            print("  Run manually: cd web && npm install && npm run build")
+        return False
+    r2 = subprocess.run([npm, "run", "build"], cwd=web_dir, capture_output=True)
+    if r2.returncode != 0:
+        print(f"  {'✗' if fatal else '⚠'} Web UI build failed"
+              + ("" if fatal else " (hermes web will not be available)"))
+        if fatal:
+            print("  Run manually: cd web && npm install && npm run build")
+        return False
+    print("  ✓ Web UI built")
+    return True
+
+
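Both call shapes appear later in this diff: the update flows treat a failed build as a soft warning, while `hermes dashboard` treats it as fatal. In sketch form (this assumes the main.py layout where `PROJECT_ROOT` is already defined at module scope):

```python
# Soft mode (hermes update): a failed build only prints a warning and
# the update itself still completes.
_build_web_ui(PROJECT_ROOT / "web")

# Fatal mode (hermes dashboard): refuse to start without a built UI.
if not _build_web_ui(PROJECT_ROOT / "web", fatal=True):
    sys.exit(1)
```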
 def _update_via_zip(args):
     """Update Hermes Agent by downloading a ZIP archive.
@@ -3278,8 +3314,9 @@ def _update_via_zip(args):
             check=True,
         )
         _install_python_dependencies_with_optional_fallback(pip_cmd)
-
+
     _update_node_dependencies()
+    _build_web_ui(PROJECT_ROOT / "web")

     # Sync skills
     try:
@@ -4055,6 +4092,7 @@ def cmd_update(args):
     _install_python_dependencies_with_optional_fallback(pip_cmd)

     _update_node_dependencies()
+    _build_web_ui(PROJECT_ROOT / "web")

     print()
     print("✓ Code updated!")
@@ -4337,7 +4375,7 @@ def _coalesce_session_name_args(argv: list) -> list:
         "chat", "model", "gateway", "setup", "whatsapp", "login", "logout",
         "auth", "status", "cron", "doctor", "config", "pairing", "skills",
         "tools", "mcp", "sessions", "insights", "version", "update", "uninstall",
-        "profile",
+        "profile", "dashboard",
     }

     _SESSION_FLAGS = {"-c", "--continue", "-r", "--resume"}
@@ -4487,18 +4525,24 @@ def cmd_profile(args):
                 print(f'   Add to your shell config (~/.bashrc or ~/.zshrc):')
                 print(f'   export PATH="$HOME/.local/bin:$PATH"')

+            # Profile dir for display
+            try:
+                profile_dir_display = "~/" + str(profile_dir.relative_to(Path.home()))
+            except ValueError:
+                profile_dir_display = str(profile_dir)
+
             # Next steps
             print(f"\nNext steps:")
             print(f"  {name} setup           Configure API keys and model")
             print(f"  {name} chat            Start chatting")
             print(f"  {name} gateway start   Start the messaging gateway")
             if clone or clone_all:
-                try:
-                    profile_dir_display = "~/" + str(profile_dir.relative_to(Path.home()))
-                except ValueError:
-                    profile_dir_display = str(profile_dir)
                 print(f"\n  Edit {profile_dir_display}/.env for different API keys")
                 print(f"  Edit {profile_dir_display}/SOUL.md for different personality")
+            else:
+                print(f"\n  ⚠ This profile has no API keys yet. Run '{name} setup' first,")
+                print(f"    or it will inherit keys from your shell environment.")
+                print(f"  Edit {profile_dir_display}/SOUL.md to customize personality")

             print()
         except (ValueError, FileExistsError, FileNotFoundError) as e:
@@ -4609,6 +4653,27 @@ def cmd_profile(args):
         sys.exit(1)


+def cmd_dashboard(args):
+    """Start the web UI server."""
+    try:
+        import fastapi  # noqa: F401
+        import uvicorn  # noqa: F401
+    except ImportError:
+        print("Web UI dependencies not installed.")
+        print("Install them with: pip install hermes-agent[web]")
+        sys.exit(1)
+
+    if not _build_web_ui(PROJECT_ROOT / "web", fatal=True):
+        sys.exit(1)
+
+    from hermes_cli.web_server import start_server
+    start_server(
+        host=args.host,
+        port=args.port,
+        open_browser=not args.no_open,
+    )
+
+
 def cmd_completion(args):
     """Print shell completion script."""
     from hermes_cli.profiles import generate_bash_completion, generate_zsh_completion
@@ -4674,6 +4739,7 @@ Examples:
   hermes logs -f              Follow agent.log in real time
   hermes logs errors          View errors.log
   hermes logs --since 1h      Lines from the last hour
+  hermes debug share          Upload debug report for support
   hermes update               Update to latest version

For more help on a command:
@@ -4773,7 +4839,7 @@ For more help on a command:
     )
     chat_parser.add_argument(
         "--provider",
-        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "xiaomi"],
+        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "kimi-coding-cn", "minimax", "minimax-cn", "kilocode", "xiaomi"],
         default=None,
         help="Inference provider (default: auto)"
     )
@@ -5229,6 +5295,43 @@ For more help on a command:
     )
     dump_parser.set_defaults(func=cmd_dump)

========================================================================= + # debug command + # ========================================================================= + debug_parser = subparsers.add_parser( + "debug", + help="Debug tools โ€” upload logs and system info for support", + description="Debug utilities for Hermes Agent. Use 'hermes debug share' to " + "upload a debug report (system info + recent logs) to a paste " + "service and get a shareable URL.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog="""\ +Examples: + hermes debug share Upload debug report and print URL + hermes debug share --lines 500 Include more log lines + hermes debug share --expire 30 Keep paste for 30 days + hermes debug share --local Print report locally (no upload) +""", + ) + debug_sub = debug_parser.add_subparsers(dest="debug_command") + share_parser = debug_sub.add_parser( + "share", + help="Upload debug report to a paste service and print a shareable URL", + ) + share_parser.add_argument( + "--lines", type=int, default=200, + help="Number of log lines to include per log file (default: 200)", + ) + share_parser.add_argument( + "--expire", type=int, default=7, + help="Paste expiry in days (default: 7)", + ) + share_parser.add_argument( + "--local", action="store_true", + help="Print the report locally instead of uploading", + ) + debug_parser.set_defaults(func=cmd_debug) + # ========================================================================= # backup command # ========================================================================= @@ -5236,12 +5339,22 @@ For more help on a command: "backup", help="Back up Hermes home directory to a zip file", description="Create a zip archive of your entire Hermes configuration, " - "skills, sessions, and data (excludes the hermes-agent codebase)" + "skills, sessions, and data (excludes the hermes-agent codebase). " + "Use --quick for a fast snapshot of just critical state files." 
) backup_parser.add_argument( "-o", "--output", help="Output path for the zip file (default: ~/hermes-backup-.zip)" ) + backup_parser.add_argument( + "-q", "--quick", + action="store_true", + help="Quick snapshot: only critical state files (config, state.db, .env, auth, cron)" + ) + backup_parser.add_argument( + "-l", "--label", + help="Label for the snapshot (only used with --quick)" + ) backup_parser.set_defaults(func=cmd_backup) # ========================================================================= @@ -6082,6 +6195,19 @@ For more help on a command: ) completion_parser.set_defaults(func=cmd_completion) + # ========================================================================= + # dashboard command + # ========================================================================= + dashboard_parser = subparsers.add_parser( + "dashboard", + help="Start the web UI dashboard", + description="Launch the Hermes Agent web dashboard for managing config, API keys, and sessions", + ) + dashboard_parser.add_argument("--port", type=int, default=9119, help="Port (default 9119)") + dashboard_parser.add_argument("--host", default="127.0.0.1", help="Host (default 127.0.0.1)") + dashboard_parser.add_argument("--no-open", action="store_true", help="Don't open browser automatically") + dashboard_parser.set_defaults(func=cmd_dashboard) + # ========================================================================= # logs command # ========================================================================= diff --git a/hermes_cli/model_normalize.py b/hermes_cli/model_normalize.py index 68e8dc898..c391b0715 100644 --- a/hermes_cli/model_normalize.py +++ b/hermes_cli/model_normalize.py @@ -8,8 +8,9 @@ Different LLM providers expect model identifiers in different formats: hyphens: ``claude-sonnet-4-6``. - **Copilot** expects bare names *with* dots preserved: ``claude-sonnet-4.6``. -- **OpenCode Zen** follows the same dot-to-hyphen convention as - Anthropic: ``claude-sonnet-4-6``. +- **OpenCode Zen** preserves dots for GPT/GLM/Gemini/Kimi/MiniMax-style + model IDs, but Claude still uses hyphenated native names like + ``claude-sonnet-4-6``. - **OpenCode Go** preserves dots in model names: ``minimax-m2.7``. - **DeepSeek** only accepts two model identifiers: ``deepseek-chat`` and ``deepseek-reasoner``. @@ -67,7 +68,6 @@ _AGGREGATOR_PROVIDERS: frozenset[str] = frozenset({ # Providers that want bare names with dots replaced by hyphens. _DOT_TO_HYPHEN_PROVIDERS: frozenset[str] = frozenset({ "anthropic", - "opencode-zen", }) # Providers that want bare names with dots preserved. 
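The net effect of the OpenCode Zen rule change, expressed as checks (these mirror the doctests added below and assume the `hermes_cli` package is importable):

```python
from hermes_cli.model_normalize import normalize_model_for_provider

# Claude on OpenCode Zen keeps the hyphenated native form...
assert normalize_model_for_provider("claude-sonnet-4.6", "opencode-zen") == "claude-sonnet-4-6"
# ...while non-Claude IDs now keep their dots on OpenCode Zen...
assert normalize_model_for_provider("minimax-m2.5-free", "opencode-zen") == "minimax-m2.5-free"
# ...and Anthropic itself still maps dots to hyphens.
assert normalize_model_for_provider("claude-sonnet-4.6", "anthropic") == "claude-sonnet-4-6"
```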
@@ -88,6 +88,7 @@ _AUTHORITATIVE_NATIVE_PROVIDERS: frozenset[str] = frozenset({ _MATCHING_PREFIX_STRIP_PROVIDERS: frozenset[str] = frozenset({ "zai", "kimi-coding", + "kimi-coding-cn", "minimax", "minimax-cn", "alibaba", @@ -329,6 +330,9 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str: >>> normalize_model_for_provider("claude-sonnet-4.6", "opencode-zen") 'claude-sonnet-4-6' + >>> normalize_model_for_provider("minimax-m2.5-free", "opencode-zen") + 'minimax-m2.5-free' + >>> normalize_model_for_provider("deepseek-v3", "deepseek") 'deepseek-chat' @@ -351,7 +355,16 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str: if provider in _AGGREGATOR_PROVIDERS: return _prepend_vendor(name) - # --- Anthropic / OpenCode: strip matching provider prefix, dots -> hyphens --- + # --- OpenCode Zen: Claude stays hyphenated; other models keep dots --- + if provider == "opencode-zen": + bare = _strip_matching_provider_prefix(name, provider) + if "/" in bare: + return bare + if bare.lower().startswith("claude-"): + return _dots_to_hyphens(bare) + return bare + + # --- Anthropic: strip matching provider prefix, dots -> hyphens --- if provider in _DOT_TO_HYPHEN_PROVIDERS: bare = _strip_matching_provider_prefix(name, provider) if "/" in bare: diff --git a/hermes_cli/model_switch.py b/hermes_cli/model_switch.py index a257de48b..7f49af74b 100644 --- a/hermes_cli/model_switch.py +++ b/hermes_cli/model_switch.py @@ -21,6 +21,7 @@ OpenRouter variant suffixes (``:free``, ``:extended``, ``:fast``). from __future__ import annotations import logging +import re from dataclasses import dataclass from typing import List, NamedTuple, Optional @@ -57,10 +58,36 @@ _HERMES_MODEL_WARNING = ( "(Claude, GPT, Gemini, DeepSeek, etc.)." ) +# Match only the real Nous Research Hermes 3 / Hermes 4 chat families. +# The previous substring check (`"hermes" in name.lower()`) false-positived on +# unrelated local Modelfiles like ``hermes-brain:qwen3-14b-ctx16k`` that just +# happen to carry "hermes" in their tag but are fully tool-capable. +# +# Positive examples the regex must match: +# NousResearch/Hermes-3-Llama-3.1-70B, hermes-4-405b, openrouter/hermes3:70b +# Negative examples it must NOT match: +# hermes-brain:qwen3-14b-ctx16k, qwen3:14b, claude-opus-4-6 +_NOUS_HERMES_NON_AGENTIC_RE = re.compile( + r"(?:^|[/:])hermes[-_ ]?[34](?:[-_.:]|$)", + re.IGNORECASE, +) + + +def is_nous_hermes_non_agentic(model_name: str) -> bool: + """Return True if *model_name* is a real Nous Hermes 3/4 chat model. + + Used to decide whether to surface the non-agentic warning at startup. + Callers in :mod:`cli.py` and here should go through this single helper + so the two sites don't drift. + """ + if not model_name: + return False + return bool(_NOUS_HERMES_NON_AGENTIC_RE.search(model_name)) + def _check_hermes_model_warning(model_name: str) -> str: - """Return a warning string if *model_name* looks like a Hermes LLM model.""" - if "hermes" in model_name.lower(): + """Return a warning string if *model_name* is a Nous Hermes 3/4 chat model.""" + if is_nous_hermes_non_agentic(model_name): return _HERMES_MODEL_WARNING return "" @@ -908,6 +935,65 @@ def list_authenticated_providers( seen_slugs.add(pid) seen_slugs.add(hermes_slug) + # --- 2b. Cross-check canonical provider list --- + # Catches providers that are in CANONICAL_PROVIDERS but weren't found + # in PROVIDER_TO_MODELS_DEV or HERMES_OVERLAYS (keeps /model in sync + # with `hermes model`). 
+ try: + from hermes_cli.models import CANONICAL_PROVIDERS as _canon_provs + except ImportError: + _canon_provs = [] + + for _cp in _canon_provs: + if _cp.slug in seen_slugs: + continue + + # Check credentials via PROVIDER_REGISTRY (auth.py) + _cp_config = _auth_registry.get(_cp.slug) + _cp_has_creds = False + if _cp_config and _cp_config.api_key_env_vars: + _cp_has_creds = any(os.environ.get(ev) for ev in _cp_config.api_key_env_vars) + # Also check auth store and credential pool + if not _cp_has_creds: + try: + from hermes_cli.auth import _load_auth_store + _cp_store = _load_auth_store() + _cp_providers_store = _cp_store.get("providers", {}) + _cp_pool_store = _cp_store.get("credential_pool", {}) + if _cp_store and ( + _cp.slug in _cp_providers_store + or _cp.slug in _cp_pool_store + ): + _cp_has_creds = True + except Exception: + pass + if not _cp_has_creds: + try: + from agent.credential_pool import load_pool + _cp_pool = load_pool(_cp.slug) + if _cp_pool.has_credentials(): + _cp_has_creds = True + except Exception: + pass + + if not _cp_has_creds: + continue + + _cp_model_ids = curated.get(_cp.slug, []) + _cp_total = len(_cp_model_ids) + _cp_top = _cp_model_ids[:max_models] + + results.append({ + "slug": _cp.slug, + "name": _cp.label, + "is_current": _cp.slug == current_provider, + "is_user_defined": False, + "models": _cp_top, + "total_models": _cp_total, + "source": "canonical", + }) + seen_slugs.add(_cp.slug) + # --- 3. User-defined endpoints from config --- if user_providers and isinstance(user_providers, dict): for ep_name, ep_cfg in user_providers.items(): @@ -917,9 +1003,16 @@ def list_authenticated_providers( api_url = ep_cfg.get("api", "") or ep_cfg.get("url", "") or "" default_model = ep_cfg.get("default_model", "") + # Build models list from both default_model and full models array models_list = [] if default_model: models_list.append(default_model) + # Also include the full models list from config + cfg_models = ep_cfg.get("models", []) + if isinstance(cfg_models, list): + for m in cfg_models: + if m and m not in models_list: + models_list.append(m) # Try to probe /v1/models if URL is set (but don't block on it) # For now just show what we know from config diff --git a/hermes_cli/models.py b/hermes_cli/models.py index 964e1b522..eff360fab 100644 --- a/hermes_cli/models.py +++ b/hermes_cli/models.py @@ -12,7 +12,7 @@ import os import urllib.request import urllib.error from difflib import get_close_matches -from typing import Any, Optional +from typing import Any, NamedTuple, Optional COPILOT_BASE_URL = "https://api.githubcopilot.com" COPILOT_MODELS_URL = f"{COPILOT_BASE_URL}/models" @@ -70,13 +70,13 @@ def _codex_curated_models() -> list[str]: _PROVIDER_MODELS: dict[str, list[str]] = { "nous": [ + "xiaomi/mimo-v2-pro", "anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "anthropic/claude-sonnet-4.5", "anthropic/claude-haiku-4.5", "openai/gpt-5.4", "openai/gpt-5.4-mini", - "xiaomi/mimo-v2-pro", "openai/gpt-5.3-codex", "google/gemini-3-pro-preview", "google/gemini-3-flash-preview", @@ -130,6 +130,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = { "gemma-4-26b-it", ], "zai": [ + "glm-5.1", "glm-5", "glm-5-turbo", "glm-4.7", @@ -157,6 +158,12 @@ _PROVIDER_MODELS: dict[str, list[str]] = { "kimi-k2-turbo-preview", "kimi-k2-0905-preview", ], + "kimi-coding-cn": [ + "kimi-k2.5", + "kimi-k2-thinking", + "kimi-k2-turbo-preview", + "kimi-k2-0905-preview", + ], "moonshot": [ "kimi-k2.5", "kimi-k2-thinking", @@ -478,29 +485,55 @@ def check_nous_free_tier() -> bool: return False # 
default to paid on error — don't block users


-_PROVIDER_LABELS = {
-    "openrouter": "OpenRouter",
-    "openai-codex": "OpenAI Codex",
-    "copilot-acp": "GitHub Copilot ACP",
-    "nous": "Nous Portal",
-    "copilot": "GitHub Copilot",
-    "gemini": "Google AI Studio",
-    "zai": "Z.AI / GLM",
-    "kimi-coding": "Kimi / Moonshot",
-    "minimax": "MiniMax",
-    "minimax-cn": "MiniMax (China)",
-    "anthropic": "Anthropic",
-    "deepseek": "DeepSeek",
-    "opencode-zen": "OpenCode Zen",
-    "opencode-go": "OpenCode Go",
-    "ai-gateway": "AI Gateway",
-    "kilocode": "Kilo Code",
-    "alibaba": "Alibaba Cloud (DashScope)",
-    "qwen-oauth": "Qwen OAuth (Portal)",
-    "huggingface": "Hugging Face",
-    "xiaomi": "Xiaomi MiMo",
-    "custom": "Custom endpoint",
-}
+# ---------------------------------------------------------------------------
+# Canonical provider list — single source of truth for provider identity.
+# Every code path that lists, displays, or iterates providers derives from
+# this list: hermes model, /model, /provider, list_authenticated_providers.
+#
+# Fields:
+#   slug     — internal provider ID (used in config.yaml, --provider flag)
+#   label    — short display name
+#   tier     — "top" (shown first) or "extended" (behind "More...")
+#   tui_desc — longer description for the `hermes model` interactive picker
+# ---------------------------------------------------------------------------
+
+class ProviderEntry(NamedTuple):
+    slug: str
+    label: str
+    tier: str      # "top" or "extended"
+    tui_desc: str  # detailed description for `hermes model` TUI
+
+
+CANONICAL_PROVIDERS: list[ProviderEntry] = [
+    # -- Top tier (shown by default) --
+    ProviderEntry("nous",         "Nous Portal",         "top", "Nous Portal (Nous Research subscription)"),
+    ProviderEntry("openrouter",   "OpenRouter",          "top", "OpenRouter (100+ models, pay-per-use)"),
+    ProviderEntry("anthropic",    "Anthropic",           "top", "Anthropic (Claude models — API key or Claude Code)"),
+    ProviderEntry("openai-codex", "OpenAI Codex",        "top", "OpenAI Codex"),
+    ProviderEntry("qwen-oauth",   "Qwen OAuth (Portal)", "top", "Qwen OAuth (reuses local Qwen CLI login)"),
+    ProviderEntry("copilot",      "GitHub Copilot",      "top", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),
+    ProviderEntry("huggingface",  "Hugging Face",        "top", "Hugging Face Inference Providers (20+ open models)"),
+    # -- Extended tier (behind "More..." in hermes model) --
+    ProviderEntry("copilot-acp",    "GitHub Copilot ACP",        "extended", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
+    ProviderEntry("gemini",         "Google AI Studio",          "extended", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
+    ProviderEntry("deepseek",       "DeepSeek",                  "extended", "DeepSeek (DeepSeek-V3, R1, coder — direct API)"),
+    ProviderEntry("xai",            "xAI",                       "extended", "xAI (Grok models — direct API)"),
+    ProviderEntry("zai",            "Z.AI / GLM",                "extended", "Z.AI / GLM (Zhipu AI direct API)"),
+    ProviderEntry("kimi-coding",    "Kimi / Moonshot",           "extended", "Kimi / Moonshot (Moonshot AI direct API)"),
+    ProviderEntry("kimi-coding-cn", "Kimi / Moonshot (China)",   "extended", "Kimi / Moonshot China (Moonshot CN direct API)"),
+    ProviderEntry("minimax",        "MiniMax",                   "extended", "MiniMax (global direct API)"),
+    ProviderEntry("minimax-cn",     "MiniMax (China)",           "extended", "MiniMax China (domestic direct API)"),
+    ProviderEntry("kilocode",       "Kilo Code",                 "extended", "Kilo Code (Kilo Gateway API)"),
+    ProviderEntry("opencode-zen",   "OpenCode Zen",              "extended", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
+    ProviderEntry("opencode-go",    "OpenCode Go",               "extended", "OpenCode Go (open models, $10/month subscription)"),
+    ProviderEntry("ai-gateway",     "AI Gateway",                "extended", "AI Gateway (Vercel — 200+ models, pay-per-use)"),
+    ProviderEntry("alibaba",        "Alibaba Cloud (DashScope)", "extended", "Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"),
+    ProviderEntry("xiaomi",         "Xiaomi MiMo",               "extended", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
+]
+
+# Derived dicts — used throughout the codebase
+_PROVIDER_LABELS = {p.slug: p.label for p in CANONICAL_PROVIDERS}
+_PROVIDER_LABELS["custom"] = "Custom endpoint"  # special case: not a named provider
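Downstream call sites then derive their views from this one list; both patterns below already appear elsewhere in this diff (the picker tiers in main.py and the display order in `list_available_providers`):

```python
# `hermes model` picker tiers (main.py):
top_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "top"]
extended_providers = [(p.slug, p.tui_desc) for p in CANONICAL_PROVIDERS if p.tier == "extended"]

# Display order for list_available_providers (models.py):
provider_order = [p.slug for p in CANONICAL_PROVIDERS] + ["custom"]
```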
""" - # Canonical providers in display order - _PROVIDER_ORDER = [ - "openrouter", "nous", "openai-codex", "copilot", "copilot-acp", - "gemini", "huggingface", - "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba", - "qwen-oauth", "xiaomi", - "opencode-zen", "opencode-go", - "ai-gateway", "deepseek", "custom", - ] + # Derive display order from canonical list + custom + provider_order = [p.slug for p in CANONICAL_PROVIDERS] + ["custom"] + # Build reverse alias map aliases_for: dict[str, list[str]] = {} for alias, canonical in _PROVIDER_ALIASES.items(): aliases_for.setdefault(canonical, []).append(alias) result = [] - for pid in _PROVIDER_ORDER: + for pid in provider_order: label = _PROVIDER_LABELS.get(pid, pid) alias_list = aliases_for.get(pid, []) # Check if this provider has credentials available diff --git a/hermes_cli/profiles.py b/hermes_cli/profiles.py index 6735ff0f0..1e9fcae00 100644 --- a/hermes_cli/profiles.py +++ b/hermes_cli/profiles.py @@ -459,6 +459,16 @@ def create_profile( dst.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(src, dst) + # Seed a default SOUL.md so the user has a file to customize immediately. + # Skipped when the profile already has one (from --clone / --clone-all). + soul_path = profile_dir / "SOUL.md" + if not soul_path.exists(): + try: + from hermes_cli.default_soul import DEFAULT_SOUL_MD + soul_path.write_text(DEFAULT_SOUL_MD, encoding="utf-8") + except Exception: + pass # best-effort โ€” don't fail profile creation over this + return profile_dir diff --git a/hermes_cli/providers.py b/hermes_cli/providers.py index a99763498..ee4beebe0 100644 --- a/hermes_cli/providers.py +++ b/hermes_cli/providers.py @@ -179,6 +179,7 @@ ALIASES: Dict[str, str] = { # kimi-for-coding (models.dev ID) "kimi": "kimi-for-coding", "kimi-coding": "kimi-for-coding", + "kimi-coding-cn": "kimi-for-coding", "moonshot": "kimi-for-coding", # minimax-cn diff --git a/hermes_cli/runtime_provider.py b/hermes_cli/runtime_provider.py index cd0b66722..54b9ae65c 100644 --- a/hermes_cli/runtime_provider.py +++ b/hermes_cli/runtime_provider.py @@ -26,7 +26,7 @@ from hermes_cli.auth import ( resolve_external_process_provider_credentials, has_usable_secret, ) -from hermes_cli.config import load_config +from hermes_cli.config import get_compatible_custom_providers, load_config from hermes_constants import OPENROUTER_BASE_URL @@ -275,14 +275,56 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An return None config = load_config() + + # First check providers: dict (new-style user-defined providers) + providers = config.get("providers") + if isinstance(providers, dict): + for ep_name, entry in providers.items(): + if not isinstance(entry, dict): + continue + # Match exact name or normalized name + name_norm = _normalize_custom_provider_name(ep_name) + # Resolve the API key from the env var name stored in key_env + key_env = str(entry.get("key_env", "") or "").strip() + resolved_api_key = os.getenv(key_env, "").strip() if key_env else "" + + if requested_norm in {ep_name, name_norm, f"custom:{name_norm}"}: + # Found match by provider key + base_url = entry.get("api") or entry.get("url") or entry.get("base_url") or "" + if base_url: + return { + "name": entry.get("name", ep_name), + "base_url": base_url.strip(), + "api_key": resolved_api_key, + "model": entry.get("default_model", ""), + } + # Also check the 'name' field if present + display_name = entry.get("name", "") + if display_name: + display_norm = 
_normalize_custom_provider_name(display_name) + if requested_norm in {display_name, display_norm, f"custom:{display_norm}"}: + # Found match by display name + base_url = entry.get("api") or entry.get("url") or entry.get("base_url") or "" + if base_url: + return { + "name": display_name, + "base_url": base_url.strip(), + "api_key": resolved_api_key, + "model": entry.get("default_model", ""), + } + + # Fall back to custom_providers: list (legacy format) custom_providers = config.get("custom_providers") - if not isinstance(custom_providers, list): - if isinstance(custom_providers, dict): - logger.warning( - "custom_providers in config.yaml is a dict, not a list. " - "Each entry must be prefixed with '-' in YAML. " - "Run 'hermes doctor' for details." - ) + if isinstance(custom_providers, dict): + logger.warning( + "custom_providers in config.yaml is a dict, not a list. " + "Each entry must be prefixed with '-' in YAML. " + "Run 'hermes doctor' for details." + ) + return None + + custom_providers = get_compatible_custom_providers(config) + if not custom_providers: return None for entry in custom_providers: @@ -294,13 +336,21 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An continue name_norm = _normalize_custom_provider_name(name) menu_key = f"custom:{name_norm}" - if requested_norm not in {name_norm, menu_key}: + provider_key = str(entry.get("provider_key", "") or "").strip() + provider_key_norm = _normalize_custom_provider_name(provider_key) if provider_key else "" + provider_menu_key = f"custom:{provider_key_norm}" if provider_key_norm else "" + if requested_norm not in {name_norm, menu_key, provider_key_norm, provider_menu_key}: continue result = { "name": name.strip(), "base_url": base_url.strip(), "api_key": str(entry.get("api_key", "") or "").strip(), } + key_env = str(entry.get("key_env", "") or "").strip() + if key_env: + result["key_env"] = key_env + if provider_key: + result["provider_key"] = provider_key api_mode = _parse_api_mode(entry.get("api_mode")) if api_mode: result["api_mode"] = api_mode @@ -342,6 +392,7 @@ def _resolve_named_custom_runtime( api_key_candidates = [ (explicit_api_key or "").strip(), str(custom_provider.get("api_key", "") or "").strip(), + os.getenv(str(custom_provider.get("key_env", "") or "").strip(), "").strip(), os.getenv("OPENAI_API_KEY", "").strip(), os.getenv("OPENROUTER_API_KEY", "").strip(), ] @@ -557,7 +608,7 @@ def _resolve_explicit_runtime( base_url = explicit_base_url if not base_url: - if provider == "kimi-coding": + if provider in ("kimi-coding", "kimi-coding-cn"): creds = resolve_api_key_provider_credentials(provider) base_url = creds.get("base_url", "").rstrip("/") else: diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py index e12f7d1a7..996dc87da 100644 --- a/hermes_cli/setup.py +++ b/hermes_cli/setup.py @@ -104,8 +104,9 @@ _DEFAULT_PROVIDER_MODELS = { "gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite", "gemma-4-31b-it", "gemma-4-26b-it", ], - "zai": ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"], + "zai": ["glm-5.1", "glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"], "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"], + "kimi-coding-cn": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"], "minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"], "minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"], "ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"], @@ 
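The shape of a new-style providers: entry is implied by the entry.get() calls above; a hedged sketch of what that looks like in config.yaml and how the key resolves (the YAML values are invented examples, only the key names mirror the code):

import os
import yaml

# Invented example of a new-style entry; shape inferred from the hunk above.
cfg = yaml.safe_load("""
providers:
  my-proxy:
    name: My Proxy
    api: https://llm.example.com/v1
    key_env: MY_PROXY_API_KEY
    default_model: example-model
""")

entry = cfg["providers"]["my-proxy"]
key_env = str(entry.get("key_env", "") or "").strip()
# The secret itself never lives in config.yaml; only the env var name does.
api_key = os.getenv(key_env, "").strip() if key_env else ""
base_url = entry.get("api") or entry.get("url") or entry.get("base_url") or ""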
diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py
index e12f7d1a7..996dc87da 100644
--- a/hermes_cli/setup.py
+++ b/hermes_cli/setup.py
@@ -104,8 +104,9 @@ _DEFAULT_PROVIDER_MODELS = {
         "gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite",
         "gemma-4-31b-it", "gemma-4-26b-it",
     ],
-    "zai": ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
+    "zai": ["glm-5.1", "glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
     "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
+    "kimi-coding-cn": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
     "minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
     "minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
     "ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"],
@@ -815,6 +816,7 @@ def setup_model_provider(config: dict, *, quick: bool = False):
     "copilot-acp": "GitHub Copilot ACP",
     "zai": "Z.AI / GLM",
     "kimi-coding": "Kimi / Moonshot",
+    "kimi-coding-cn": "Kimi / Moonshot (China)",
     "minimax": "MiniMax",
     "minimax-cn": "MiniMax CN",
     "anthropic": "Anthropic",
@@ -2232,6 +2234,7 @@ def setup_gateway(config: dict):
     from hermes_cli.gateway import (
         _is_service_installed,
         _is_service_running,
+        supports_systemd_services,
         has_conflicting_systemd_units,
         install_linux_gateway_from_setup,
         print_systemd_scope_conflict_warning,
@@ -2244,16 +2247,18 @@ def setup_gateway(config: dict):
     service_installed = _is_service_installed()
     service_running = _is_service_running()
+    supports_systemd = supports_systemd_services()
+    supports_service_manager = supports_systemd or _is_macos
 
     print()
-    if _is_linux and has_conflicting_systemd_units():
+    if supports_systemd and has_conflicting_systemd_units():
         print_systemd_scope_conflict_warning()
         print()
 
     if service_running:
         if prompt_yes_no(" Restart the gateway to pick up changes?", True):
             try:
-                if _is_linux:
+                if supports_systemd:
                     systemd_restart()
                 elif _is_macos:
                     launchd_restart()
@@ -2262,14 +2267,14 @@ def setup_gateway(config: dict):
     elif service_installed:
         if prompt_yes_no(" Start the gateway service?", True):
             try:
-                if _is_linux:
+                if supports_systemd:
                     systemd_start()
                 elif _is_macos:
                     launchd_start()
             except Exception as e:
                 print_error(f" Start failed: {e}")
-    elif _is_linux or _is_macos:
-        svc_name = "systemd" if _is_linux else "launchd"
+    elif supports_service_manager:
+        svc_name = "systemd" if supports_systemd else "launchd"
         if prompt_yes_no(
             f" Install the gateway as a {svc_name} service? (runs in background, starts on boot)",
             True,
@@ -2277,7 +2282,7 @@ def setup_gateway(config: dict):
             try:
                 installed_scope = None
                 did_install = False
-                if _is_linux:
+                if supports_systemd:
                     installed_scope, did_install = install_linux_gateway_from_setup(force=False)
                 else:
                     launchd_install(force=False)
@@ -2285,7 +2290,7 @@ def setup_gateway(config: dict):
                 print()
                 if did_install and prompt_yes_no(" Start the service now?", True):
                     try:
-                        if _is_linux:
+                        if supports_systemd:
                             systemd_start(system=installed_scope == "system")
                         elif _is_macos:
                             launchd_start()
@@ -2296,12 +2301,21 @@ def setup_gateway(config: dict):
                 print_info(" You can try manually: hermes gateway install")
     else:
         print_info(" You can install later: hermes gateway install")
-        if _is_linux:
+        if supports_systemd:
             print_info(" Or as a boot-time service: sudo hermes gateway install --system")
         print_info(" Or run in foreground: hermes gateway")
     else:
-        print_info("Start the gateway to bring your bots online:")
-        print_info("  hermes gateway            # Run in foreground")
+        from hermes_constants import is_container
+        if is_container():
+            print_info("Start the gateway to bring your bots online:")
+            print_info("  hermes gateway run        # Run as container main process")
+            print_info("")
+            print_info("For automatic restarts, use a Docker restart policy:")
+            print_info("  docker run --restart unless-stopped ...")
+            print_info("  docker restart <container>  # Manual restart")
+        else:
+            print_info("Start the gateway to bring your bots online:")
+            print_info("  hermes gateway            # Run in foreground")
     print_info("━" * 50)
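The setup branching leans on hermes_constants.is_container(), which this diff references but never defines. A typical implementation looks like the following sketch; the /.dockerenv, /run/.containerenv, and cgroup checks are illustrative assumptions, not the actual Hermes code:

from pathlib import Path

def is_container() -> bool:
    """Best-effort container check (illustrative only)."""
    # Docker and Podman drop marker files at the container root.
    if Path("/.dockerenv").exists() or Path("/run/.containerenv").exists():
        return True
    try:
        cgroup = Path("/proc/1/cgroup").read_text()
    except OSError:
        return False
    return "docker" in cgroup or "kubepods" in cgroup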
diff --git a/hermes_cli/skills_hub.py b/hermes_cli/skills_hub.py
index 5a61d0248..182cbf5fe 100644
--- a/hermes_cli/skills_hub.py
+++ b/hermes_cli/skills_hub.py
@@ -335,7 +335,23 @@ def do_install(identifier: str, category: str = "", force: bool = False,
     meta, bundle, _matched_source = _resolve_source_meta_and_bundle(identifier, sources)
     if not bundle:
-        c.print(f"[bold red]Error:[/] Could not fetch '{identifier}' from any source.\n")
+        # Check if any source hit GitHub API rate limit
+        rate_limited = any(
+            getattr(src, "is_rate_limited", False)
+            or getattr(getattr(src, "github", None), "is_rate_limited", False)
+            for src in sources
+        )
+        c.print(f"[bold red]Error:[/] Could not fetch '{identifier}' from any source.")
+        if rate_limited:
+            c.print(
+                "[yellow]Hint:[/] GitHub API rate limit exhausted "
+                "(unauthenticated: 60 requests/hour).\n"
+                "Set [bold]GITHUB_TOKEN[/] in your .env or install the "
+                "[bold]gh[/] CLI and run [bold]gh auth login[/] "
+                "to raise the limit to 5,000/hr.\n"
+            )
+        else:
+            c.print()
         return
 
     # Auto-detect category for official skills (e.g. "official/autonomous-ai-agents/blackbox")
diff --git a/hermes_cli/status.py b/hermes_cli/status.py
index c48c0008b..a7745d65f 100644
--- a/hermes_cli/status.py
+++ b/hermes_cli/status.py
@@ -346,23 +346,35 @@ def show_status(args):
             print("  Note: Android may stop background jobs when Termux is suspended")
 
     elif sys.platform.startswith('linux'):
-        try:
-            from hermes_cli.gateway import get_service_name
-            _gw_svc = get_service_name()
-        except Exception:
-            _gw_svc = "hermes-gateway"
-        try:
-            result = subprocess.run(
-                ["systemctl", "--user", "is-active", _gw_svc],
-                capture_output=True,
-                text=True,
-                timeout=5
-            )
-            is_active = result.stdout.strip() == "active"
-        except (FileNotFoundError, subprocess.TimeoutExpired):
-            is_active = False
-        print(f"  Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
-        print("  Manager: systemd (user)")
+        from hermes_constants import is_container
+        if is_container():
+            # Docker/Podman: no systemd — check for running gateway processes
+            try:
+                from hermes_cli.gateway import find_gateway_pids
+                gateway_pids = find_gateway_pids()
+                is_active = len(gateway_pids) > 0
+            except Exception:
+                is_active = False
+            print(f"  Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
+            print("  Manager: docker (foreground)")
+        else:
+            try:
+                from hermes_cli.gateway import get_service_name
+                _gw_svc = get_service_name()
+            except Exception:
+                _gw_svc = "hermes-gateway"
+            try:
+                result = subprocess.run(
+                    ["systemctl", "--user", "is-active", _gw_svc],
+                    capture_output=True,
+                    text=True,
+                    timeout=5
+                )
+                is_active = result.stdout.strip() == "active"
+            except (FileNotFoundError, subprocess.TimeoutExpired):
+                is_active = False
+            print(f"  Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
+            print("  Manager: systemd (user)")
 
     elif sys.platform == 'darwin':
         from hermes_cli.gateway import get_launchd_label
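The quota numbers in that hint can be checked against GitHub's public rate_limit endpoint; a standalone probe, not part of this diff:

import os
import httpx

headers = {}
token = os.getenv("GITHUB_TOKEN", "")
if token:
    # Authenticated requests raise the core limit to 5,000/hr.
    headers["Authorization"] = f"Bearer {token}"

resp = httpx.get("https://api.github.com/rate_limit", headers=headers, timeout=10)
core = resp.json()["resources"]["core"]
print(f"remaining {core['remaining']}/{core['limit']}")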
diff --git a/hermes_cli/web_server.py b/hermes_cli/web_server.py
new file mode 100644
index 000000000..77053292e
--- /dev/null
+++ b/hermes_cli/web_server.py
@@ -0,0 +1,1839 @@
+"""
+Hermes Agent — Web UI server.
+
+Provides a FastAPI backend serving the Vite/React frontend and REST API
+endpoints for managing configuration, environment variables, and sessions.
+
+Usage:
+    python -m hermes_cli.main web              # Start on http://127.0.0.1:9119
+    python -m hermes_cli.main web --port 8080
+"""
+
+import asyncio
+import json
+import logging
+import os
+import secrets
+import sys
+import threading
+import time
+import urllib.parse
+import urllib.request
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import yaml
+
+PROJECT_ROOT = Path(__file__).parent.parent.resolve()
+if str(PROJECT_ROOT) not in sys.path:
+    sys.path.insert(0, str(PROJECT_ROOT))
+
+from hermes_cli import __version__, __release_date__
+from hermes_cli.config import (
+    DEFAULT_CONFIG,
+    OPTIONAL_ENV_VARS,
+    get_config_path,
+    get_env_path,
+    get_hermes_home,
+    load_config,
+    load_env,
+    save_config,
+    save_env_value,
+    remove_env_value,
+    check_config_version,
+    redact_key,
+)
+from gateway.status import get_running_pid, read_runtime_status
+
+try:
+    from fastapi import FastAPI, HTTPException, Request
+    from fastapi.middleware.cors import CORSMiddleware
+    from fastapi.responses import FileResponse, JSONResponse
+    from fastapi.staticfiles import StaticFiles
+    from pydantic import BaseModel
+except ImportError:
+    raise SystemExit(
+        "Web UI requires fastapi and uvicorn.\n"
+        "Run 'hermes web' to auto-install, or: pip install hermes-agent[web]"
+    )
+
+WEB_DIST = Path(__file__).parent / "web_dist"
+_log = logging.getLogger(__name__)
+
+app = FastAPI(title="Hermes Agent", version=__version__)
+
+# ---------------------------------------------------------------------------
+# Session token for protecting sensitive endpoints (reveal).
+# Generated fresh on every server start — dies when the process exits.
+# Injected into the SPA HTML so only the legitimate web UI can use it.
+# ---------------------------------------------------------------------------
+_SESSION_TOKEN = secrets.token_urlsafe(32)
+
+# Simple rate limiter for the reveal endpoint
+_reveal_timestamps: List[float] = []
+_REVEAL_MAX_PER_WINDOW = 5
+_REVEAL_WINDOW_SECONDS = 30
+
+# CORS: restrict to localhost origins only. The web UI is intended to run
+# locally; binding to 0.0.0.0 with allow_origins=["*"] would let any website
+# read/modify config and secrets.
+app.add_middleware(
+    CORSMiddleware,
+    allow_origin_regex=r"^https?://(localhost|127\.0\.0\.1)(:\d+)?$",
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+# ---------------------------------------------------------------------------
+# Config schema — auto-generated from DEFAULT_CONFIG
+# ---------------------------------------------------------------------------
+
+# Manual overrides for fields that need select options or custom types
+_SCHEMA_OVERRIDES: Dict[str, Dict[str, Any]] = {
+    "model": {
+        "type": "string",
+        "description": "Default model (e.g. anthropic/claude-sonnet-4.6)",
+        "category": "general",
+    },
+    "terminal.backend": {
+        "type": "select",
+        "description": "Terminal execution backend",
+        "options": ["local", "docker", "ssh", "modal", "daytona", "singularity"],
+    },
+    "terminal.modal_mode": {
+        "type": "select",
+        "description": "Modal sandbox mode",
+        "options": ["sandbox", "function"],
+    },
+    "tts.provider": {
+        "type": "select",
+        "description": "Text-to-speech provider",
+        "options": ["edge", "elevenlabs", "openai", "neutts"],
+    },
+    "stt.provider": {
+        "type": "select",
+        "description": "Speech-to-text provider",
+        "options": ["local", "openai", "mistral"],
+    },
+    "display.skin": {
+        "type": "select",
+        "description": "CLI visual theme",
+        "options": ["default", "ares", "mono", "slate"],
+    },
+    "display.resume_display": {
+        "type": "select",
+        "description": "How resumed sessions display history",
+        "options": ["minimal", "full", "off"],
+    },
+    "display.busy_input_mode": {
+        "type": "select",
+        "description": "Input behavior while agent is running",
+        "options": ["queue", "interrupt", "block"],
+    },
+    "memory.provider": {
+        "type": "select",
+        "description": "Memory provider plugin",
+        "options": ["builtin", "honcho"],
+    },
+    "approvals.mode": {
+        "type": "select",
+        "description": "Dangerous command approval mode",
+        "options": ["ask", "yolo", "deny"],
+    },
+    "context.engine": {
+        "type": "select",
+        "description": "Context management engine",
+        "options": ["default", "custom"],
+    },
+    "human_delay.mode": {
+        "type": "select",
+        "description": "Simulated typing delay mode",
+        "options": ["off", "typing", "fixed"],
+    },
+    "logging.level": {
+        "type": "select",
+        "description": "Log level for agent.log",
+        "options": ["DEBUG", "INFO", "WARNING", "ERROR"],
+    },
+    "agent.service_tier": {
+        "type": "select",
+        "description": "API service tier (OpenAI/Anthropic)",
+        "options": ["", "auto", "default", "flex"],
+    },
+    "delegation.reasoning_effort": {
+        "type": "select",
+        "description": "Reasoning effort for delegated subagents",
+        "options": ["", "low", "medium", "high"],
+    },
+}
+
+# Categories with fewer fields get merged into broader ones to avoid tab sprawl.
+_CATEGORY_MERGE: Dict[str, str] = {
+    "privacy": "security",
+    "context": "agent",
+    "skills": "agent",
+    "cron": "agent",
+    "network": "agent",
+    "checkpoints": "agent",
+    "approvals": "security",
+    "human_delay": "display",
+    "smart_model_routing": "agent",
+}
+
+# Display order for tabs — unlisted categories sort alphabetically after these.
+_CATEGORY_ORDER = [
+    "general", "agent", "terminal", "display", "delegation",
+    "memory", "compression", "security", "browser", "voice",
+    "tts", "stt", "logging", "discord", "auxiliary",
+]
+
+
+def _infer_type(value: Any) -> str:
+    """Infer a UI field type from a Python value."""
+    if isinstance(value, bool):
+        return "boolean"
+    if isinstance(value, int):
+        return "number"
+    if isinstance(value, float):
+        return "number"
+    if isinstance(value, list):
+        return "list"
+    if isinstance(value, dict):
+        return "object"
+    return "string"
+
+
+def _build_schema_from_config(
+    config: Dict[str, Any],
+    prefix: str = "",
+) -> Dict[str, Dict[str, Any]]:
+    """Walk DEFAULT_CONFIG and produce a flat dot-path → field schema dict."""
+    schema: Dict[str, Dict[str, Any]] = {}
+    for key, value in config.items():
+        full_key = f"{prefix}.{key}" if prefix else key
+
+        # Skip internal / version keys
+        if full_key in ("_config_version",):
+            continue
+
+        # Category is the first path component for nested keys, or "general"
+        # for top-level scalar fields (model, toolsets, timezone, etc.).
+        if prefix:
+            category = prefix.split(".")[0]
+        elif isinstance(value, dict):
+            category = key
+        else:
+            category = "general"
+
+        if isinstance(value, dict):
+            # Recurse into nested dicts
+            schema.update(_build_schema_from_config(value, full_key))
+        else:
+            entry: Dict[str, Any] = {
+                "type": _infer_type(value),
+                "description": full_key.replace(".", " → ").replace("_", " ").title(),
+                "category": category,
+            }
+            # Apply manual overrides
+            if full_key in _SCHEMA_OVERRIDES:
+                entry.update(_SCHEMA_OVERRIDES[full_key])
+            # Merge small categories
+            entry["category"] = _CATEGORY_MERGE.get(entry["category"], entry["category"])
+            schema[full_key] = entry
+    return schema
+
+
+CONFIG_SCHEMA = _build_schema_from_config(DEFAULT_CONFIG)
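To see what the walk produces, here is a stripped-down re-implementation run on a toy config; it omits the override and merge steps that the real function applies:

def flatten(cfg: dict, prefix: str = "") -> dict:
    """Flatten a nested dict into dot-path keys, recording the value type."""
    out = {}
    for key, value in cfg.items():
        full_key = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            out.update(flatten(value, full_key))
        else:
            out[full_key] = type(value).__name__
    return out

print(flatten({"model": "x", "tts": {"provider": "edge", "rate": 1.0}}))
# {'model': 'str', 'tts.provider': 'str', 'tts.rate': 'float'}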
+
+
+class ConfigUpdate(BaseModel):
+    config: dict
+
+
+class EnvVarUpdate(BaseModel):
+    key: str
+    value: str
+
+
+class EnvVarDelete(BaseModel):
+    key: str
+
+
+class EnvVarReveal(BaseModel):
+    key: str
+
+
+@app.get("/api/status")
+async def get_status():
+    current_ver, latest_ver = check_config_version()
+
+    gateway_pid = get_running_pid()
+    gateway_running = gateway_pid is not None
+
+    gateway_state = None
+    gateway_platforms: dict = {}
+    gateway_exit_reason = None
+    gateway_updated_at = None
+    configured_gateway_platforms: set[str] | None = None
+    try:
+        from gateway.config import load_gateway_config
+
+        gateway_config = load_gateway_config()
+        configured_gateway_platforms = {
+            platform.value for platform in gateway_config.get_connected_platforms()
+        }
+    except Exception:
+        configured_gateway_platforms = None
+
+    runtime = read_runtime_status()
+    if runtime:
+        gateway_state = runtime.get("gateway_state")
+        gateway_platforms = runtime.get("platforms") or {}
+        if configured_gateway_platforms is not None:
+            gateway_platforms = {
+                key: value
+                for key, value in gateway_platforms.items()
+                if key in configured_gateway_platforms
+            }
+        gateway_exit_reason = runtime.get("exit_reason")
+        gateway_updated_at = runtime.get("updated_at")
+    if not gateway_running:
+        gateway_state = gateway_state if gateway_state in ("stopped", "startup_failed") else "stopped"
+        gateway_platforms = {}
+
+    active_sessions = 0
+    try:
+        from hermes_state import SessionDB
+        db = SessionDB()
+        try:
+            sessions = db.list_sessions_rich(limit=50)
+            now = time.time()
+            active_sessions = sum(
+                1 for s in sessions
+                if s.get("ended_at") is None
+                and (now - s.get("last_active", s.get("started_at", 0))) < 300
+            )
+        finally:
+            db.close()
+    except Exception:
+        pass
+
+    return {
+        "version": __version__,
+        "release_date": __release_date__,
+        "hermes_home": str(get_hermes_home()),
+        "config_path": str(get_config_path()),
+        "env_path": str(get_env_path()),
+        "config_version": current_ver,
+        "latest_config_version": latest_ver,
+        "gateway_running": gateway_running,
+        "gateway_pid": gateway_pid,
+        "gateway_state": gateway_state,
+        "gateway_platforms": gateway_platforms,
+        "gateway_exit_reason": gateway_exit_reason,
+        "gateway_updated_at": gateway_updated_at,
+        "active_sessions": active_sessions,
+    }
+
+
+@app.get("/api/sessions")
+async def get_sessions(limit: int = 20, offset: int = 0):
+    try:
+        from hermes_state import SessionDB
+        db = SessionDB()
+        try:
+            sessions = db.list_sessions_rich(limit=limit, offset=offset)
+            total = db.session_count()
+            now = time.time()
+            for s in sessions:
+                s["is_active"] = (
+                    s.get("ended_at") is None
+                    and (now - s.get("last_active", s.get("started_at", 0))) < 300
+                )
+            return {"sessions": sessions, "total": total, "limit": limit, "offset": offset}
+        finally:
+            db.close()
+    except Exception as e:
+        _log.exception("GET /api/sessions failed")
+        raise HTTPException(status_code=500, detail="Internal server error")
+
+
+@app.get("/api/sessions/search")
+async def search_sessions(q: str = "", limit: int = 20):
+    """Full-text search across session message content using FTS5."""
+    if not q or not q.strip():
+        return {"results": []}
+    try:
+        from hermes_state import SessionDB
+        db = SessionDB()
+        try:
+            # Auto-add prefix wildcards so partial words match
+            #   e.g. "nimb" → "nimb*" matches "nimby"
+            # Preserve quoted phrases and existing wildcards as-is
+            import re
+            terms = []
+            for token in re.findall(r'"[^"]*"|\S+', q.strip()):
+                if token.startswith('"') or token.endswith("*"):
+                    terms.append(token)
+                else:
+                    terms.append(token + "*")
+            prefix_query = " ".join(terms)
+            matches = db.search_messages(query=prefix_query, limit=limit)
+            # Group by session_id — return unique sessions with their best snippet
+            seen: dict = {}
+            for m in matches:
+                sid = m["session_id"]
+                if sid not in seen:
+                    seen[sid] = {
+                        "session_id": sid,
+                        "snippet": m.get("snippet", ""),
+                        "role": m.get("role"),
+                        "source": m.get("source"),
+                        "model": m.get("model"),
+                        "session_started": m.get("session_started"),
+                    }
+            return {"results": list(seen.values())}
+        finally:
+            db.close()
+    except Exception:
+        _log.exception("GET /api/sessions/search failed")
+        raise HTTPException(status_code=500, detail="Search failed")
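The wildcard rewriting above maps directly onto SQLite FTS5 prefix queries; a minimal standalone demonstration (toy schema, not the hermes_state tables; requires an SQLite build with FTS5, which CPython ships on most platforms):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE VIRTUAL TABLE msgs USING fts5(content)")
con.execute("INSERT INTO msgs VALUES ('talking about nimby politics')")

# "nimb" alone matches no complete token; "nimb*" matches the partial word.
hit = con.execute("SELECT count(*) FROM msgs WHERE msgs MATCH 'nimb*'").fetchone()
assert hit[0] == 1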
+ """ + config = dict(config) # shallow copy + model_val = config.get("model") + if isinstance(model_val, dict): + config["model"] = model_val.get("default", model_val.get("name", "")) + return config + + +@app.get("/api/config") +async def get_config(): + config = _normalize_config_for_web(load_config()) + # Strip internal keys that the frontend shouldn't see or send back + return {k: v for k, v in config.items() if not k.startswith("_")} + + +@app.get("/api/config/defaults") +async def get_defaults(): + return DEFAULT_CONFIG + + +@app.get("/api/config/schema") +async def get_schema(): + return {"fields": CONFIG_SCHEMA, "category_order": _CATEGORY_ORDER} + + +def _denormalize_config_from_web(config: Dict[str, Any]) -> Dict[str, Any]: + """Reverse _normalize_config_for_web before saving. + + Reconstructs ``model`` as a dict by reading the current on-disk config + to recover model subkeys (provider, base_url, api_mode, etc.) that were + stripped from the GET response. The frontend only sees model as a flat + string; the rest is preserved transparently. + """ + config = dict(config) + # Remove any _model_meta that might have leaked in (shouldn't happen + # with the stripped GET response, but be defensive) + config.pop("_model_meta", None) + + model_val = config.get("model") + if isinstance(model_val, str) and model_val: + # Read the current disk config to recover model subkeys + try: + disk_config = load_config() + disk_model = disk_config.get("model") + if isinstance(disk_model, dict): + # Preserve all subkeys, update default with the new value + disk_model["default"] = model_val + config["model"] = disk_model + except Exception: + pass # can't read disk config โ€” just use the string form + return config + + +@app.put("/api/config") +async def update_config(body: ConfigUpdate): + try: + save_config(_denormalize_config_from_web(body.config)) + return {"ok": True} + except Exception as e: + _log.exception("PUT /api/config failed") + raise HTTPException(status_code=500, detail="Internal server error") + + +@app.get("/api/auth/session-token") +async def get_session_token(): + """Return the ephemeral session token for this server instance. + + The token protects sensitive endpoints (reveal). It's served to the SPA + which stores it in memory โ€” it's never persisted and dies when the server + process exits. CORS already restricts this to localhost origins. 
+ """ + return {"token": _SESSION_TOKEN} + + +@app.get("/api/env") +async def get_env_vars(): + env_on_disk = load_env() + result = {} + for var_name, info in OPTIONAL_ENV_VARS.items(): + value = env_on_disk.get(var_name) + result[var_name] = { + "is_set": bool(value), + "redacted_value": redact_key(value) if value else None, + "description": info.get("description", ""), + "url": info.get("url"), + "category": info.get("category", ""), + "is_password": info.get("password", False), + "tools": info.get("tools", []), + "advanced": info.get("advanced", False), + } + return result + + +@app.put("/api/env") +async def set_env_var(body: EnvVarUpdate): + try: + save_env_value(body.key, body.value) + return {"ok": True, "key": body.key} + except Exception as e: + _log.exception("PUT /api/env failed") + raise HTTPException(status_code=500, detail="Internal server error") + + +@app.delete("/api/env") +async def remove_env_var(body: EnvVarDelete): + try: + removed = remove_env_value(body.key) + if not removed: + raise HTTPException(status_code=404, detail=f"{body.key} not found in .env") + return {"ok": True, "key": body.key} + except HTTPException: + raise + except Exception as e: + _log.exception("DELETE /api/env failed") + raise HTTPException(status_code=500, detail="Internal server error") + + +@app.post("/api/env/reveal") +async def reveal_env_var(body: EnvVarReveal, request: Request): + """Return the real (unredacted) value of a single env var. + + Protected by: + - Ephemeral session token (generated per server start, injected into SPA) + - Rate limiting (max 5 reveals per 30s window) + - Audit logging + """ + # --- Token check --- + auth = request.headers.get("authorization", "") + if auth != f"Bearer {_SESSION_TOKEN}": + raise HTTPException(status_code=401, detail="Unauthorized") + + # --- Rate limit --- + now = time.time() + cutoff = now - _REVEAL_WINDOW_SECONDS + _reveal_timestamps[:] = [t for t in _reveal_timestamps if t > cutoff] + if len(_reveal_timestamps) >= _REVEAL_MAX_PER_WINDOW: + raise HTTPException(status_code=429, detail="Too many reveal requests. Try again shortly.") + _reveal_timestamps.append(now) + + # --- Reveal --- + env_on_disk = load_env() + value = env_on_disk.get(body.key) + if value is None: + raise HTTPException(status_code=404, detail=f"{body.key} not found in .env") + + _log.info("env/reveal: %s", body.key) + return {"key": body.key, "value": value} + + +# --------------------------------------------------------------------------- +# OAuth provider endpoints โ€” status + disconnect (Phase 1) +# --------------------------------------------------------------------------- +# +# Phase 1 surfaces *which OAuth providers exist* and whether each is +# connected, plus a disconnect button. The actual login flow (PKCE for +# Anthropic, device-code for Nous/Codex) still runs in the CLI for now; +# Phase 2 will add in-browser flows. For unconnected providers we return +# the canonical ``hermes auth add `` command so the dashboard +# can surface a one-click copy. + + +def _truncate_token(value: Optional[str], visible: int = 6) -> str: + """Return ``...XXXXXX`` (last N chars) for safe display in the UI. + + We never expose more than the trailing ``visible`` characters of an + OAuth access token. JWT prefixes (the part before the first dot) are + stripped first when present so the visible suffix is always part of + the signing region rather than a meaningless header chunk. + """ + if not value: + return "" + s = str(value) + if "." 
+
+
+# ---------------------------------------------------------------------------
+# OAuth provider endpoints — status + disconnect (Phase 1)
+# ---------------------------------------------------------------------------
+#
+# Phase 1 surfaces *which OAuth providers exist* and whether each is
+# connected, plus a disconnect button. The actual login flow (PKCE for
+# Anthropic, device-code for Nous/Codex) still runs in the CLI for now;
+# Phase 2 will add in-browser flows. For unconnected providers we return
+# the canonical ``hermes auth add <provider>`` command so the dashboard
+# can surface a one-click copy.
+
+
+def _truncate_token(value: Optional[str], visible: int = 6) -> str:
+    """Return ``...XXXXXX`` (last N chars) for safe display in the UI.
+
+    We never expose more than the trailing ``visible`` characters of an
+    OAuth access token. JWT prefixes (the part before the first dot) are
+    stripped first when present so the visible suffix is always part of
+    the signing region rather than a meaningless header chunk.
+    """
+    if not value:
+        return ""
+    s = str(value)
+    if "." in s and s.count(".") >= 2:
+        # Looks like a JWT — show the trailing piece of the signature only.
+        s = s.rsplit(".", 1)[-1]
+    if len(s) <= visible:
+        return s
+    return f"…{s[-visible:]}"
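Worked through by hand, the truncation rule behaves like this (invented token values; the asserts restate the logic of _truncate_token above):

# Three dot-separated segments -> treated as a JWT: keep only the final
# signature segment, then show an ellipsis plus the last six characters.
jwt = "eyJhbGciOi.eyJzdWIiOiIx.abc123signature"
assert _truncate_token(jwt) == "…nature"

# A plain key has no JWT structure, so only the suffix rule applies.
assert _truncate_token("sk-abcdef123456") == "…123456"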
+
+
+def _anthropic_oauth_status() -> Dict[str, Any]:
+    """Combined status across the three Anthropic credential sources we read.
+
+    Hermes resolves Anthropic creds in this order at runtime:
+      1. ``~/.hermes/.anthropic_oauth.json`` — Hermes-managed PKCE flow
+      2. ``~/.claude/.credentials.json`` — Claude Code CLI credentials (auto)
+      3. ``ANTHROPIC_TOKEN`` / ``ANTHROPIC_API_KEY`` env vars
+    The dashboard reports the highest-priority source that's actually present.
+    """
+    try:
+        from agent.anthropic_adapter import (
+            read_hermes_oauth_credentials,
+            read_claude_code_credentials,
+            _HERMES_OAUTH_FILE,
+        )
+    except ImportError:
+        read_claude_code_credentials = None  # type: ignore
+        read_hermes_oauth_credentials = None  # type: ignore
+        _HERMES_OAUTH_FILE = None  # type: ignore
+
+    hermes_creds = None
+    if read_hermes_oauth_credentials:
+        try:
+            hermes_creds = read_hermes_oauth_credentials()
+        except Exception:
+            hermes_creds = None
+    if hermes_creds and hermes_creds.get("accessToken"):
+        return {
+            "logged_in": True,
+            "source": "hermes_pkce",
+            "source_label": f"Hermes PKCE ({_HERMES_OAUTH_FILE})",
+            "token_preview": _truncate_token(hermes_creds.get("accessToken")),
+            "expires_at": hermes_creds.get("expiresAt"),
+            "has_refresh_token": bool(hermes_creds.get("refreshToken")),
+        }
+
+    cc_creds = None
+    if read_claude_code_credentials:
+        try:
+            cc_creds = read_claude_code_credentials()
+        except Exception:
+            cc_creds = None
+    if cc_creds and cc_creds.get("accessToken"):
+        return {
+            "logged_in": True,
+            "source": "claude_code",
+            "source_label": "Claude Code (~/.claude/.credentials.json)",
+            "token_preview": _truncate_token(cc_creds.get("accessToken")),
+            "expires_at": cc_creds.get("expiresAt"),
+            "has_refresh_token": bool(cc_creds.get("refreshToken")),
+        }
+
+    env_token = os.getenv("ANTHROPIC_TOKEN") or os.getenv("CLAUDE_CODE_OAUTH_TOKEN")
+    if env_token:
+        return {
+            "logged_in": True,
+            "source": "env_var",
+            "source_label": "ANTHROPIC_TOKEN environment variable",
+            "token_preview": _truncate_token(env_token),
+            "expires_at": None,
+            "has_refresh_token": False,
+        }
+    return {"logged_in": False, "source": None}
+
+
+def _claude_code_only_status() -> Dict[str, Any]:
+    """Surface Claude Code CLI credentials as their own provider entry.
+
+    Independent of the Anthropic entry above so users can see whether their
+    Claude Code subscription tokens are actively flowing into Hermes even
+    when they also have a separate Hermes-managed PKCE login.
+    """
+    try:
+        from agent.anthropic_adapter import read_claude_code_credentials
+        creds = read_claude_code_credentials()
+    except Exception:
+        creds = None
+    if creds and creds.get("accessToken"):
+        return {
+            "logged_in": True,
+            "source": "claude_code_cli",
+            "source_label": "~/.claude/.credentials.json",
+            "token_preview": _truncate_token(creds.get("accessToken")),
+            "expires_at": creds.get("expiresAt"),
+            "has_refresh_token": bool(creds.get("refreshToken")),
+        }
+    return {"logged_in": False, "source": None}
+
+
+# Provider catalog. The order matters — it's how we render the UI list.
+# ``cli_command`` is what the dashboard surfaces as the copy-to-clipboard
+# fallback while Phase 2 (in-browser flows) isn't built yet.
+# ``flow`` describes the OAuth shape so the future modal can pick the
+# right UI: ``pkce`` = open URL + paste callback code, ``device_code`` =
+# show code + verification URL + poll, ``external`` = read-only (delegated
+# to a third-party CLI like Claude Code or Qwen).
+_OAUTH_PROVIDER_CATALOG: tuple[Dict[str, Any], ...] = (
+    {
+        "id": "anthropic",
+        "name": "Anthropic (Claude API)",
+        "flow": "pkce",
+        "cli_command": "hermes auth add anthropic",
+        "docs_url": "https://docs.claude.com/en/api/getting-started",
+        "status_fn": _anthropic_oauth_status,
+    },
+    {
+        "id": "claude-code",
+        "name": "Claude Code (subscription)",
+        "flow": "external",
+        "cli_command": "claude setup-token",
+        "docs_url": "https://docs.claude.com/en/docs/claude-code",
+        "status_fn": _claude_code_only_status,
+    },
+    {
+        "id": "nous",
+        "name": "Nous Portal",
+        "flow": "device_code",
+        "cli_command": "hermes auth add nous",
+        "docs_url": "https://portal.nousresearch.com",
+        "status_fn": None,  # dispatched via auth.get_nous_auth_status
+    },
+    {
+        "id": "openai-codex",
+        "name": "OpenAI Codex (ChatGPT)",
+        "flow": "device_code",
+        "cli_command": "hermes auth add openai-codex",
+        "docs_url": "https://platform.openai.com/docs",
+        "status_fn": None,  # dispatched via auth.get_codex_auth_status
+    },
+    {
+        "id": "qwen-oauth",
+        "name": "Qwen (via Qwen CLI)",
+        "flow": "external",
+        "cli_command": "hermes auth add qwen-oauth",
+        "docs_url": "https://github.com/QwenLM/qwen-code",
+        "status_fn": None,  # dispatched via auth.get_qwen_auth_status
+    },
+)
+
+
+def _resolve_provider_status(provider_id: str, status_fn) -> Dict[str, Any]:
+    """Dispatch to the right status helper for an OAuth provider entry."""
+    if status_fn is not None:
+        try:
+            return status_fn()
+        except Exception as e:
+            return {"logged_in": False, "error": str(e)}
+    try:
+        from hermes_cli import auth as hauth
+        if provider_id == "nous":
+            raw = hauth.get_nous_auth_status()
+            return {
+                "logged_in": bool(raw.get("logged_in")),
+                "source": "nous_portal",
+                "source_label": raw.get("portal_base_url") or "Nous Portal",
+                "token_preview": _truncate_token(raw.get("access_token")),
+                "expires_at": raw.get("access_expires_at"),
+                "has_refresh_token": bool(raw.get("has_refresh_token")),
+            }
+        if provider_id == "openai-codex":
+            raw = hauth.get_codex_auth_status()
+            return {
+                "logged_in": bool(raw.get("logged_in")),
+                "source": raw.get("source") or "openai_codex",
+                "source_label": raw.get("auth_mode") or "OpenAI Codex",
+                "token_preview": _truncate_token(raw.get("api_key")),
+                "expires_at": None,
+                "has_refresh_token": False,
+                "last_refresh": raw.get("last_refresh"),
+            }
+        if provider_id == "qwen-oauth":
+            raw = hauth.get_qwen_auth_status()
+            return {
+                "logged_in": bool(raw.get("logged_in")),
+                "source": "qwen_cli",
+                "source_label": raw.get("auth_store_path") or "Qwen CLI",
+                "token_preview": _truncate_token(raw.get("access_token")),
+                "expires_at": raw.get("expires_at"),
+                "has_refresh_token": bool(raw.get("has_refresh_token")),
+            }
+    except Exception as e:
+        return {"logged_in": False, "error": str(e)}
+    return {"logged_in": False}
+
+
+@app.get("/api/providers/oauth")
+async def list_oauth_providers():
+    """Enumerate every OAuth-capable LLM provider with current status.
+
+    Response shape (per provider):
+      id           stable identifier (used in DELETE path)
+      name         human label
+      flow         "pkce" | "device_code" | "external"
+      cli_command  fallback CLI command for users to run manually
+      docs_url     external docs/portal link for the "Learn more" link
+      status:
+        logged_in          bool — currently has usable creds
+        source             short slug ("hermes_pkce", "claude_code", ...)
+        source_label       human-readable origin (file path, env var name)
+        token_preview      last N chars of the token, never the full token
+        expires_at         ISO timestamp string or null
+        has_refresh_token  bool
+    """
+    providers = []
+    for p in _OAUTH_PROVIDER_CATALOG:
+        status = _resolve_provider_status(p["id"], p.get("status_fn"))
+        providers.append({
+            "id": p["id"],
+            "name": p["name"],
+            "flow": p["flow"],
+            "cli_command": p["cli_command"],
+            "docs_url": p["docs_url"],
+            "status": status,
+        })
+    return {"providers": providers}
+
+
+@app.delete("/api/providers/oauth/{provider_id}")
+async def disconnect_oauth_provider(provider_id: str, request: Request):
+    """Disconnect an OAuth provider. Token-protected (matches /env/reveal)."""
+    auth = request.headers.get("authorization", "")
+    if auth != f"Bearer {_SESSION_TOKEN}":
+        raise HTTPException(status_code=401, detail="Unauthorized")
+
+    valid_ids = {p["id"] for p in _OAUTH_PROVIDER_CATALOG}
+    if provider_id not in valid_ids:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Unknown provider: {provider_id}. "
+                   f"Available: {', '.join(sorted(valid_ids))}",
+        )
+
+    # Anthropic and claude-code clear the same Hermes-managed PKCE file
+    # AND forget the Claude Code import. We don't touch ~/.claude/* directly
+    # — that's owned by the Claude Code CLI; users can re-auth there if they
+    # want to undo a disconnect.
+    if provider_id in ("anthropic", "claude-code"):
+        try:
+            from agent.anthropic_adapter import _HERMES_OAUTH_FILE
+            if _HERMES_OAUTH_FILE.exists():
+                _HERMES_OAUTH_FILE.unlink()
+        except Exception:
+            pass
+        # Also clear the credential pool entry if present.
+        try:
+            from hermes_cli.auth import clear_provider_auth
+            clear_provider_auth("anthropic")
+        except Exception:
+            pass
+        _log.info("oauth/disconnect: %s", provider_id)
+        return {"ok": True, "provider": provider_id}
+
+    try:
+        from hermes_cli.auth import clear_provider_auth
+        cleared = clear_provider_auth(provider_id)
+        _log.info("oauth/disconnect: %s (cleared=%s)", provider_id, cleared)
+        return {"ok": bool(cleared), "provider": provider_id}
+    except Exception as e:
+        _log.exception("disconnect %s failed", provider_id)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+# ---------------------------------------------------------------------------
+# OAuth Phase 2 — in-browser PKCE & device-code flows
+# ---------------------------------------------------------------------------
+#
+# Two flow shapes are supported:
+#
+# PKCE (Anthropic):
+#   1. POST /api/providers/oauth/anthropic/start
+#      → server generates code_verifier + challenge, builds claude.ai
+#        authorize URL, stashes verifier in _oauth_sessions[session_id]
+#      → returns { session_id, flow: "pkce", auth_url }
+#   2. UI opens auth_url in a new tab. User authorizes, copies code.
+#   3. POST /api/providers/oauth/anthropic/submit { session_id, code }
+#      → server exchanges (code + verifier) → tokens at console.anthropic.com
+#      → persists to ~/.hermes/.anthropic_oauth.json AND credential pool
+#      → returns { ok: true, status: "approved" }
+#
+# Device code (Nous, OpenAI Codex):
+#   1. POST /api/providers/oauth/{nous|openai-codex}/start
+#      → server hits provider's device-auth endpoint
+#      → gets { user_code, verification_url, device_code, interval, expires_in }
+#      → spawns background poller thread that polls the token endpoint
+#        every `interval` seconds until approved/expired
+#      → stores poll status in _oauth_sessions[session_id]
+#      → returns { session_id, flow: "device_code", user_code,
+#        verification_url, expires_in, poll_interval }
+#   2. UI opens verification_url in a new tab and shows user_code.
+#   3. UI polls GET /api/providers/oauth/{provider}/poll/{session_id}
+#      every 2s until status != "pending".
+#   4. On "approved" the background thread has already saved creds; UI
+#      refreshes the providers list.
+#
+# Sessions are kept in-memory only (single-process FastAPI) and time out
+# after 15 minutes. A periodic cleanup runs on each /start call to GC
+# expired sessions so the dict doesn't grow without bound.
+
+_OAUTH_SESSION_TTL_SECONDS = 15 * 60
+_oauth_sessions: Dict[str, Dict[str, Any]] = {}
+_oauth_sessions_lock = threading.Lock()
+
+# Import OAuth constants from canonical source instead of duplicating.
+# Guarded so hermes web still starts if anthropic_adapter is unavailable;
+# Phase 2 endpoints will return 501 in that case.
+try:
+    from agent.anthropic_adapter import (
+        _OAUTH_CLIENT_ID as _ANTHROPIC_OAUTH_CLIENT_ID,
+        _OAUTH_TOKEN_URL as _ANTHROPIC_OAUTH_TOKEN_URL,
+        _OAUTH_REDIRECT_URI as _ANTHROPIC_OAUTH_REDIRECT_URI,
+        _OAUTH_SCOPES as _ANTHROPIC_OAUTH_SCOPES,
+        _generate_pkce as _generate_pkce_pair,
+    )
+    _ANTHROPIC_OAUTH_AVAILABLE = True
+except ImportError:
+    _ANTHROPIC_OAUTH_AVAILABLE = False
+_ANTHROPIC_OAUTH_AUTHORIZE_URL = "https://claude.ai/oauth/authorize"
+
+
+def _gc_oauth_sessions() -> None:
+    """Drop expired sessions. Called opportunistically on /start."""
+    cutoff = time.time() - _OAUTH_SESSION_TTL_SECONDS
+    with _oauth_sessions_lock:
+        stale = [sid for sid, sess in _oauth_sessions.items() if sess["created_at"] < cutoff]
+        for sid in stale:
+            _oauth_sessions.pop(sid, None)
+
+
+def _new_oauth_session(provider_id: str, flow: str) -> tuple[str, Dict[str, Any]]:
+    """Create + register a new OAuth session, return (session_id, session_dict)."""
+    sid = secrets.token_urlsafe(16)
+    sess = {
+        "session_id": sid,
+        "provider": provider_id,
+        "flow": flow,
+        "created_at": time.time(),
+        "status": "pending",  # pending | approved | denied | expired | error
+        "error_message": None,
+    }
+    with _oauth_sessions_lock:
+        _oauth_sessions[sid] = sess
+    return sid, sess
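_generate_pkce itself is imported from the adapter and not shown in this diff; for reference, the standard RFC 7636 S256 construction it presumably follows:

import base64
import hashlib
import secrets

def generate_pkce_pair() -> tuple[str, str]:
    """Return (verifier, challenge); sketch of the RFC 7636 S256 method."""
    verifier = secrets.token_urlsafe(64)  # 86 chars, within the 43-128 bound
    digest = hashlib.sha256(verifier.encode("ascii")).digest()
    # Challenge is base64url without padding, per the spec.
    challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return verifier, challenge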
+
+
+def _save_anthropic_oauth_creds(access_token: str, refresh_token: str, expires_at_ms: int) -> None:
+    """Persist Anthropic PKCE creds to both Hermes file AND credential pool.
+
+    Mirrors what auth_commands.add_command does so the dashboard flow leaves
+    the system in the same state as ``hermes auth add anthropic``.
+    """
+    from agent.anthropic_adapter import _HERMES_OAUTH_FILE
+    payload = {
+        "accessToken": access_token,
+        "refreshToken": refresh_token,
+        "expiresAt": expires_at_ms,
+    }
+    _HERMES_OAUTH_FILE.parent.mkdir(parents=True, exist_ok=True)
+    _HERMES_OAUTH_FILE.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+    # Best-effort credential-pool insert. Failure here doesn't invalidate
+    # the file write — pool registration only matters for the rotation
+    # strategy, not for runtime credential resolution.
+    try:
+        from agent.credential_pool import (
+            PooledCredential,
+            load_pool,
+            AUTH_TYPE_OAUTH,
+            SOURCE_MANUAL,
+        )
+        import uuid
+        pool = load_pool("anthropic")
+        # Avoid duplicate entries: delete any prior dashboard-issued OAuth entry
+        existing = [e for e in pool.entries() if getattr(e, "source", "").startswith(f"{SOURCE_MANUAL}:dashboard_pkce")]
+        for e in existing:
+            try:
+                pool.remove_entry(getattr(e, "id", ""))
+            except Exception:
+                pass
+        entry = PooledCredential(
+            provider="anthropic",
+            id=uuid.uuid4().hex[:6],
+            label="dashboard PKCE",
+            auth_type=AUTH_TYPE_OAUTH,
+            priority=0,
+            source=f"{SOURCE_MANUAL}:dashboard_pkce",
+            access_token=access_token,
+            refresh_token=refresh_token,
+            expires_at_ms=expires_at_ms,
+        )
+        pool.add_entry(entry)
+    except Exception as e:
+        _log.warning("anthropic pool add (dashboard) failed: %s", e)
+
+
+def _start_anthropic_pkce() -> Dict[str, Any]:
+    """Begin PKCE flow. Returns the auth URL the UI should open."""
+    if not _ANTHROPIC_OAUTH_AVAILABLE:
+        raise HTTPException(status_code=501, detail="Anthropic OAuth not available (missing adapter)")
+    verifier, challenge = _generate_pkce_pair()
+    sid, sess = _new_oauth_session("anthropic", "pkce")
+    sess["verifier"] = verifier
+    sess["state"] = verifier  # Anthropic round-trips verifier as state
+    params = {
+        "code": "true",
+        "client_id": _ANTHROPIC_OAUTH_CLIENT_ID,
+        "response_type": "code",
+        "redirect_uri": _ANTHROPIC_OAUTH_REDIRECT_URI,
+        "scope": _ANTHROPIC_OAUTH_SCOPES,
+        "code_challenge": challenge,
+        "code_challenge_method": "S256",
+        "state": verifier,
+    }
+    auth_url = f"{_ANTHROPIC_OAUTH_AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"
+    return {
+        "session_id": sid,
+        "flow": "pkce",
+        "auth_url": auth_url,
+        "expires_in": _OAUTH_SESSION_TTL_SECONDS,
+    }
+
+
+def _submit_anthropic_pkce(session_id: str, code_input: str) -> Dict[str, Any]:
+    """Exchange authorization code for tokens. Persists on success."""
+    with _oauth_sessions_lock:
+        sess = _oauth_sessions.get(session_id)
+    if not sess or sess["provider"] != "anthropic" or sess["flow"] != "pkce":
+        raise HTTPException(status_code=404, detail="Unknown or expired session")
+    if sess["status"] != "pending":
+        return {"ok": False, "status": sess["status"], "message": sess.get("error_message")}
+
+    # Anthropic's redirect callback page formats the code as `<code>#<state>`.
+    # Strip the state suffix if present (we already have the verifier server-side).
+    parts = code_input.strip().split("#", 1)
+    code = parts[0].strip()
+    if not code:
+        return {"ok": False, "status": "error", "message": "No code provided"}
+    state_from_callback = parts[1] if len(parts) > 1 else ""
+
+    exchange_data = json.dumps({
+        "grant_type": "authorization_code",
+        "client_id": _ANTHROPIC_OAUTH_CLIENT_ID,
+        "code": code,
+        "state": state_from_callback or sess["state"],
+        "redirect_uri": _ANTHROPIC_OAUTH_REDIRECT_URI,
+        "code_verifier": sess["verifier"],
+    }).encode()
+    req = urllib.request.Request(
+        _ANTHROPIC_OAUTH_TOKEN_URL,
+        data=exchange_data,
+        headers={
+            "Content-Type": "application/json",
+            "User-Agent": "hermes-dashboard/1.0",
+        },
+        method="POST",
+    )
+    try:
+        with urllib.request.urlopen(req, timeout=20) as resp:
+            result = json.loads(resp.read().decode())
+    except Exception as e:
+        sess["status"] = "error"
+        sess["error_message"] = f"Token exchange failed: {e}"
+        return {"ok": False, "status": "error", "message": sess["error_message"]}
+
+    access_token = result.get("access_token", "")
+    refresh_token = result.get("refresh_token", "")
+    expires_in = int(result.get("expires_in") or 3600)
+    if not access_token:
+        sess["status"] = "error"
+        sess["error_message"] = "No access token returned"
+        return {"ok": False, "status": "error", "message": sess["error_message"]}
+
+    expires_at_ms = int(time.time() * 1000) + (expires_in * 1000)
+    try:
+        _save_anthropic_oauth_creds(access_token, refresh_token, expires_at_ms)
+    except Exception as e:
+        sess["status"] = "error"
+        sess["error_message"] = f"Save failed: {e}"
+        return {"ok": False, "status": "error", "message": sess["error_message"]}
+    sess["status"] = "approved"
+    _log.info("oauth/pkce: anthropic login completed (session=%s)", session_id)
+    return {"ok": True, "status": "approved"}
+
+
+async def _start_device_code_flow(provider_id: str) -> Dict[str, Any]:
+    """Initiate a device-code flow (Nous or OpenAI Codex).
+
+    Calls the provider's device-auth endpoint via the existing CLI helpers,
+    then spawns a background poller. Returns the user-facing display fields
+    so the UI can render the verification page link + user code.
+    """
+    from hermes_cli import auth as hauth
+    if provider_id == "nous":
+        from hermes_cli.auth import _request_device_code, PROVIDER_REGISTRY
+        import httpx
+        pconfig = PROVIDER_REGISTRY["nous"]
+        portal_base_url = (
+            os.getenv("HERMES_PORTAL_BASE_URL")
+            or os.getenv("NOUS_PORTAL_BASE_URL")
+            or pconfig.portal_base_url
+        ).rstrip("/")
+        client_id = pconfig.client_id
+        scope = pconfig.scope
+
+        def _do_nous_device_request():
+            with httpx.Client(timeout=httpx.Timeout(15.0), headers={"Accept": "application/json"}) as client:
+                return _request_device_code(
+                    client=client,
+                    portal_base_url=portal_base_url,
+                    client_id=client_id,
+                    scope=scope,
+                )
+
+        device_data = await asyncio.get_event_loop().run_in_executor(None, _do_nous_device_request)
+        sid, sess = _new_oauth_session("nous", "device_code")
+        sess["device_code"] = str(device_data["device_code"])
+        sess["interval"] = int(device_data["interval"])
+        sess["expires_at"] = time.time() + int(device_data["expires_in"])
+        sess["portal_base_url"] = portal_base_url
+        sess["client_id"] = client_id
+        threading.Thread(
+            target=_nous_poller, args=(sid,), daemon=True, name=f"oauth-poll-{sid[:6]}"
+        ).start()
+        return {
+            "session_id": sid,
+            "flow": "device_code",
+            "user_code": str(device_data["user_code"]),
+            "verification_url": str(device_data["verification_uri_complete"]),
+            "expires_in": int(device_data["expires_in"]),
+            "poll_interval": int(device_data["interval"]),
+        }
+
+    if provider_id == "openai-codex":
+        # Codex uses fixed OpenAI device-auth endpoints; reuse the helper.
+        sid, _ = _new_oauth_session("openai-codex", "device_code")
+        # Use the helper but in a thread because it polls inline.
+        # We can't extract just the start step without refactoring auth.py,
+        # so we run the full helper in a worker and proxy the user_code +
+        # verification_url back via the session dict. The helper prints
+        # to stdout — we capture nothing here, just status.
+        threading.Thread(
+            target=_codex_full_login_worker, args=(sid,), daemon=True,
+            name=f"oauth-codex-{sid[:6]}",
+        ).start()
+        # Block briefly until the worker has populated the user_code, OR error.
+        deadline = time.time() + 10
+        while time.time() < deadline:
+            with _oauth_sessions_lock:
+                s = _oauth_sessions.get(sid)
+                if s and (s.get("user_code") or s["status"] != "pending"):
+                    break
+            await asyncio.sleep(0.1)
+        with _oauth_sessions_lock:
+            s = _oauth_sessions.get(sid, {})
+        if s.get("status") == "error":
+            raise HTTPException(status_code=500, detail=s.get("error_message") or "device-auth failed")
+        if not s.get("user_code"):
+            raise HTTPException(status_code=504, detail="device-auth timed out before returning a user code")
+        return {
+            "session_id": sid,
+            "flow": "device_code",
+            "user_code": s["user_code"],
+            "verification_url": s["verification_url"],
+            "expires_in": int(s.get("expires_in") or 900),
+            "poll_interval": int(s.get("interval") or 5),
+        }
+
+    raise HTTPException(status_code=400, detail=f"Provider {provider_id} does not support device-code flow")
+
+
+def _nous_poller(session_id: str) -> None:
+    """Background poller that drives a Nous device-code flow to completion."""
+    from hermes_cli.auth import _poll_for_token, refresh_nous_oauth_from_state
+    from datetime import datetime, timezone
+    import httpx
+    with _oauth_sessions_lock:
+        sess = _oauth_sessions.get(session_id)
+    if not sess:
+        return
+    portal_base_url = sess["portal_base_url"]
+    client_id = sess["client_id"]
+    device_code = sess["device_code"]
+    interval = sess["interval"]
+    expires_in = max(60, int(sess["expires_at"] - time.time()))
+    try:
+        with httpx.Client(timeout=httpx.Timeout(15.0), headers={"Accept": "application/json"}) as client:
+            token_data = _poll_for_token(
+                client=client,
+                portal_base_url=portal_base_url,
+                client_id=client_id,
+                device_code=device_code,
+                expires_in=expires_in,
+                poll_interval=interval,
+            )
+        # Same post-processing as _nous_device_code_login (mint agent key)
+        now = datetime.now(timezone.utc)
+        token_ttl = int(token_data.get("expires_in") or 0)
+        auth_state = {
+            "portal_base_url": portal_base_url,
+            "inference_base_url": token_data.get("inference_base_url"),
+            "client_id": client_id,
+            "scope": token_data.get("scope"),
+            "token_type": token_data.get("token_type", "Bearer"),
+            "access_token": token_data["access_token"],
+            "refresh_token": token_data.get("refresh_token"),
+            "obtained_at": now.isoformat(),
+            "expires_at": (
+                datetime.fromtimestamp(now.timestamp() + token_ttl, tz=timezone.utc).isoformat()
+                if token_ttl else None
+            ),
+            "expires_in": token_ttl,
+        }
+        full_state = refresh_nous_oauth_from_state(
+            auth_state, min_key_ttl_seconds=300, timeout_seconds=15.0,
+            force_refresh=False, force_mint=True,
+        )
+        # Save into credential pool same as auth_commands.py does
+        from agent.credential_pool import (
+            PooledCredential,
+            load_pool,
+            AUTH_TYPE_OAUTH,
+            SOURCE_MANUAL,
+        )
+        pool = load_pool("nous")
+        entry = PooledCredential.from_dict("nous", {
+            **full_state,
+            "label": "dashboard device_code",
+            "auth_type": AUTH_TYPE_OAUTH,
+            "source": f"{SOURCE_MANUAL}:dashboard_device_code",
+            "base_url": full_state.get("inference_base_url"),
+        })
+        pool.add_entry(entry)
+        with _oauth_sessions_lock:
+            sess["status"] = "approved"
+        _log.info("oauth/device: nous login completed (session=%s)", session_id)
+    except Exception as e:
+        _log.warning("nous device-code poll failed (session=%s): %s", session_id, e)
+        with _oauth_sessions_lock:
+            sess["status"] = "error"
+            sess["error_message"] = str(e)
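Stripped of provider specifics, both background pollers reduce to the same loop; a generic RFC 8628-style sketch, where request_token is a placeholder callable rather than a Hermes API:

import time

def poll_device_code(request_token, interval: int, expires_in: float):
    """request_token() -> tokens dict on success, None while still pending."""
    deadline = time.time() + expires_in
    while time.time() < deadline:
        time.sleep(interval)
        tokens = request_token()
        if tokens is not None:
            return tokens
    raise TimeoutError("device code expired before approval")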
+
+
+def _codex_full_login_worker(session_id: str) -> None:
+    """Run the complete OpenAI Codex device-code flow.
+
+    Codex doesn't use the standard OAuth device-code endpoints; it has its
+    own ``/api/accounts/deviceauth/usercode`` (JSON body, returns
+    ``device_auth_id``) and ``/api/accounts/deviceauth/token`` (JSON body
+    polled until 200). On success the response carries an
+    ``authorization_code`` + ``code_verifier`` that get exchanged at
+    CODEX_OAUTH_TOKEN_URL with grant_type=authorization_code.
+
+    The flow is replicated inline (rather than calling
+    _codex_device_code_login) because that helper prints/blocks/polls in a
+    single function — we need to surface the user_code to the dashboard the
+    moment we receive it, well before polling completes.
+    """
+    try:
+        import httpx
+        from hermes_cli.auth import (
+            CODEX_OAUTH_CLIENT_ID,
+            CODEX_OAUTH_TOKEN_URL,
+            DEFAULT_CODEX_BASE_URL,
+        )
+        issuer = "https://auth.openai.com"
+
+        # Step 1: request device code
+        with httpx.Client(timeout=httpx.Timeout(15.0)) as client:
+            resp = client.post(
+                f"{issuer}/api/accounts/deviceauth/usercode",
+                json={"client_id": CODEX_OAUTH_CLIENT_ID},
+                headers={"Content-Type": "application/json"},
+            )
+        if resp.status_code != 200:
+            raise RuntimeError(f"deviceauth/usercode returned {resp.status_code}")
+        device_data = resp.json()
+        user_code = device_data.get("user_code", "")
+        device_auth_id = device_data.get("device_auth_id", "")
+        poll_interval = max(3, int(device_data.get("interval", "5")))
+        if not user_code or not device_auth_id:
+            raise RuntimeError("device-code response missing user_code or device_auth_id")
+        verification_url = f"{issuer}/codex/device"
+        with _oauth_sessions_lock:
+            sess = _oauth_sessions.get(session_id)
+            if not sess:
+                return
+            sess["user_code"] = user_code
+            sess["verification_url"] = verification_url
+            sess["device_auth_id"] = device_auth_id
+            sess["interval"] = poll_interval
+            sess["expires_in"] = 15 * 60  # OpenAI's effective limit
+            sess["expires_at"] = time.time() + sess["expires_in"]
+
+        # Step 2: poll until authorized
+        deadline = time.time() + sess["expires_in"]
+        code_resp = None
+        with httpx.Client(timeout=httpx.Timeout(15.0)) as client:
+            while time.time() < deadline:
+                time.sleep(poll_interval)
+                poll = client.post(
+                    f"{issuer}/api/accounts/deviceauth/token",
+                    json={"device_auth_id": device_auth_id, "user_code": user_code},
+                    headers={"Content-Type": "application/json"},
+                )
+                if poll.status_code == 200:
+                    code_resp = poll.json()
+                    break
+                if poll.status_code in (403, 404):
+                    continue  # user hasn't authorized yet
+                raise RuntimeError(f"deviceauth/token poll returned {poll.status_code}")
+
+        if code_resp is None:
+            with _oauth_sessions_lock:
+                sess["status"] = "expired"
+                sess["error_message"] = "Device code expired before approval"
+            return
+
+        # Step 3: exchange authorization_code for tokens
+        authorization_code = code_resp.get("authorization_code", "")
+        code_verifier = code_resp.get("code_verifier", "")
+        if not authorization_code or not code_verifier:
+            raise RuntimeError("device-auth response missing authorization_code/code_verifier")
+        with httpx.Client(timeout=httpx.Timeout(15.0)) as client:
+            token_resp = client.post(
+                CODEX_OAUTH_TOKEN_URL,
+                data={
+                    "grant_type": "authorization_code",
+                    "code": authorization_code,
+                    "redirect_uri": f"{issuer}/deviceauth/callback",
+                    "client_id": CODEX_OAUTH_CLIENT_ID,
+                    "code_verifier": code_verifier,
+                },
+                headers={"Content-Type": "application/x-www-form-urlencoded"},
+            )
+        if token_resp.status_code != 200:
+            raise RuntimeError(f"token exchange returned {token_resp.status_code}")
+        tokens = token_resp.json()
+        access_token = tokens.get("access_token", "")
+        refresh_token = tokens.get("refresh_token", "")
+        if not access_token:
+            raise RuntimeError("token exchange did not return access_token")
+
+        # Persist via credential pool — same shape as auth_commands.add_command
+        from agent.credential_pool import (
+            PooledCredential,
+            load_pool,
+            AUTH_TYPE_OAUTH,
+            SOURCE_MANUAL,
+        )
+        import uuid as _uuid
+        pool = load_pool("openai-codex")
+        base_url = (
+            os.getenv("HERMES_CODEX_BASE_URL", "").strip().rstrip("/")
+            or DEFAULT_CODEX_BASE_URL
+        )
+        entry = PooledCredential(
+            provider="openai-codex",
+            id=_uuid.uuid4().hex[:6],
+            label="dashboard device_code",
+            auth_type=AUTH_TYPE_OAUTH,
+            priority=0,
+            source=f"{SOURCE_MANUAL}:dashboard_device_code",
+            access_token=access_token,
+            refresh_token=refresh_token,
+            base_url=base_url,
+        )
+        pool.add_entry(entry)
+        with _oauth_sessions_lock:
+            sess["status"] = "approved"
+        _log.info("oauth/device: openai-codex login completed (session=%s)", session_id)
+    except Exception as e:
+        _log.warning("codex device-code worker failed (session=%s): %s", session_id, e)
+        with _oauth_sessions_lock:
+            s = _oauth_sessions.get(session_id)
+            if s:
+                s["status"] = "error"
+                s["error_message"] = str(e)
+
+
+@app.post("/api/providers/oauth/{provider_id}/start")
+async def start_oauth_login(provider_id: str, request: Request):
+    """Initiate an OAuth login flow. Token-protected."""
+    auth = request.headers.get("authorization", "")
+    if auth != f"Bearer {_SESSION_TOKEN}":
+        raise HTTPException(status_code=401, detail="Unauthorized")
+    _gc_oauth_sessions()
+    valid = {p["id"] for p in _OAUTH_PROVIDER_CATALOG}
+    if provider_id not in valid:
+        raise HTTPException(status_code=400, detail=f"Unknown provider {provider_id}")
+    catalog_entry = next(p for p in _OAUTH_PROVIDER_CATALOG if p["id"] == provider_id)
+    if catalog_entry["flow"] == "external":
+        raise HTTPException(
+            status_code=400,
+            detail=f"{provider_id} uses an external CLI; run `{catalog_entry['cli_command']}` manually",
+        )
+    try:
+        if catalog_entry["flow"] == "pkce":
+            return _start_anthropic_pkce()
+        if catalog_entry["flow"] == "device_code":
+            return await _start_device_code_flow(provider_id)
+    except HTTPException:
+        raise
+    except Exception as e:
+        _log.exception("oauth/start %s failed", provider_id)
+        raise HTTPException(status_code=500, detail=str(e))
+    raise HTTPException(status_code=400, detail="Unsupported flow")
+
+
+class OAuthSubmitBody(BaseModel):
+    session_id: str
+    code: str
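For completeness, this is roughly how a client would drive the start and poll endpoints defined around this point; a hypothetical script against the default local port from the module docstring, assuming the user completes the browser step out of band:

import time
import httpx

base = "http://127.0.0.1:9119"
with httpx.Client(base_url=base, timeout=30) as client:
    token = client.get("/api/auth/session-token").json()["token"]
    hdrs = {"Authorization": f"Bearer {token}"}
    start = client.post("/api/providers/oauth/nous/start", headers=hdrs).json()
    print("visit", start["verification_url"], "and enter", start["user_code"])
    while True:
        time.sleep(start.get("poll_interval", 5))
        state = client.get(f"/api/providers/oauth/nous/poll/{start['session_id']}").json()
        if state["status"] != "pending":
            print("final status:", state["status"])
            break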
Token-protected.""" + auth = request.headers.get("authorization", "") + if auth != f"Bearer {_SESSION_TOKEN}": + raise HTTPException(status_code=401, detail="Unauthorized") + if provider_id == "anthropic": + return await asyncio.get_event_loop().run_in_executor( + None, _submit_anthropic_pkce, body.session_id, body.code, + ) + raise HTTPException(status_code=400, detail=f"submit not supported for {provider_id}") + + +@app.get("/api/providers/oauth/{provider_id}/poll/{session_id}") +async def poll_oauth_session(provider_id: str, session_id: str): + """Poll a device-code session's status (no auth โ€” read-only state).""" + with _oauth_sessions_lock: + sess = _oauth_sessions.get(session_id) + if not sess: + raise HTTPException(status_code=404, detail="Session not found or expired") + if sess["provider"] != provider_id: + raise HTTPException(status_code=400, detail="Provider mismatch for session") + return { + "session_id": session_id, + "status": sess["status"], + "error_message": sess.get("error_message"), + "expires_at": sess.get("expires_at"), + } + + +@app.delete("/api/providers/oauth/sessions/{session_id}") +async def cancel_oauth_session(session_id: str, request: Request): + """Cancel a pending OAuth session. Token-protected.""" + auth = request.headers.get("authorization", "") + if auth != f"Bearer {_SESSION_TOKEN}": + raise HTTPException(status_code=401, detail="Unauthorized") + with _oauth_sessions_lock: + sess = _oauth_sessions.pop(session_id, None) + if sess is None: + return {"ok": False, "message": "session not found"} + return {"ok": True, "session_id": session_id} + + +# --------------------------------------------------------------------------- +# Session detail endpoints +# --------------------------------------------------------------------------- + + +@app.get("/api/sessions/{session_id}") +async def get_session_detail(session_id: str): + from hermes_state import SessionDB + db = SessionDB() + try: + sid = db.resolve_session_id(session_id) + session = db.get_session(sid) if sid else None + if not session: + raise HTTPException(status_code=404, detail="Session not found") + return session + finally: + db.close() + + +@app.get("/api/sessions/{session_id}/messages") +async def get_session_messages(session_id: str): + from hermes_state import SessionDB + db = SessionDB() + try: + sid = db.resolve_session_id(session_id) + if not sid: + raise HTTPException(status_code=404, detail="Session not found") + messages = db.get_messages(sid) + return {"session_id": sid, "messages": messages} + finally: + db.close() + + +@app.delete("/api/sessions/{session_id}") +async def delete_session_endpoint(session_id: str): + from hermes_state import SessionDB + db = SessionDB() + try: + if not db.delete_session(session_id): + raise HTTPException(status_code=404, detail="Session not found") + return {"ok": True} + finally: + db.close() + + +# --------------------------------------------------------------------------- +# Log viewer endpoint +# --------------------------------------------------------------------------- + + +@app.get("/api/logs") +async def get_logs( + file: str = "agent", + lines: int = 100, + level: Optional[str] = None, + component: Optional[str] = None, + search: Optional[str] = None, +): + from hermes_cli.logs import _read_tail, LOG_FILES + + log_name = LOG_FILES.get(file) + if not log_name: + raise HTTPException(status_code=400, detail=f"Unknown log file: {file}") + log_path = get_hermes_home() / "logs" / log_name + if not log_path.exists(): + return {"file": file, "lines": []} 
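+
+    # Illustrative queries this endpoint serves (parameter values are
+    # examples only; valid component names come from
+    # hermes_logging.COMPONENT_PREFIXES):
+    #   GET /api/logs?file=agent&lines=200&level=WARNING
+    #   GET /api/logs?file=agent&search=timeout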
+ + try: + from hermes_logging import COMPONENT_PREFIXES + except ImportError: + COMPONENT_PREFIXES = {} + + # Normalize "ALL" / "all" / empty โ†’ no filter. _matches_filters treats an + # empty tuple as "must match a prefix" (startswith(()) is always False), + # so passing () instead of None silently drops every line. + min_level = level if level and level.upper() != "ALL" else None + if component and component.lower() != "all": + comp_prefixes = COMPONENT_PREFIXES.get(component) + if comp_prefixes is None: + raise HTTPException( + status_code=400, + detail=f"Unknown component: {component}. " + f"Available: {', '.join(sorted(COMPONENT_PREFIXES))}", + ) + else: + comp_prefixes = None + + has_filters = bool(min_level or comp_prefixes or search) + result = _read_tail( + log_path, min(lines, 500) if not search else 2000, + has_filters=has_filters, + min_level=min_level, + component_prefixes=comp_prefixes, + ) + # Post-filter by search term (case-insensitive substring match). + # _read_tail doesn't support free-text search, so we filter here and + # trim to the requested line count afterward. + if search: + needle = search.lower() + result = [l for l in result if needle in l.lower()][-min(lines, 500):] + return {"file": file, "lines": result} + + +# --------------------------------------------------------------------------- +# Cron job management endpoints +# --------------------------------------------------------------------------- + + +class CronJobCreate(BaseModel): + prompt: str + schedule: str + name: str = "" + deliver: str = "local" + + +class CronJobUpdate(BaseModel): + updates: dict + + +@app.get("/api/cron/jobs") +async def list_cron_jobs(): + from cron.jobs import list_jobs + return list_jobs(include_disabled=True) + + +@app.get("/api/cron/jobs/{job_id}") +async def get_cron_job(job_id: str): + from cron.jobs import get_job + job = get_job(job_id) + if not job: + raise HTTPException(status_code=404, detail="Job not found") + return job + + +@app.post("/api/cron/jobs") +async def create_cron_job(body: CronJobCreate): + from cron.jobs import create_job + try: + job = create_job(prompt=body.prompt, schedule=body.schedule, + name=body.name, deliver=body.deliver) + return job + except Exception as e: + _log.exception("POST /api/cron/jobs failed") + raise HTTPException(status_code=400, detail=str(e)) + + +@app.put("/api/cron/jobs/{job_id}") +async def update_cron_job(job_id: str, body: CronJobUpdate): + from cron.jobs import update_job + job = update_job(job_id, body.updates) + if not job: + raise HTTPException(status_code=404, detail="Job not found") + return job + + +@app.post("/api/cron/jobs/{job_id}/pause") +async def pause_cron_job(job_id: str): + from cron.jobs import pause_job + job = pause_job(job_id) + if not job: + raise HTTPException(status_code=404, detail="Job not found") + return job + + +@app.post("/api/cron/jobs/{job_id}/resume") +async def resume_cron_job(job_id: str): + from cron.jobs import resume_job + job = resume_job(job_id) + if not job: + raise HTTPException(status_code=404, detail="Job not found") + return job + + +@app.post("/api/cron/jobs/{job_id}/trigger") +async def trigger_cron_job(job_id: str): + from cron.jobs import trigger_job + job = trigger_job(job_id) + if not job: + raise HTTPException(status_code=404, detail="Job not found") + return job + + +@app.delete("/api/cron/jobs/{job_id}") +async def delete_cron_job(job_id: str): + from cron.jobs import remove_job + if not remove_job(job_id): + raise HTTPException(status_code=404, detail="Job not found") + 
return {"ok": True} + + +# --------------------------------------------------------------------------- +# Skills & Tools endpoints +# --------------------------------------------------------------------------- + + +class SkillToggle(BaseModel): + name: str + enabled: bool + + +@app.get("/api/skills") +async def get_skills(): + from tools.skills_tool import _find_all_skills + from hermes_cli.skills_config import get_disabled_skills + config = load_config() + disabled = get_disabled_skills(config) + skills = _find_all_skills(skip_disabled=True) + for s in skills: + s["enabled"] = s["name"] not in disabled + return skills + + +@app.put("/api/skills/toggle") +async def toggle_skill(body: SkillToggle): + from hermes_cli.skills_config import get_disabled_skills, save_disabled_skills + config = load_config() + disabled = get_disabled_skills(config) + if body.enabled: + disabled.discard(body.name) + else: + disabled.add(body.name) + save_disabled_skills(config, disabled) + return {"ok": True, "name": body.name, "enabled": body.enabled} + + +@app.get("/api/tools/toolsets") +async def get_toolsets(): + from hermes_cli.tools_config import ( + _get_effective_configurable_toolsets, + _get_platform_tools, + _toolset_has_keys, + ) + from toolsets import resolve_toolset + + config = load_config() + enabled_toolsets = _get_platform_tools( + config, + "cli", + include_default_mcp_servers=False, + ) + result = [] + for name, label, desc in _get_effective_configurable_toolsets(): + try: + tools = sorted(set(resolve_toolset(name))) + except Exception: + tools = [] + is_enabled = name in enabled_toolsets + result.append({ + "name": name, "label": label, "description": desc, + "enabled": is_enabled, + "available": is_enabled, + "configured": _toolset_has_keys(name, config), + "tools": tools, + }) + return result + + +# --------------------------------------------------------------------------- +# Raw YAML config endpoint +# --------------------------------------------------------------------------- + + +class RawConfigUpdate(BaseModel): + yaml_text: str + + +@app.get("/api/config/raw") +async def get_config_raw(): + path = get_config_path() + if not path.exists(): + return {"yaml": ""} + return {"yaml": path.read_text(encoding="utf-8")} + + +@app.put("/api/config/raw") +async def update_config_raw(body: RawConfigUpdate): + try: + parsed = yaml.safe_load(body.yaml_text) + if not isinstance(parsed, dict): + raise HTTPException(status_code=400, detail="YAML must be a mapping") + save_config(parsed) + return {"ok": True} + except yaml.YAMLError as e: + raise HTTPException(status_code=400, detail=f"Invalid YAML: {e}") + + +# --------------------------------------------------------------------------- +# Token / cost analytics endpoint +# --------------------------------------------------------------------------- + + +@app.get("/api/analytics/usage") +async def get_usage_analytics(days: int = 30): + from hermes_state import SessionDB + db = SessionDB() + try: + cutoff = time.time() - (days * 86400) + cur = db._conn.execute(""" + SELECT date(started_at, 'unixepoch') as day, + SUM(input_tokens) as input_tokens, + SUM(output_tokens) as output_tokens, + SUM(cache_read_tokens) as cache_read_tokens, + SUM(reasoning_tokens) as reasoning_tokens, + COALESCE(SUM(estimated_cost_usd), 0) as estimated_cost, + COALESCE(SUM(actual_cost_usd), 0) as actual_cost, + COUNT(*) as sessions + FROM sessions WHERE started_at > ? 
+ GROUP BY day ORDER BY day + """, (cutoff,)) + daily = [dict(r) for r in cur.fetchall()] + + cur2 = db._conn.execute(""" + SELECT model, + SUM(input_tokens) as input_tokens, + SUM(output_tokens) as output_tokens, + COALESCE(SUM(estimated_cost_usd), 0) as estimated_cost, + COUNT(*) as sessions + FROM sessions WHERE started_at > ? AND model IS NOT NULL + GROUP BY model ORDER BY SUM(input_tokens) + SUM(output_tokens) DESC + """, (cutoff,)) + by_model = [dict(r) for r in cur2.fetchall()] + + cur3 = db._conn.execute(""" + SELECT SUM(input_tokens) as total_input, + SUM(output_tokens) as total_output, + SUM(cache_read_tokens) as total_cache_read, + SUM(reasoning_tokens) as total_reasoning, + COALESCE(SUM(estimated_cost_usd), 0) as total_estimated_cost, + COALESCE(SUM(actual_cost_usd), 0) as total_actual_cost, + COUNT(*) as total_sessions + FROM sessions WHERE started_at > ? + """, (cutoff,)) + totals = dict(cur3.fetchone()) + + return {"daily": daily, "by_model": by_model, "totals": totals, "period_days": days} + finally: + db.close() + + +def mount_spa(application: FastAPI): + """Mount the built SPA. Falls back to index.html for client-side routing.""" + if not WEB_DIST.exists(): + @application.get("/{full_path:path}") + async def no_frontend(full_path: str): + return JSONResponse( + {"error": "Frontend not built. Run: cd web && npm run build"}, + status_code=404, + ) + return + + application.mount("/assets", StaticFiles(directory=WEB_DIST / "assets"), name="assets") + + @application.get("/{full_path:path}") + async def serve_spa(full_path: str): + file_path = WEB_DIST / full_path + # Prevent path traversal via url-encoded sequences (%2e%2e/) + if ( + full_path + and file_path.resolve().is_relative_to(WEB_DIST.resolve()) + and file_path.exists() + and file_path.is_file() + ): + return FileResponse(file_path) + return FileResponse( + WEB_DIST / "index.html", + headers={"Cache-Control": "no-store, no-cache, must-revalidate"}, + ) + + +mount_spa(app) + + +def start_server(host: str = "127.0.0.1", port: int = 9119, open_browser: bool = True): + """Start the web UI server.""" + import uvicorn + + if host not in ("127.0.0.1", "localhost", "::1"): + import logging + logging.warning( + "Binding to %s โ€” the web UI exposes config and API keys. " + "Only bind to non-localhost if you trust all users on the network.", host, + ) + + if open_browser: + import threading + import webbrowser + + def _open(): + import time as _t + _t.sleep(1.0) + webbrowser.open(f"http://{host}:{port}") + + threading.Thread(target=_open, daemon=True).start() + + print(f" Hermes Web UI โ†’ http://{host}:{port}") + uvicorn.run(app, host=host, port=port, log_level="warning") diff --git a/hermes_constants.py b/hermes_constants.py index 4c2b95b42..adc9ea12c 100644 --- a/hermes_constants.py +++ b/hermes_constants.py @@ -190,6 +190,37 @@ def is_wsl() -> bool: return _wsl_detected +_container_detected: bool | None = None + + +def is_container() -> bool: + """Return True when running inside a Docker/Podman container. + + Checks ``/.dockerenv`` (Docker), ``/run/.containerenv`` (Podman), + and ``/proc/1/cgroup`` for container runtime markers. Result is + cached for the process lifetime. Import-safe โ€” no heavy deps. 
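+
+    A minimal usage sketch (illustrative)::
+
+        from hermes_constants import is_container
+
+        if is_container():
+            print("running inside a container")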
+ """ + global _container_detected + if _container_detected is not None: + return _container_detected + if os.path.exists("/.dockerenv"): + _container_detected = True + return True + if os.path.exists("/run/.containerenv"): + _container_detected = True + return True + try: + with open("/proc/1/cgroup", "r") as f: + cgroup = f.read() + if "docker" in cgroup or "podman" in cgroup or "/lxc/" in cgroup: + _container_detected = True + return True + except OSError: + pass + _container_detected = False + return False + + # โ”€โ”€โ”€ Well-Known Paths โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ diff --git a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py index 9b58eab59..beb32aba2 100644 --- a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py +++ b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py @@ -1995,7 +1995,9 @@ class Migrator: if compaction.get("timeout"): pass # No direct mapping if compaction.get("model"): - compression["summary_model"] = compaction["model"] + aux = hermes_cfg.setdefault("auxiliary", {}) + aux_comp = aux.setdefault("compression", {}) + aux_comp["model"] = compaction["model"] hermes_cfg["compression"] = compression changes = True diff --git a/package-lock.json b/package-lock.json index 476e6938c..de94d1467 100644 --- a/package-lock.json +++ b/package-lock.json @@ -33,12 +33,6 @@ "npm": ">=8" } }, - "node_modules/@appium/logger/node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "license": "MIT" - }, "node_modules/@askjo/camoufox-browser": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/@askjo/camoufox-browser/-/camoufox-browser-1.0.12.tgz", @@ -76,6 +70,58 @@ "node": ">=12" } }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": 
"sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -126,6 +172,55 @@ "node": ">=18" } }, + "node_modules/@puppeteer/browsers/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@puppeteer/browsers/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/@puppeteer/browsers/node_modules/tar-fs": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.2.tgz", + "integrity": "sha512-QGxxTxxyleAdyM3kpFs14ymbYmNFrfY+pHj7Z8FgtbZ7w2//VAgLMac7sT6nRpIHjppXO2AwwEOg0bPFVRcmXw==", + "license": "MIT", + "dependencies": { + "pump": "^3.0.0", + "tar-stream": "^3.1.5" + }, + "optionalDependencies": { + "bare-fs": "^4.0.1", + "bare-path": "^3.0.0" + } + }, + "node_modules/@puppeteer/browsers/node_modules/tar-stream": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.8.tgz", + "integrity": "sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==", + "license": "MIT", + "dependencies": { + "b4a": "^1.6.4", + "bare-fs": "^4.5.5", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, "node_modules/@sindresorhus/is": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", @@ -160,9 +255,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "20.19.33", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.33.tgz", - "integrity": "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==", + "version": "20.19.39", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.39.tgz", + "integrity": "sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==", "license": "MIT", "dependencies": { "undici-types": "~6.21.0" @@ -200,14 +295,14 @@ } }, "node_modules/@wdio/config": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/@wdio/config/-/config-9.24.0.tgz", - "integrity": "sha512-rcHu0eG16rSEmHL0sEKDcr/vYFmGhQ5GOlmlx54r+1sgh6sf136q+kth4169s16XqviWGW3LjZbUfpTK29pGtw==", + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/@wdio/config/-/config-9.27.0.tgz", + "integrity": "sha512-9y8z7ugIbU6ycKrA2SqCpKh1/hobut2rDq9CLt/BNVzSlebBBVOTMiAt1XroZzcPnA7/ZqpbkpOsbpPUaAQuNQ==", "license": "MIT", "dependencies": { "@wdio/logger": "9.18.0", - "@wdio/types": "9.24.0", - "@wdio/utils": "9.24.0", + "@wdio/types": "9.27.0", + "@wdio/utils": "9.27.0", "deepmerge-ts": "^7.0.3", "glob": "^10.2.2", "import-meta-resolve": "^4.0.0", @@ -217,6 +312,73 @@ "node": 
">=18.20.0" } }, + "node_modules/@wdio/config/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/@wdio/config/node_modules/brace-expansion": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.1.0.tgz", + "integrity": "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@wdio/config/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@wdio/config/node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@wdio/config/node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/@wdio/logger": { "version": "9.18.0", "resolved": "https://registry.npmjs.org/@wdio/logger/-/logger-9.18.0.tgz", @@ -234,9 +396,9 @@ } }, "node_modules/@wdio/protocols": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/@wdio/protocols/-/protocols-9.24.0.tgz", - "integrity": "sha512-ozQKYddBLT4TRvU9J+fGrhVUtx3iDAe+KNCJcTDMFMxNSdDMR2xFQdNp8HLHypspk58oXTYCvz6ZYjySthhqsw==", + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/@wdio/protocols/-/protocols-9.27.0.tgz", + "integrity": "sha512-rIk69BsY1+6uU2PEN5FiRpI6K7HJ86YHzZRFBe4iRzKXQgGNk1zWzbdVJIuNFoOWsnmYUkK42KSSOT4Le6EmiQ==", "license": "MIT" }, "node_modules/@wdio/repl": { @@ -252,9 +414,9 @@ } }, "node_modules/@wdio/types": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/@wdio/types/-/types-9.24.0.tgz", - "integrity": "sha512-PYYunNl8Uq1r8YMJAK6ReRy/V/XIrCSyj5cpCtR5EqCL6heETOORFj7gt4uPnzidfgbtMBcCru0LgjjlMiH1UQ==", + "version": "9.27.0", + "resolved": 
"https://registry.npmjs.org/@wdio/types/-/types-9.27.0.tgz", + "integrity": "sha512-DQJ+OdRBqUBcQ30DN2Z651hEVh3OoxnlDUSRqlWy9An2AY6v9rYWTj825B6zsj5pLLEToYO1tfwWq0ab183pXg==", "license": "MIT", "dependencies": { "@types/node": "^20.1.0" @@ -264,14 +426,14 @@ } }, "node_modules/@wdio/utils": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/@wdio/utils/-/utils-9.24.0.tgz", - "integrity": "sha512-6WhtzC5SNCGRBTkaObX6A07Ofnnyyf+TQH/d/fuhZRqvBknrP4AMMZF+PFxGl1fwdySWdBn+gV2QLE+52Byowg==", + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/@wdio/utils/-/utils-9.27.0.tgz", + "integrity": "sha512-fUasd5OKJTy2seJfWnYZ9xlxTtY0p/Kyeuh7Tbb8kcofBqmBi2fTvM3sfZlo1tGQX9yCh+IS2N7hlfyFMmuZ+w==", "license": "MIT", "dependencies": { "@puppeteer/browsers": "^2.2.0", "@wdio/logger": "9.18.0", - "@wdio/types": "9.24.0", + "@wdio/types": "9.27.0", "decamelize": "^6.0.0", "deepmerge-ts": "^7.0.3", "edgedriver": "^6.1.2", @@ -289,9 +451,9 @@ } }, "node_modules/@zip.js/zip.js": { - "version": "2.8.21", - "resolved": "https://registry.npmjs.org/@zip.js/zip.js/-/zip.js-2.8.21.tgz", - "integrity": "sha512-fkyzXISE3IMrstDO1AgPkJCx14MYHP/suIGiAovEYEuBjq3mffsuL6aMV7ohOSjW4rXtuACuUfpA3GtITgdtYg==", + "version": "2.8.26", + "resolved": "https://registry.npmjs.org/@zip.js/zip.js/-/zip.js-2.8.26.tgz", + "integrity": "sha512-RQ4h9F6DOiHxpdocUDrOl6xBM+yOtz+LkUol47AVWcfebGBDpZ7w7Xvz9PS24JgXvLGiXXzSAfdCdVy1tPlaFA==", "license": "BSD-3-Clause", "engines": { "bun": ">=0.7.0", @@ -372,12 +534,15 @@ } }, "node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, "engines": { - "node": ">=12" + "node": ">=8" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" @@ -419,6 +584,165 @@ "node": ">= 14" } }, + "node_modules/archiver-utils/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/archiver-utils/node_modules/brace-expansion": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.1.0.tgz", + "integrity": "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/archiver-utils/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/archiver-utils/node_modules/glob": { + "version": "10.5.0", 
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/archiver-utils/node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/archiver-utils/node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/archiver-utils/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/archiver/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/archiver/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/archiver/node_modules/tar-stream": { + "version": "3.1.8", + "resolved": 
"https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.8.tgz", + "integrity": "sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==", + "license": "MIT", + "dependencies": { + "b4a": "^1.6.4", + "bare-fs": "^4.5.5", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, "node_modules/aria-query": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", @@ -490,10 +814,13 @@ } }, "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "license": "MIT" + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } }, "node_modules/bare-events": { "version": "2.8.2", @@ -510,11 +837,10 @@ } }, "node_modules/bare-fs": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.5.4.tgz", - "integrity": "sha512-POK4oplfA7P7gqvetNmCs4CNtm9fNsx+IAh7jH7GgU0OJdge2rso0R20TNWVq6VoWcCvsTdlNDaleLHGaKx8CA==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.7.0.tgz", + "integrity": "sha512-xzqKsCFxAek9aezYhjJuJRXBIaYlg/0OGDTZp+T8eYmYMlm66cs6cYko02drIyjN2CBbi+I6L7YfXyqpqtKRXA==", "license": "Apache-2.0", - "optional": true, "dependencies": { "bare-events": "^2.5.4", "bare-path": "^3.0.0", @@ -535,11 +861,10 @@ } }, "node_modules/bare-os": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.6.2.tgz", - "integrity": "sha512-T+V1+1srU2qYNBmJCXZkUY5vQ0B4FSlL3QDROnKQYOqeiQR8UbjNHlPa+TIbM4cuidiN9GaTaOZgSEgsvPbh5A==", + "version": "3.8.7", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.8.7.tgz", + "integrity": "sha512-G4Gr1UsGeEy2qtDTZwL7JFLo2wapUarz7iTMcYcMFdS89AIQuBoyjgXZz0Utv7uHs3xA9LckhVbeBi8lEQrC+w==", "license": "Apache-2.0", - "optional": true, "engines": { "bare": ">=1.14.0" } @@ -549,26 +874,28 @@ "resolved": "https://registry.npmjs.org/bare-path/-/bare-path-3.0.0.tgz", "integrity": "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==", "license": "Apache-2.0", - "optional": true, "dependencies": { "bare-os": "^3.0.1" } }, "node_modules/bare-stream": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.8.0.tgz", - "integrity": "sha512-reUN0M2sHRqCdG4lUK3Fw8w98eeUIZHL5c3H7Mbhk2yVBL+oofgaIp0ieLfD5QXwPCypBpmEEKU2WZKzbAk8GA==", + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.13.0.tgz", + "integrity": "sha512-3zAJRZMDFGjdn+RVnNpF9kuELw+0Fl3lpndM4NcEOhb9zwtSo/deETfuIwMSE5BXanA0FrN1qVjffGwAg2Y7EA==", "license": "Apache-2.0", - "optional": true, "dependencies": { - "streamx": "^2.21.0", + "streamx": "^2.25.0", "teex": "^1.0.1" }, "peerDependencies": { + "bare-abort-controller": "*", "bare-buffer": "*", "bare-events": "*" }, "peerDependenciesMeta": { + "bare-abort-controller": { + "optional": true + }, "bare-buffer": { "optional": true }, @@ -578,11 +905,10 @@ } }, "node_modules/bare-url": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/bare-url/-/bare-url-2.3.2.tgz", - "integrity": 
"sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/bare-url/-/bare-url-2.4.0.tgz", + "integrity": "sha512-NSTU5WN+fy/L0DDenfE8SXQna4voXuW0FHM7wH8i3/q9khUSchfPbPezO4zSFMnDGIf9YE+mt/RWhZgNRKRIXA==", "license": "Apache-2.0", - "optional": true, "dependencies": { "bare-path": "^3.0.0" } @@ -608,9 +934,9 @@ "license": "MIT" }, "node_modules/baseline-browser-mapping": { - "version": "2.10.16", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.16.tgz", - "integrity": "sha512-Lyf3aK28zpsD1yQMiiHD4RvVb6UdMoo8xzG2XzFIfR9luPzOpcBlAsT/qfB1XWS1bxWT+UtE4WmQgsp297FYOA==", + "version": "2.10.18", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.18.tgz", + "integrity": "sha512-VSnGQAOLtP5mib/DPyg2/t+Tlv65NTBz83BJBJvmLVHHuKJVaDOBvJJykiT5TR++em5nfAySPccDZDa4oSrn8A==", "license": "Apache-2.0", "bin": { "baseline-browser-mapping": "dist/cli.cjs" @@ -620,18 +946,18 @@ } }, "node_modules/basic-ftp": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.0.tgz", - "integrity": "sha512-VoMINM2rqJwJgfdHq6RiUudKt2BV+FY5ZFezP/ypmwayk68+NzzAQy4XXLlqsGD4MCzq3DrmNFD/uUmBJuGoXw==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.2.tgz", + "integrity": "sha512-1tDrzKsdCg70WGvbFss/ulVAxupNauGnOlgpyjKzeQxzyllBLS0CGLV7tjIXTK3ZQA9/FBEm9qyFFN1bciA6pw==", "license": "MIT", "engines": { "node": ">=10.0.0" } }, "node_modules/better-sqlite3": { - "version": "12.8.0", - "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-12.8.0.tgz", - "integrity": "sha512-RxD2Vd96sQDjQr20kdP+F+dK/1OUNiVOl200vKBZY8u0vTwysfolF6Hq+3ZK2+h8My9YvZhHsF+RSGZW2VYrPQ==", + "version": "12.9.0", + "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-12.9.0.tgz", + "integrity": "sha512-wqUv4Gm3toFpHDQmaKD4QhZm3g1DjUBI0yzS4UBl6lElUmXFYdTQmmEDpAFa5o8FiFiymURypEnfVHzILKaxqQ==", "hasInstallScript": true, "license": "MIT", "dependencies": { @@ -662,44 +988,6 @@ "readable-stream": "^3.4.0" } }, - "node_modules/bl/node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/bl/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/bluebird": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", @@ -730,33 +1018,6 @@ "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/body-parser/node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/body-parser/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, "node_modules/boolbase": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", @@ -764,12 +1025,15 @@ "license": "ISC" }, "node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", + "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" } }, "node_modules/browserslist": { @@ -806,9 +1070,9 @@ } }, "node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", "funding": [ { "type": "github", @@ -826,7 +1090,7 @@ "license": "MIT", "dependencies": { "base64-js": "^1.3.1", - "ieee754": "^1.2.1" + "ieee754": "^1.1.13" } }, "node_modules/buffer-crc32": { @@ -919,97 +1183,10 @@ "playwright-core": "*" } }, - "node_modules/camoufox-js/node_modules/balanced-match": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", - "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", - "license": "MIT", - "engines": { - "node": "18 || 20 || >=22" - } - }, - "node_modules/camoufox-js/node_modules/brace-expansion": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", - "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", - "license": "MIT", - "dependencies": { - "balanced-match": "^4.0.2" - }, - "engines": { - "node": "18 || 20 || >=22" - } - }, - "node_modules/camoufox-js/node_modules/commander": { - "version": "14.0.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz", - "integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==", - "license": "MIT", - "engines": { - "node": ">=20" - } - }, - "node_modules/camoufox-js/node_modules/glob": { - "version": "13.0.6", - "resolved": 
"https://registry.npmjs.org/glob/-/glob-13.0.6.tgz", - "integrity": "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==", - "license": "BlueOak-1.0.0", - "dependencies": { - "minimatch": "^10.2.2", - "minipass": "^7.1.3", - "path-scurry": "^2.0.2" - }, - "engines": { - "node": "18 || 20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/camoufox-js/node_modules/lru-cache": { - "version": "11.3.2", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.3.2.tgz", - "integrity": "sha512-wgWa6FWQ3QRRJbIjbsldRJZxdxYngT/dO0I5Ynmlnin8qy7tC6xYzbcJjtN4wHLXtkbVwHzk0C+OejVw1XM+DQ==", - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/camoufox-js/node_modules/minimatch": { - "version": "10.2.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", - "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", - "license": "BlueOak-1.0.0", - "dependencies": { - "brace-expansion": "^5.0.5" - }, - "engines": { - "node": "18 || 20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/camoufox-js/node_modules/path-scurry": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", - "integrity": "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==", - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^11.0.0", - "minipass": "^7.1.2" - }, - "engines": { - "node": "18 || 20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/caniuse-lite": { - "version": "1.0.30001786", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001786.tgz", - "integrity": "sha512-4oxTZEvqmLLrERwxO76yfKM7acZo310U+v4kqexI2TL1DkkUEMT8UijrxxcnVdxR3qkVf5awGRX+4Z6aPHVKrA==", + "version": "1.0.30001787", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001787.tgz", + "integrity": "sha512-mNcrMN9KeI68u7muanUpEejSLghOKlVhRqS/Za2IeyGllJ9I9otGpR9g3nsw7n4W378TE/LyIteA0+/FOZm4Kg==", "funding": [ { "type": "opencollective", @@ -1109,41 +1286,6 @@ "node": ">=8" } }, - "node_modules/cliui/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/cliui/node_modules/strip-ansi": { 
"version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -1156,23 +1298,6 @@ "node": ">=8" } }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, "node_modules/clone-deep": { "version": "0.2.4", "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-0.2.4.tgz", @@ -1208,12 +1333,12 @@ "license": "MIT" }, "node_modules/commander": { - "version": "9.5.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", - "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz", + "integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==", "license": "MIT", "engines": { - "node": "^12.20.0 || >=14" + "node": ">=20" } }, "node_modules/compress-commons": { @@ -1232,6 +1357,46 @@ "node": ">= 14" } }, + "node_modules/compress-commons/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/compress-commons/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -1311,6 +1476,46 @@ "node": ">= 14" } }, + "node_modules/crc32-stream/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/crc32-stream/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": 
"sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -1395,20 +1600,12 @@ } }, "node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "license": "MIT", "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "ms": "2.0.0" } }, "node_modules/decamelize": { @@ -1598,9 +1795,9 @@ } }, "node_modules/dotenv": { - "version": "17.4.1", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.4.1.tgz", - "integrity": "sha512-k8DaKGP6r1G30Lx8V4+pCsLzKr8vLmV2paqEj1Y55GdAgJuIqpRp5FfajGF8KtwMxCz9qJc6wUIJnm053d/WCw==", + "version": "17.4.2", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.4.2.tgz", + "integrity": "sha512-nI4U3TottKAcAD9LLud4Cb7b2QztQMUEfHbvhTH09bqXTxnSie8WnjPALV/WMCrJZ6UV/qHJ6L03OqO3LcdYZw==", "license": "BSD-2-Clause", "engines": { "node": ">=12" @@ -1720,15 +1917,15 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.331", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.331.tgz", - "integrity": "sha512-IbxXrsTlD3hRodkLnbxAPP4OuJYdWCeM3IOdT+CpcMoIwIoDfCmRpEtSPfwBXxVkg9xmBeY7Lz2Eo2TDn/HC3Q==", + "version": "1.5.335", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.335.tgz", + "integrity": "sha512-q9n5T4BR4Xwa2cwbrwcsDJtHD/enpQ5S1xF1IAtdqf5AAgqDFmR/aakqH3ChFdqd/QXJhS3rnnXFtexU7rax6Q==", "license": "ISC" }, "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, "node_modules/encodeurl": { @@ -1753,6 +1950,18 @@ "url": "https://github.com/fb55/encoding-sniffer?sponsor=1" } }, + "node_modules/encoding-sniffer/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/end-of-stream": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", @@ -1962,21 +2171,6 @@ "url": "https://opencollective.com/express" } }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/express/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, "node_modules/extract-zip": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", @@ -1997,6 +2191,29 @@ "@types/yauzl": "^2.9.1" } }, + "node_modules/extract-zip/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/extract-zip/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/fast-deep-equal": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", @@ -2025,9 +2242,9 @@ } }, "node_modules/fast-xml-parser": { - "version": "5.5.9", - "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.5.9.tgz", - "integrity": "sha512-jldvxr1MC6rtiZKgrFnDSvT8xuH+eJqxqOBThUVjYrxssYTo1avZLGql5l0a0BAERR01CadYzZ83kVEkbyDg+g==", + "version": "5.5.11", + "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.5.11.tgz", + "integrity": "sha512-QL0eb0YbSTVWF6tTf1+LEMSgtCEjBYPpnAjoLC8SscESlAjXEIRJ7cHtLG0pLeDFaZLa4VKZLArtA/60ZS7vyA==", "funding": [ { "type": "github", @@ -2037,8 +2254,8 @@ "license": "MIT", "dependencies": { "fast-xml-builder": "^1.1.4", - "path-expression-matcher": "^1.2.0", - "strnum": "^2.2.2" + "path-expression-matcher": "^1.4.0", + "strnum": "^2.2.3" }, "bin": { "fxparser": "src/cli/cli.js" @@ -2077,21 +2294,6 @@ "node": ">= 0.8" } }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, "node_modules/fingerprint-generator": { "version": "2.1.82", "resolved": "https://registry.npmjs.org/fingerprint-generator/-/fingerprint-generator-2.1.82.tgz", @@ -2275,9 +2477,9 @@ } }, "node_modules/get-port": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/get-port/-/get-port-7.1.0.tgz", - "integrity": "sha512-QB9NKEeDg3xxVwCCwJQ9+xycaz6pBB6iQ76wiWMl1927n0Kir6alPiP+yuiICLLU4jpMe08dXfpebuQppFA2zw==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/get-port/-/get-port-7.2.0.tgz", + "integrity": 
"sha512-afP4W205ONCuMoPBqcR6PSXnzX35KTcJygfJfcp+QY+uwm3p20p1YczWXhlICIzGMCxYBQcySEcOgsJcrkyobg==", "license": "MIT", "engines": { "node": ">=16" @@ -2328,6 +2530,29 @@ "node": ">= 14" } }, + "node_modules/get-uri/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/get-uri/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/github-from-package": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", @@ -2335,21 +2560,17 @@ "license": "MIT" }, "node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", + "version": "13.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.6.tgz", + "integrity": "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==", + "license": "BlueOak-1.0.0", "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" + "minimatch": "^10.2.2", + "minipass": "^7.1.3", + "path-scurry": "^2.0.2" }, - "bin": { - "glob": "dist/esm/bin.mjs" + "engines": { + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -2497,6 +2718,29 @@ "node": ">= 14" } }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/https-proxy-agent": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", @@ -2510,13 +2754,36 @@ "node": ">= 14" } }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "node_modules/https-proxy-agent/node_modules/debug": { + "version": "4.4.3", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/https-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" }, "engines": { "node": ">=0.10.0" @@ -3049,9 +3316,9 @@ } }, "node_modules/lodash": { - "version": "4.17.23", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", - "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz", + "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==", "license": "MIT" }, "node_modules/lodash.clonedeep": { @@ -3208,15 +3475,15 @@ } }, "node_modules/minimatch": { - "version": "9.0.9", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", - "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", - "license": "ISC", + "version": "10.2.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", + "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", + "license": "BlueOak-1.0.0", "dependencies": { - "brace-expansion": "^2.0.2" + "brace-expansion": "^5.0.5" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -3285,18 +3552,18 @@ } }, "node_modules/modern-tar": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/modern-tar/-/modern-tar-0.7.4.tgz", - "integrity": "sha512-5ixBi7pY+H8z3MKExsipXPq6S/Q27KpSY0K+NnIyLQLr58mNeZVhT9TkYcqa74H52DabOyrmGLhT5D7TZ/x26Q==", + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/modern-tar/-/modern-tar-0.7.6.tgz", + "integrity": "sha512-sweCIVXzx1aIGTCdzcMlSZt1h8k5Tmk08VNAuRk3IU28XamGiOH5ypi11g6De2CH7PhYqSSnGy2A/EFhbWnVKg==", "license": "MIT", "engines": { "node": ">=18.0.0" } }, "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, "node_modules/napi-build-utils": { @@ -3315,9 +3582,9 @@ } }, "node_modules/netmask": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", - "integrity": 
"sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.1.1.tgz", + "integrity": "sha512-eonl3sLUha+S1GzTPxychyhnUzKyeQkZ7jLjKrBagJgPla13F+uQ71HgpFefyHgqrjEbCPkDArxYsjY8/+gLKA==", "license": "MIT", "engines": { "node": ">= 0.4.0" @@ -3455,6 +3722,29 @@ "node": ">= 14" } }, + "node_modules/pac-proxy-agent/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/pac-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/pac-resolver": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", @@ -3539,9 +3829,9 @@ } }, "node_modules/path-expression-matcher": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.2.0.tgz", - "integrity": "sha512-DwmPWeFn+tq7TiyJ2CxezCAirXjFxvaiD03npak3cRjlP9+OjTmSy1EpIrEbh+l6JgUundniloMLDQ/6VTdhLQ==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.5.0.tgz", + "integrity": "sha512-cbrerZV+6rvdQrrD+iGMcZFEiiSrbv9Tfdkvnusy6y0x0GKBXREFg/Y65GhIfm0tnLntThhzCnfKwp1WRjeCyQ==", "funding": [ { "type": "github", @@ -3572,21 +3862,30 @@ } }, "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", + "integrity": "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==", "license": "BlueOak-1.0.0", "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" }, "engines": { - "node": ">=16 || 14 >=14.18" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "11.3.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.3.3.tgz", + "integrity": "sha512-JvNw9Y81y33E+BEYPr0U7omo+U9AySnsMsEiXgwT6yqd31VQWTLNQqmT4ou5eqPFUrTfIDFta2wKhB1hyohtAQ==", + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, "node_modules/path-to-regexp": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz", @@ -3659,6 +3958,29 @@ } } }, + "node_modules/playwright-extra/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + 
"optional": true + } + } + }, + "node_modules/playwright-extra/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/prebuild-install": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", @@ -3686,48 +4008,6 @@ "node": ">=10" } }, - "node_modules/prebuild-install/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/prebuild-install/node_modules/tar-fs": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", - "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", - "license": "MIT", - "dependencies": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.1.4" - } - }, - "node_modules/prebuild-install/node_modules/tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "license": "MIT", - "dependencies": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -3784,6 +4064,23 @@ "node": ">= 14" } }, + "node_modules/proxy-agent/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, "node_modules/proxy-agent/node_modules/lru-cache": { "version": "7.18.3", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", @@ -3793,6 +4090,12 @@ "node": ">=12" } }, + "node_modules/proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", @@ -3800,9 +4103,9 @@ "license": "MIT" }, "node_modules/pump": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", - "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.4.tgz", + "integrity": "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==", "license": "MIT", "dependencies": { 
"end-of-stream": "^1.1.0", @@ -3861,6 +4164,29 @@ } } }, + "node_modules/puppeteer-extra-plugin-stealth/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/puppeteer-extra-plugin-stealth/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/puppeteer-extra-plugin-user-data-dir": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/puppeteer-extra-plugin-user-data-dir/-/puppeteer-extra-plugin-user-data-dir-2.4.1.tgz", @@ -3888,16 +4214,39 @@ } } }, + "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/brace-expansion": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", - "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.14.tgz", + "integrity": "sha512-MWPGfDxnyzKU7rNOW9SP/c50vi3xrmrua/+6hfPbCS2ABNWfx24vPidzvC7krjU/RTo235sV776ymlsMtGKj8g==", "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, + "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -3931,6 +4280,12 @@ "node": "*" } }, + "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/puppeteer-extra-plugin-user-data-dir/node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", @@ -3974,6 +4329,52 @@ } } }, + "node_modules/puppeteer-extra-plugin-user-preferences/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + 
"supports-color": { + "optional": true + } + } + }, + "node_modules/puppeteer-extra-plugin-user-preferences/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/puppeteer-extra-plugin/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/puppeteer-extra-plugin/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/qs": { "version": "6.14.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", @@ -4019,18 +4420,6 @@ "node": ">= 0.8" } }, - "node_modules/raw-body/node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/rc": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", @@ -4047,19 +4436,17 @@ } }, "node_modules/readable-stream": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", - "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "license": "MIT", "dependencies": { - "abort-controller": "^3.0.0", - "buffer": "^6.0.3", - "events": "^3.3.0", - "process": "^0.11.10", - "string_decoder": "^1.3.0" + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">= 6" } }, "node_modules/readdir-glob": { @@ -4071,6 +4458,21 @@ "minimatch": "^5.1.0" } }, + "node_modules/readdir-glob/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/readdir-glob/node_modules/brace-expansion": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.1.0.tgz", + "integrity": "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/readdir-glob/node_modules/minimatch": { "version": "5.1.9", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.9.tgz", @@ -4131,6 +4533,73 @@ "url": "https://github.com/sponsors/isaacs" } }, + 
"node_modules/rimraf/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/rimraf/node_modules/brace-expansion": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.1.0.tgz", + "integrity": "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/safaridriver": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/safaridriver/-/safaridriver-1.0.1.tgz", @@ -4161,9 +4630,9 @@ "license": "MIT" }, "node_modules/safe-regex2": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/safe-regex2/-/safe-regex2-5.0.0.tgz", - "integrity": "sha512-YwJwe5a51WlK7KbOJREPdjNrpViQBI3p4T50lfwPuDhZnE3XGVTlGvi+aolc5+RvxDD6bnUmjVsU9n1eboLUYw==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/safe-regex2/-/safe-regex2-5.1.0.tgz", + "integrity": "sha512-pNHAuBW7TrcleFHsxBr5QMi/Iyp0ENjUKz7GCcX1UO7cMh+NmVK6HxQckNL1tJp1XAJVjG6B8OKIPqodqj9rtw==", "funding": [ { "type": "github", @@ -4177,6 +4646,9 @@ "license": "MIT", "dependencies": { "ret": "~0.5.0" + }, + "bin": { + "safe-regex2": "bin/safe-regex2.js" } }, "node_modules/safer-buffer": { @@ -4230,19 +4702,10 @@ "node": ">= 0.8.0" } }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "license": "MIT" }, "node_modules/serialize-error": { @@ -4394,13 +4857,13 @@ } }, "node_modules/side-channel-list": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", - "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.1.tgz", + "integrity": "sha512-mjn/0bi/oUURjc5Xl7IaWi/OJJJumuoJFQJfDDyO46+hBWsfaVM65TBHq2eoZBhzl9EchxOijpkbRC8SVBQU0w==", "license": "MIT", "dependencies": { "es-errors": "^1.3.0", - "object-inspect": "^1.13.3" + "object-inspect": "^1.13.4" }, "engines": { "node": ">= 0.4" @@ -4541,6 +5004,29 @@ "node": ">= 14" } }, + "node_modules/socks-proxy-agent/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socks-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", @@ -4595,9 +5081,9 @@ } }, "node_modules/streamx": { - "version": "2.23.0", - "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.23.0.tgz", - "integrity": "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==", + "version": "2.25.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.25.0.tgz", + "integrity": "sha512-0nQuG6jf1w+wddNEEXCF4nTg3LtufWINB5eFEN+5TNZW7KWJp6x87+JFL43vaAUPyCfH1wID+mNVyW6OHtFamg==", "license": "MIT", "dependencies": { "events-universal": "^1.0.0", @@ -4615,20 +5101,17 @@ } }, "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "license": "MIT", "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=12" - 
}, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/string-width-cjs": { @@ -4655,12 +5138,6 @@ "node": ">=8" } }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, "node_modules/string-width-cjs/node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -4673,13 +5150,34 @@ "node": ">=8" } }, - "node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "node_modules/string-width/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "license": "MIT", "dependencies": { - "ansi-regex": "^6.0.1" + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" }, "engines": { "node": ">=12" @@ -4720,9 +5218,9 @@ } }, "node_modules/strnum": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/strnum/-/strnum-2.2.2.tgz", - "integrity": "sha512-DnR90I+jtXNSTXWdwrEy9FakW7UX+qUZg28gj5fk2vxxl7uS/3bpI4fjFYVmdK9etptYBPNkpahuQnEwhwECqA==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/strnum/-/strnum-2.2.3.tgz", + "integrity": "sha512-oKx6RUCuHfT3oyVjtnrmn19H1SiCqgJSg+54XqURKp5aCMbrXrhLjRN9TjuwMjiYstZ0MzDrHqkGZ5dFTKd+zg==", "funding": [ { "type": "github", @@ -4744,28 +5242,31 @@ } }, "node_modules/tar-fs": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.1.tgz", - "integrity": "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg==", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", + "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", "license": "MIT", "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", - "tar-stream": "^3.1.5" - }, - "optionalDependencies": { - "bare-fs": "^4.0.1", - "bare-path": "^3.0.0" + "tar-stream": "^2.1.4" } }, "node_modules/tar-stream": { - "version": "3.1.7", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", - "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": 
"sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", "license": "MIT", "dependencies": { - "b4a": "^1.6.4", - "fast-fifo": "^1.2.0", - "streamx": "^2.15.0" + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" } }, "node_modules/teen_process": { @@ -4789,7 +5290,6 @@ "resolved": "https://registry.npmjs.org/teex/-/teex-1.0.1.tgz", "integrity": "sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==", "license": "MIT", - "optional": true, "dependencies": { "streamx": "^2.12.5" } @@ -4916,9 +5416,9 @@ } }, "node_modules/undici": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.6.tgz", - "integrity": "sha512-Xi4agocCbRzt0yYMZGMA6ApD7gvtUFaxm4ZmeacWI4cZxaF6C+8I8QfofC20NAePiB/IcvZmzkJ7XPa471AEtA==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.8.tgz", + "integrity": "sha512-6KQ/+QxK49Z/p3HO6E5ZCZWNnCasyZLa5ExaVYyvPxUwKtbCPMKELJOqh7EqOle0t9cH/7d2TaaTRRa6Nhs4YQ==", "license": "MIT", "engines": { "node": ">=20.18.1" @@ -5056,21 +5556,6 @@ "node": ">=10" } }, - "node_modules/wait-port/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/wait-port/node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -5087,19 +5572,51 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/wait-port/node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || >=14" + } + }, + "node_modules/wait-port/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/wait-port/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/webdriver": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/webdriver/-/webdriver-9.24.0.tgz", - "integrity": "sha512-2R31Ey83NzMsafkl4hdFq6GlIBvOODQMkueLjeRqYAITu3QCYiq9oqBdnWA6CdePuV4dbKlYsKRX0mwMiPclDA==", + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/webdriver/-/webdriver-9.27.0.tgz", + "integrity": "sha512-w07ThZND48SIr0b4S7eFougYUyclmoUwdmju8yXvEJiXYjDjeYUpl8wZrYPEYRBylxpSx+sBHfEUBrPQkcTTRQ==", "license": "MIT", "dependencies": { "@types/node": "^20.1.0", "@types/ws": "^8.5.3", - "@wdio/config": "9.24.0", + "@wdio/config": "9.27.0", 
"@wdio/logger": "9.18.0", - "@wdio/protocols": "9.24.0", - "@wdio/types": "9.24.0", - "@wdio/utils": "9.24.0", + "@wdio/protocols": "9.27.0", + "@wdio/types": "9.27.0", + "@wdio/utils": "9.27.0", "deepmerge-ts": "^7.0.3", "https-proxy-agent": "^7.0.6", "undici": "^6.21.3", @@ -5119,19 +5636,19 @@ } }, "node_modules/webdriverio": { - "version": "9.24.0", - "resolved": "https://registry.npmjs.org/webdriverio/-/webdriverio-9.24.0.tgz", - "integrity": "sha512-LTJt6Z/iDM0ne/4ytd3BykoPv9CuJ+CAILOzlwFeMGn4Mj02i4Bk2Rg9o/jeJ89f52hnv4OPmNjD0e8nzWAy5g==", + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/webdriverio/-/webdriverio-9.27.0.tgz", + "integrity": "sha512-Y4FbMf4bKBXpPB0lYpglzQ2GfDDe6uojmMZl85uPyrDx18NW7mqN84ZawGoIg/FRvcLaVhcOzc98WOPo725Rag==", "license": "MIT", "dependencies": { "@types/node": "^20.11.30", "@types/sinonjs__fake-timers": "^8.1.5", - "@wdio/config": "9.24.0", + "@wdio/config": "9.27.0", "@wdio/logger": "9.18.0", - "@wdio/protocols": "9.24.0", + "@wdio/protocols": "9.27.0", "@wdio/repl": "9.16.2", - "@wdio/types": "9.24.0", - "@wdio/utils": "9.24.0", + "@wdio/types": "9.27.0", + "@wdio/utils": "9.27.0", "archiver": "^7.0.1", "aria-query": "^5.3.0", "cheerio": "^1.0.0-rc.12", @@ -5148,7 +5665,7 @@ "rgb2hex": "0.2.5", "serialize-error": "^12.0.0", "urlpattern-polyfill": "^10.0.0", - "webdriver": "9.24.0" + "webdriver": "9.27.0" }, "engines": { "node": ">=18.20.0" @@ -5175,6 +5692,18 @@ "node": ">=18" } }, + "node_modules/whatwg-encoding/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/whatwg-mimetype": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", @@ -5200,17 +5729,17 @@ } }, "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "license": "MIT", "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/wrap-ansi?sponsor=1" @@ -5243,41 +5772,6 @@ "node": ">=8" } }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -5290,6 +5784,27 @@ "node": ">=8" } }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -5297,9 +5812,9 @@ "license": "ISC" }, "node_modules/ws": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", - "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", "license": "MIT", "engines": { "node": ">=10.0.0" @@ -5375,47 +5890,6 @@ "node": ">=12" } }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": 
{ - "node": ">=8" - } - }, "node_modules/yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", @@ -5449,6 +5923,46 @@ "node": ">= 14" } }, + "node_modules/zip-stream/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/zip-stream/node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, "node_modules/zod": { "version": "3.25.76", "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", diff --git a/package.json b/package.json index 309217c82..8d738c36e 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,9 @@ "agent-browser": "^0.13.0", "@askjo/camoufox-browser": "^1.0.0" }, + "overrides": { + "lodash": "4.18.1" + }, "engines": { "node": ">=18.0.0" } diff --git a/pyproject.toml b/pyproject.toml index 5c040c179..f43358f89 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "hermes-agent" -version = "0.8.0" +version = "0.9.0" description = "The self-improving AI agent โ€” creates skills from experience, improves them during use, and runs anywhere" readme = "README.md" requires-python = ">=3.11" @@ -76,6 +76,7 @@ termux = [ ] dingtalk = ["dingtalk-stream>=0.1.0,<1"] feishu = ["lark-oapi>=1.5.3,<2"] +web = ["fastapi>=0.104.0,<1", "uvicorn[standard]>=0.24.0,<1"] rl = [ "atroposlib @ git+https://github.com/NousResearch/atropos.git", "tinker @ git+https://github.com/thinking-machines-lab/tinker.git", @@ -107,6 +108,7 @@ all = [ "hermes-agent[dingtalk]", "hermes-agent[feishu]", "hermes-agent[mistral]", + "hermes-agent[web]", ] [project.scripts] @@ -117,6 +119,9 @@ hermes-acp = "acp_adapter.entry:main" [tool.setuptools] py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli", "hermes_constants", "hermes_state", "hermes_time", "hermes_logging", "rl_cli", "utils"] +[tool.setuptools.package-data] +hermes_cli = ["web_dist/**/*"] + [tool.setuptools.packages.find] include = ["agent", "tools", "tools.*", "hermes_cli", "gateway", "gateway.*", "tui_gateway", "tui_gateway.*", "cron", "acp_adapter", "plugins", "plugins.*"] diff --git a/run_agent.py b/run_agent.py index 71092a58b..fdfdca85a 100644 --- a/run_agent.py +++ b/run_agent.py @@ -460,6 +460,40 @@ def _sanitize_messages_non_ascii(messages: list) -> bool: return found +def _sanitize_tools_non_ascii(tools: list) -> bool: + """Strip non-ASCII characters from tool payloads in-place.""" + return _sanitize_structure_non_ascii(tools) + + +def _sanitize_structure_non_ascii(payload: Any) -> bool: + """Strip non-ASCII 
characters from nested dict/list payloads in-place.""" + found = False + + def _walk(node): + nonlocal found + if isinstance(node, dict): + for key, value in node.items(): + if isinstance(value, str): + sanitized = _strip_non_ascii(value) + if sanitized != value: + node[key] = sanitized + found = True + elif isinstance(value, (dict, list)): + _walk(value) + elif isinstance(node, list): + for idx, value in enumerate(node): + if isinstance(value, str): + sanitized = _strip_non_ascii(value) + if sanitized != value: + node[idx] = sanitized + found = True + elif isinstance(value, (dict, list)): + _walk(value) + + _walk(payload) + return found + + @@ -675,9 +709,17 @@ class AIAgent: # on /v1/chat/completions by both OpenAI and OpenRouter. Also # auto-upgrade for direct OpenAI URLs (api.openai.com) since all # newer tool-calling models prefer Responses there. - if self.api_mode == "chat_completions" and ( - self._is_direct_openai_url() - or self._model_requires_responses_api(self.model) + # ACP runtimes are excluded: CopilotACPClient handles its own + # routing and does not implement the Responses API surface. + if ( + self.api_mode == "chat_completions" + and self.provider != "copilot-acp" + and not str(self.base_url or "").lower().startswith("acp://copilot") + and not str(self.base_url or "").lower().startswith("acp+tcp://") + and ( + self._is_direct_openai_url() + or self._model_requires_responses_api(self.model) + ) ): self.api_mode = "codex_responses" @@ -737,6 +779,7 @@ class AIAgent: self.service_tier = service_tier self.request_overrides = dict(request_overrides or {}) self.prefill_messages = prefill_messages or [] # Prefilled conversation turns + self._force_ascii_payload = False # Anthropic prompt caching: auto-enabled for Claude models via OpenRouter. 
# Reduces input costs by ~75% on multi-turn conversations by caching the @@ -1212,7 +1255,6 @@ class AIAgent: _compression_cfg = {} compression_threshold = float(_compression_cfg.get("threshold", 0.50)) compression_enabled = str(_compression_cfg.get("enabled", True)).lower() in ("true", "1", "yes") - compression_summary_model = _compression_cfg.get("summary_model") or None compression_target_ratio = float(_compression_cfg.get("target_ratio", 0.20)) compression_protect_last = int(_compression_cfg.get("protect_last_n", 20)) @@ -1233,24 +1275,29 @@ class AIAgent: # Check custom_providers per-model context_length if _config_context_length is None: - _custom_providers = _agent_cfg.get("custom_providers") - if isinstance(_custom_providers, list): - for _cp_entry in _custom_providers: - if not isinstance(_cp_entry, dict): - continue - _cp_url = (_cp_entry.get("base_url") or "").rstrip("/") - if _cp_url and _cp_url == self.base_url.rstrip("/"): - _cp_models = _cp_entry.get("models", {}) - if isinstance(_cp_models, dict): - _cp_model_cfg = _cp_models.get(self.model, {}) - if isinstance(_cp_model_cfg, dict): - _cp_ctx = _cp_model_cfg.get("context_length") - if _cp_ctx is not None: - try: - _config_context_length = int(_cp_ctx) - except (TypeError, ValueError): - pass - break + try: + from hermes_cli.config import get_compatible_custom_providers + _custom_providers = get_compatible_custom_providers(_agent_cfg) + except Exception: + _custom_providers = _agent_cfg.get("custom_providers") + if not isinstance(_custom_providers, list): + _custom_providers = [] + for _cp_entry in _custom_providers: + if not isinstance(_cp_entry, dict): + continue + _cp_url = (_cp_entry.get("base_url") or "").rstrip("/") + if _cp_url and _cp_url == self.base_url.rstrip("/"): + _cp_models = _cp_entry.get("models", {}) + if isinstance(_cp_models, dict): + _cp_model_cfg = _cp_models.get(self.model, {}) + if isinstance(_cp_model_cfg, dict): + _cp_ctx = _cp_model_cfg.get("context_length") + if _cp_ctx is not None: + try: + _config_context_length = int(_cp_ctx) + except (TypeError, ValueError): + pass + break # Select context engine: config-driven (like memory providers). # 1. Check config.yaml context.engine setting @@ -1292,6 +1339,22 @@ class AIAgent: if _selected_engine is not None: self.context_compressor = _selected_engine + # Resolve context_length for plugin engines โ€” mirrors switch_model() path + from agent.model_metadata import get_model_context_length + _plugin_ctx_len = get_model_context_length( + self.model, + base_url=self.base_url, + api_key=getattr(self, "api_key", ""), + config_context_length=_config_context_length, + provider=self.provider, + ) + self.context_compressor.update_model( + model=self.model, + context_length=_plugin_ctx_len, + base_url=self.base_url, + api_key=getattr(self, "api_key", ""), + provider=self.provider, + ) if not self.quiet_mode: logger.info("Using context engine: %s", _selected_engine.name) else: @@ -1301,7 +1364,7 @@ class AIAgent: protect_first_n=3, protect_last_n=compression_protect_last, summary_target_ratio=compression_target_ratio, - summary_model_override=compression_summary_model, + summary_model_override=None, quiet_mode=self.quiet_mode, base_url=self.base_url, api_key=getattr(self, "api_key", ""), @@ -1748,10 +1811,25 @@ class AIAgent: aux_base_url = str(getattr(client, "base_url", "")) aux_api_key = str(getattr(client, "api_key", "")) + + # Read user-configured context_length for the compression model. 
+ # Custom endpoints often don't support /models API queries so + # get_model_context_length() falls through to the 128K default, + # ignoring the explicit config value. Pass it as the highest- + # priority hint so the configured value is always respected. + _aux_cfg = (self.config or {}).get("auxiliary", {}).get("compression", {}) + _aux_context_config = _aux_cfg.get("context_length") if isinstance(_aux_cfg, dict) else None + if _aux_context_config is not None: + try: + _aux_context_config = int(_aux_context_config) + except (TypeError, ValueError): + _aux_context_config = None + aux_context = get_model_context_length( aux_model, base_url=aux_base_url, api_key=aux_api_key, + config_context_length=_aux_context_config, ) threshold = self.context_compressor.threshold_tokens @@ -1872,12 +1950,13 @@ class AIAgent: if not content: return "" # Strip all reasoning tag variants: , , , - # , + # , , (Gemma 4) content = re.sub(r'.*?', '', content, flags=re.DOTALL) content = re.sub(r'.*?', '', content, flags=re.DOTALL | re.IGNORECASE) content = re.sub(r'.*?', '', content, flags=re.DOTALL) content = re.sub(r'.*?', '', content, flags=re.DOTALL) - content = re.sub(r'\s*', '', content, flags=re.IGNORECASE) + content = re.sub(r'.*?', '', content, flags=re.DOTALL | re.IGNORECASE) + content = re.sub(r'\s*', '', content, flags=re.IGNORECASE) return content def _looks_like_codex_intermediate_ack( @@ -2002,6 +2081,7 @@ class AIAgent: inline_patterns = ( r"(.*?)", r"(.*?)", + r"(.*?)", r"(.*?)", r"(.*?)", ) @@ -4262,6 +4342,7 @@ class AIAgent: try: with active_client.responses.stream(**api_kwargs) as stream: for event in stream: + self._touch_activity("receiving stream response") if self._interrupt_requested: break event_type = getattr(event, "type", "") @@ -4386,6 +4467,7 @@ class AIAgent: collected_text_deltas: list = [] try: for event in stream_or_response: + self._touch_activity("receiving stream response") event_type = getattr(event, "type", None) if not event_type and isinstance(event, dict): event_type = event.get("type") @@ -4688,6 +4770,11 @@ class AIAgent: Each worker thread gets its own OpenAI client instance. Interrupts only close that worker-local client, so retries and other requests never inherit a closed transport. + + Includes a stale-call detector: if no response arrives within the + configured timeout, the connection is killed and an error raised so + the main retry loop can try again with backoff / credential rotation / + provider fallback. """ result = {"response": None, "error": None} request_client_holder = {"client": None} @@ -4713,10 +4800,86 @@ class AIAgent: if request_client is not None: self._close_request_openai_client(request_client, reason="request_complete") + # โ”€โ”€ Stale-call timeout (mirrors streaming stale detector) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + # Non-streaming calls return nothing until the full response is + # ready. Without this, a hung provider can block for the full + # httpx timeout (default 1800s) with zero feedback. The stale + # detector kills the connection early so the main retry loop can + # apply richer recovery (credential rotation, provider fallback). 
+ _stale_base = float(os.getenv("HERMES_API_CALL_STALE_TIMEOUT", 300.0)) + _base_url = getattr(self, "_base_url", None) or "" + if _stale_base == 300.0 and _base_url and is_local_endpoint(_base_url): + _stale_timeout = float("inf") + else: + _est_tokens = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4 + if _est_tokens > 100_000: + _stale_timeout = max(_stale_base, 600.0) + elif _est_tokens > 50_000: + _stale_timeout = max(_stale_base, 450.0) + else: + _stale_timeout = _stale_base + + _call_start = time.time() + self._touch_activity("waiting for non-streaming API response") + t = threading.Thread(target=_call, daemon=True) t.start() + _poll_count = 0 while t.is_alive(): t.join(timeout=0.3) + _poll_count += 1 + + # Touch activity every ~30s so the gateway's inactivity + # monitor knows we're alive while waiting for the response. + if _poll_count % 100 == 0: # 100 ร— 0.3s = 30s + _elapsed = time.time() - _call_start + self._touch_activity( + f"waiting for non-streaming response ({int(_elapsed)}s elapsed)" + ) + + # Stale-call detector: kill the connection if no response + # arrives within the configured timeout. + _elapsed = time.time() - _call_start + if _elapsed > _stale_timeout: + _est_ctx = sum(len(str(v)) for v in api_kwargs.get("messages", [])) // 4 + logger.warning( + "Non-streaming API call stale for %.0fs (threshold %.0fs). " + "model=%s context=~%s tokens. Killing connection.", + _elapsed, _stale_timeout, + api_kwargs.get("model", "unknown"), f"{_est_ctx:,}", + ) + self._emit_status( + f"โš ๏ธ No response from provider for {int(_elapsed)}s " + f"(non-streaming, model: {api_kwargs.get('model', 'unknown')}). " + f"Aborting call." + ) + try: + if self.api_mode == "anthropic_messages": + from agent.anthropic_adapter import build_anthropic_client + + self._anthropic_client.close() + self._anthropic_client = build_anthropic_client( + self._anthropic_api_key, + getattr(self, "_anthropic_base_url", None), + ) + else: + rc = request_client_holder.get("client") + if rc is not None: + self._close_request_openai_client(rc, reason="stale_call_kill") + except Exception: + pass + self._touch_activity( + f"stale non-streaming call killed after {int(_elapsed)}s" + ) + # Wait briefly for the thread to notice the closed connection. + t.join(timeout=2.0) + if result["error"] is None and result["response"] is None: + result["error"] = TimeoutError( + f"Non-streaming API call timed out after {int(_elapsed)}s " + f"with no response (threshold: {int(_stale_timeout)}s)" + ) + break + if self._interrupt_requested: # Force-close the in-flight worker-local HTTP connection to stop # token generation without poisoning the shared client used to @@ -4937,12 +5100,9 @@ class AIAgent: role = "assistant" reasoning_parts: list = [] usage_obj = None - _first_chunk_seen = False for chunk in stream: last_chunk_time["t"] = time.time() - if not _first_chunk_seen: - _first_chunk_seen = True - self._touch_activity("receiving stream response") + self._touch_activity("receiving stream response") if self._interrupt_requested: break @@ -5118,6 +5278,7 @@ class AIAgent: # actively arriving (the chat_completions path # already does this at the top of its chunk loop). last_chunk_time["t"] = time.time() + self._touch_activity("receiving stream response") if self._interrupt_requested: break @@ -5231,6 +5392,10 @@ class AIAgent: f"({type(e).__name__}). 
Reconnectingโ€ฆ " f"(attempt {_stream_attempt + 2}/{_max_stream_retries + 1})" ) + self._touch_activity( + f"stream retry {_stream_attempt + 2}/{_max_stream_retries + 1} " + f"after {type(e).__name__}" + ) # Close the stale request client before retry stale = request_client_holder.get("client") if stale is not None: @@ -5255,8 +5420,7 @@ class AIAgent: "try again in a moment." ) logger.warning( - "Streaming exhausted %s retries on transient error, " - "falling back to non-streaming: %s", + "Streaming exhausted %s retries on transient error: %s", _max_stream_retries + 1, e, ) @@ -5267,25 +5431,24 @@ class AIAgent: and "not supported" in _err_lower ) if _is_stream_unsupported: + self._disable_streaming = True self._safe_print( "\nโš  Streaming is not supported for this " - "model/provider. Falling back to non-streaming.\n" + "model/provider. Switching to non-streaming.\n" " To avoid this delay, set display.streaming: false " "in config.yaml\n" ) logger.info( - "Streaming failed before delivery, falling back to non-streaming: %s", + "Streaming failed before delivery: %s", e, ) - try: - # Reset stale timer โ€” the non-streaming fallback - # uses its own client; prevent the stale detector - # from firing on stale timestamps from failed streams. - last_chunk_time["t"] = time.time() - result["response"] = self._interruptible_api_call(api_kwargs) - except Exception as fallback_err: - result["error"] = fallback_err + # Propagate the error to the main retry loop instead of + # falling back to non-streaming inline. The main loop has + # richer recovery: credential rotation, provider fallback, + # backoff, and โ€” for "stream not supported" โ€” will switch + # to non-streaming on the next attempt via _disable_streaming. + result["error"] = e return finally: request_client = request_client_holder.get("client") @@ -5351,6 +5514,9 @@ class AIAgent: # Reset the timer so we don't kill repeatedly while # the inner thread processes the closure. last_chunk_time["t"] = time.time() + self._touch_activity( + f"stale stream detected after {int(_stale_elapsed)}s, reconnecting" + ) if self._interrupt_requested: try: @@ -5376,13 +5542,22 @@ class AIAgent: # a new API call, creating a duplicate message. Return a # partial "stop" response instead so the outer loop treats this # turn as complete (no retry, no fallback). + # Recover whatever content was already streamed to the user. + # _current_streamed_assistant_text accumulates text fired + # through _fire_stream_delta, so it has exactly what the + # user saw before the connection died. + _partial_text = ( + getattr(self, "_current_streamed_assistant_text", "") or "" + ).strip() or None logger.warning( "Partial stream delivered before error; returning stub " - "response to prevent duplicate messages: %s", + "response with %s chars of recovered content to prevent " + "duplicate messages: %s", + len(_partial_text or ""), result["error"], ) _stub_msg = SimpleNamespace( - role="assistant", content=None, tool_calls=None, + role="assistant", content=_partial_text, tool_calls=None, reasoning_content=None, ) return SimpleNamespace( @@ -5841,11 +6016,12 @@ class AIAgent: """True when using an anthropic-compatible endpoint that preserves dots in model names. Alibaba/DashScope keeps dots (e.g. qwen3.5-plus). MiniMax keeps dots (e.g. MiniMax-M2.7). - OpenCode Go keeps dots (e.g. minimax-m2.7).""" - if (getattr(self, "provider", "") or "").lower() in {"alibaba", "minimax", "minimax-cn", "opencode-go"}: + OpenCode Go/Zen keeps dots for non-Claude models (e.g. minimax-m2.5-free). 
+            if self._force_ascii_payload:
+                _sanitize_structure_non_ascii(api_kwargs)
 
             if self.api_mode == "codex_responses":
                 api_kwargs = self._preflight_codex_api_kwargs(api_kwargs, allow_stream=False)
@@ -8125,7 +8303,12 @@ class AIAgent:
                 self.thinking_callback("")
 
             _use_streaming = True
-            if not self._has_stream_consumers():
+            # Provider signaled "stream not supported" on a previous
+            # attempt — switch to non-streaming for the rest of this
+            # session instead of re-failing every retry.
+            if getattr(self, "_disable_streaming", False):
+                _use_streaming = False
+            elif not self._has_stream_consumers():
                 # No display/TTS consumer. Still prefer streaming for
                 # health checking, but skip for Mock clients in tests
                 # (mocks return SimpleNamespace, not stream iterators).
@@ -8225,7 +8408,8 @@ class AIAgent:
                 if self.thinking_callback:
                     self.thinking_callback("")
 
-            # This is often rate limiting or provider returning malformed response
+            # Invalid response — could be rate limiting, provider timeout,
+            # upstream server error, or malformed response.
             retry_count += 1
 
             # Eager fallback: empty/malformed responses are a common
@@ -8261,11 +8445,44 @@ class AIAgent:
             if self.verbose_logging:
                 logging.debug(f"Response attributes for invalid response: {resp_attrs}")
 
+            # Extract error code from response for contextual diagnostics
+            _resp_error_code = None
+            if response and hasattr(response, 'error') and response.error:
+                _code_raw = getattr(response.error, 'code', None)
+                if _code_raw is None and isinstance(response.error, dict):
+                    _code_raw = response.error.get('code')
+                if _code_raw is not None:
+                    try:
+                        _resp_error_code = int(_code_raw)
+                    except (TypeError, ValueError):
+                        pass
+
+            # Build a human-readable failure hint from the error code
+            # and response time, instead of always assuming rate limiting.
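+            # (Status semantics, for reference: 524 is Cloudflare's
+            # "origin timed out", meaning the edge gave up waiting on the
+            # provider; 529 is the overload code some providers, e.g.
+            # Anthropic, return under load. Both are generally retryable.)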
+            if _resp_error_code == 524:
+                _failure_hint = f"upstream provider timed out (Cloudflare 524, {api_duration:.0f}s)"
+            elif _resp_error_code == 504:
+                _failure_hint = f"upstream gateway timeout (504, {api_duration:.0f}s)"
+            elif _resp_error_code == 429:
+                _failure_hint = "rate limited by upstream provider (429)"
+            elif _resp_error_code in (500, 502):
+                _failure_hint = f"upstream server error ({_resp_error_code}, {api_duration:.0f}s)"
+            elif _resp_error_code in (503, 529):
+                _failure_hint = f"upstream provider overloaded ({_resp_error_code})"
+            elif _resp_error_code is not None:
+                _failure_hint = f"upstream error (code {_resp_error_code}, {api_duration:.0f}s)"
+            elif api_duration < 10:
+                _failure_hint = f"fast response ({api_duration:.1f}s) — likely rate limited"
+            elif api_duration > 60:
+                _failure_hint = f"slow response ({api_duration:.0f}s) — likely upstream timeout"
+            else:
+                _failure_hint = f"response time {api_duration:.1f}s"
+
             self._vprint(f"{self.log_prefix}⚠️ Invalid API response (attempt {retry_count}/{max_retries}): {', '.join(error_details)}", force=True)
             self._vprint(f"{self.log_prefix}   🏢 Provider: {provider_name}", force=True)
             cleaned_provider_error = self._clean_error_message(error_msg)
             self._vprint(f"{self.log_prefix}   📝 Provider message: {cleaned_provider_error}", force=True)
-            self._vprint(f"{self.log_prefix}   ⏱️ Response time: {api_duration:.2f}s (fast response often indicates rate limiting)", force=True)
+            self._vprint(f"{self.log_prefix}   ⏱️ {_failure_hint}", force=True)
 
             if retry_count >= max_retries:
                 # Try fallback before giving up
@@ -8282,31 +8499,39 @@ class AIAgent:
                     "messages": messages,
                     "completed": False,
                     "api_calls": api_call_count,
-                    "error": "Invalid API response shape. Likely rate limited or malformed provider response.",
+                    "error": f"Invalid API response after {max_retries} retries: {_failure_hint}",
                     "failed": True  # Mark as failure for filtering
                 }
 
-            # Longer backoff for rate limiting (likely cause of None choices)
-            # Jittered exponential: 5s base, 120s cap + random jitter
+            # Backoff before retry — jittered exponential: 5s base, 120s cap
             wait_time = jittered_backoff(retry_count, base_delay=5.0, max_delay=120.0)
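+            # Illustrative delay curve (assuming the usual doubling shape):
+            # retries wait roughly 5s, 10s, 20s, 40s, 80s, then cap at 120s,
+            # each plus random jitter to avoid thundering-herd retries.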
-            self._vprint(f"{self.log_prefix}⏳ Retrying in {wait_time:.1f}s (extended backoff)...", force=True)
+            self._vprint(f"{self.log_prefix}⏳ Retrying in {wait_time:.1f}s ({_failure_hint})...", force=True)
             logging.warning(f"Invalid API response (retry {retry_count}/{max_retries}): {', '.join(error_details)} | Provider: {provider_name}")
 
             # Sleep in small increments to stay responsive to interrupts
             sleep_end = time.time() + wait_time
+            _backoff_touch_counter = 0
             while time.time() < sleep_end:
                 if self._interrupt_requested:
                     self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True)
                     self._persist_session(messages, conversation_history)
                     self.clear_interrupt()
                     return {
-                        "final_response": f"Operation interrupted: retrying API call after rate limit (retry {retry_count}/{max_retries}).",
+                        "final_response": f"Operation interrupted during retry ({_failure_hint}, attempt {retry_count}/{max_retries}).",
                         "messages": messages,
                         "api_calls": api_call_count,
                         "completed": False,
                         "interrupted": True,
                     }
                 time.sleep(0.2)
+                # Touch activity every ~30s so the gateway's inactivity
+                # monitor knows we're alive during backoff waits.
+                _backoff_touch_counter += 1
+                if _backoff_touch_counter % 150 == 0:  # 150 × 0.2s = 30s
+                    self._touch_activity(
+                        f"retry backoff ({retry_count}/{max_retries}), "
+                        f"{int(sleep_end - time.time())}s remaining"
+                    )
             continue  # Retry the API call
 
         # Check finish_reason before proceeding
@@ -8661,18 +8886,56 @@ class AIAgent:
                     )
                     continue
                 if _is_ascii_codec:
+                    self._force_ascii_payload = True
                     # ASCII codec: the system encoding can't handle
                     # non-ASCII characters at all. Sanitize all
-                    # non-ASCII content from messages and retry.
-                    if _sanitize_messages_non_ascii(messages):
+                    # non-ASCII content from messages/tool schemas and retry.
+                    _messages_sanitized = _sanitize_messages_non_ascii(messages)
+                    _prefill_sanitized = False
+                    if isinstance(getattr(self, "prefill_messages", None), list):
+                        _prefill_sanitized = _sanitize_messages_non_ascii(self.prefill_messages)
+
+                    _tools_sanitized = False
+                    if isinstance(getattr(self, "tools", None), list):
+                        _tools_sanitized = _sanitize_tools_non_ascii(self.tools)
+
+                    _system_sanitized = False
+                    if isinstance(active_system_prompt, str):
+                        _sanitized_system = _strip_non_ascii(active_system_prompt)
+                        if _sanitized_system != active_system_prompt:
+                            active_system_prompt = _sanitized_system
+                            self._cached_system_prompt = _sanitized_system
+                            _system_sanitized = True
+                    if isinstance(getattr(self, "ephemeral_system_prompt", None), str):
+                        _sanitized_ephemeral = _strip_non_ascii(self.ephemeral_system_prompt)
+                        if _sanitized_ephemeral != self.ephemeral_system_prompt:
+                            self.ephemeral_system_prompt = _sanitized_ephemeral
+                            _system_sanitized = True
+
+                    _headers_sanitized = False
+                    _default_headers = (
+                        self._client_kwargs.get("default_headers")
+                        if isinstance(getattr(self, "_client_kwargs", None), dict)
+                        else None
+                    )
+                    if isinstance(_default_headers, dict):
+                        _headers_sanitized = _sanitize_structure_non_ascii(_default_headers)
+
+                    if (
+                        _messages_sanitized
+                        or _prefill_sanitized
+                        or _tools_sanitized
+                        or _system_sanitized
+                        or _headers_sanitized
+                    ):
                         self._unicode_sanitization_passes += 1
                         self._vprint(
-                            f"{self.log_prefix}⚠️ System encoding is ASCII — stripped non-ASCII characters from messages. Retrying...",
+                            f"{self.log_prefix}⚠️ System encoding is ASCII — stripped non-ASCII characters from request payload. Retrying...",
                             force=True,
                         )
                         continue
-                    # Nothing to sanitize in messages — might be in system
-                    # prompt or prefill. Fall through to normal error path.
+                    # Nothing to sanitize in any payload component.
+                    # Fall through to normal error path.
 
                 status_code = getattr(api_error, "status_code", None)
                 error_context = self._extract_api_error_context(api_error)
@@ -8779,6 +9042,9 @@ class AIAgent:
                 retry_count += 1
                 elapsed_time = time.time() - api_start_time
+                self._touch_activity(
+                    f"API error recovery (attempt {retry_count}/{max_retries})"
+                )
 
                 error_type = type(api_error).__name__
                 error_msg = str(api_error).lower()
@@ -9305,6 +9571,7 @@ class AIAgent:
                 # Sleep in small increments so we can respond to interrupts quickly
                 # instead of blocking the entire wait_time in one sleep() call
                 sleep_end = time.time() + wait_time
+                _backoff_touch_counter = 0
                 while time.time() < sleep_end:
                     if self._interrupt_requested:
                         self._vprint(f"{self.log_prefix}⚡ Interrupt detected during retry wait, aborting.", force=True)
@@ -9318,6 +9585,14 @@ class AIAgent:
                             "interrupted": True,
                         }
                     time.sleep(0.2)  # Check interrupt every 200ms
+                    # Touch activity every ~30s so the gateway's inactivity
+                    # monitor knows we're alive during backoff waits.
+                    _backoff_touch_counter += 1
+                    if _backoff_touch_counter % 150 == 0:  # 150 × 0.2s = 30s
+                        self._touch_activity(
+                            f"error retry backoff ({retry_count}/{max_retries}), "
+                            f"{int(sleep_end - time.time())}s remaining"
+                        )
 
         # If the API call was interrupted, skip response processing
         if interrupted:
@@ -9703,12 +9978,25 @@ class AIAgent:
 
             # Pop thinking-only prefill message(s) before appending
             # (tool-call path — same rationale as the final-response path).
+            _had_prefill = False
             while (
                 messages
                 and isinstance(messages[-1], dict)
                 and messages[-1].get("_thinking_prefill")
             ):
                 messages.pop()
+                _had_prefill = True
+
+            # Reset prefill counter when tool calls follow a prefill
+            # recovery. Without this, the counter accumulates across
+            # the whole conversation — a model that intermittently
+            # empties (empty → prefill → tools → empty → prefill →
+            # tools) burns both prefill attempts and the third empty
+            # gets zero recovery. Resetting here treats each tool-
+            # call success as a fresh start.
+            if _had_prefill:
+                self._thinking_prefill_retries = 0
+                self._empty_content_retries = 0
 
             messages.append(assistant_msg)
             self._emit_interim_assistant_message(assistant_msg)
@@ -9827,6 +10115,30 @@ class AIAgent:
 
                 # Check if response only has think block with no actual content after it
                 if not self._has_content_after_think_block(final_response):
+                    # ── Partial stream recovery ─────────────────────────
+                    # If content was already streamed to the user before
+                    # the connection died, use it as the final response
+                    # instead of falling through to prior-turn fallback
+                    # or wasting API calls on retries.
+                    _partial_streamed = (
+                        getattr(self, "_current_streamed_assistant_text", "") or ""
+                    )
+                    if self._has_content_after_think_block(_partial_streamed):
+                        _turn_exit_reason = "partial_stream_recovery"
+                        _recovered = self._strip_think_blocks(_partial_streamed).strip()
+                        logger.info(
+                            "Partial stream content delivered (%d chars) "
+                            "— using as final response",
+                            len(_recovered),
+                        )
+                        self._emit_status(
+                            "↻ Stream interrupted — using delivered content "
+                            "as final response"
+                        )
+                        final_response = _recovered
+                        self._response_was_previewed = True
+                        break
+
                     # If the previous turn already delivered real content alongside
                     # tool calls (e.g. "You're welcome!" + memory save), the model
                     # has nothing more to say. Use the earlier content immediately
@@ -9884,16 +10196,23 @@ class AIAgent:
                         self._save_session_log(messages)
                         continue
 
-                    # ── Empty response retry (no reasoning) ──────
-                    # Model returned nothing — no content, no
-                    # structured reasoning, no tool calls. Common
-                    # with open models (transient provider issues,
-                    # rate limits, sampling flukes). Retry up to 3
-                    # times before attempting fallback. Skip when
-                    # content has inline <think> tags (model chose
-                    # to reason, just no visible text).
-                    _truly_empty = not final_response.strip()
-                    if _truly_empty and not _has_structured and self._empty_content_retries < 3:
+                    # ── Empty response retry ────────────────────────────
+                    # Model returned nothing usable. Retry up to 3
+                    # times before attempting fallback. This covers
+                    # both truly empty responses (no content, no
+                    # reasoning) AND reasoning-only responses after
+                    # prefill exhaustion — models like mimo-v2-pro
+                    # always populate reasoning fields via OpenRouter,
+                    # so the old `not _has_structured` guard blocked
+                    # retries for every reasoning model after prefill.
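+                    # Illustrative failure shape (hypothetical payload):
+                    # {"content": "", "reasoning_content": "...thinking..."}
+                    # sets _has_structured yet shows the user nothing; once
+                    # both prefill attempts are spent, it now retries as empty.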
+                    _truly_empty = not self._strip_think_blocks(
+                        final_response
+                    ).strip()
+                    _prefill_exhausted = (
+                        _has_structured
+                        and self._thinking_prefill_retries >= 2
+                    )
+                    if _truly_empty and (not _has_structured or _prefill_exhausted) and self._empty_content_retries < 3:
                         self._empty_content_retries += 1
                         logger.warning(
                             "Empty response (no content or reasoning) — "
@@ -10087,17 +10406,11 @@ class AIAgent:
         if final_response is None and (
             api_call_count >= self.max_iterations
             or self.iteration_budget.remaining <= 0
-        ) and not self._budget_exhausted_injected:
-            # Budget exhausted but we haven't tried asking the model to
-            # summarise yet. Inject a user message and give it one grace
-            # API call to produce a text response.
-            self._budget_exhausted_injected = True
-            self._budget_grace_call = True
-            _grace_msg = (
-                "Your tool budget ran out. Please give me the information "
-                "or actions you've completed so far."
-            )
-            messages.append({"role": "user", "content": _grace_msg})
+        ):
+            # Budget exhausted — ask the model for a summary via one extra
+            # API call with tools stripped. _handle_max_iterations injects a
+            # user message and makes a single toolless request.
+            _turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})"
             self._emit_status(
                 f"⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
                 "— asking model to summarise"
             )
@@ -10107,14 +10420,6 @@ class AIAgent:
                     f"\n⚠️ Iteration budget exhausted ({api_call_count}/{self.max_iterations}) "
                     "— requesting summary..."
                 )
-
-        if final_response is None and (
-            api_call_count >= self.max_iterations
-            or self.iteration_budget.remaining <= 0
-        ) and not self._budget_grace_call:
-            _turn_exit_reason = f"max_iterations_reached({api_call_count}/{self.max_iterations})"
-            if self.iteration_budget.remaining <= 0 and not self.quiet_mode:
-                print(f"\n⚠️ Iteration budget exhausted ({self.iteration_budget.used}/{self.iteration_budget.max_total} iterations used)")
         final_response = self._handle_max_iterations(messages, api_call_count)
 
     # Determine if conversation completed successfully
diff --git a/scripts/build_skills_index.py b/scripts/build_skills_index.py
new file mode 100644
index 000000000..efa1ba76e
--- /dev/null
+++ b/scripts/build_skills_index.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python3
+"""Build the Hermes Skills Index — a centralized JSON catalog of all skills.
+
+This script crawls every skill source (skills.sh, GitHub taps, official,
+clawhub, lobehub, claude-marketplace) and writes a JSON index with resolved
+GitHub paths. The index is served as a static file on the docs site so that
+`hermes skills search/install` can use it without hitting the GitHub API.
+
+Usage:
+    # Local (uses gh CLI or GITHUB_TOKEN for auth)
+    python scripts/build_skills_index.py
+
+    # CI (set GITHUB_TOKEN as secret)
+    GITHUB_TOKEN=ghp_...
python scripts/build_skills_index.py + +Output: website/static/api/skills-index.json +""" + +import json +import os +import sys +import time +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor, as_completed +from datetime import datetime, timezone + +# Allow importing from repo root +REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, REPO_ROOT) + +# Ensure HERMES_HOME is set (needed by tools/skills_hub.py imports) +os.environ.setdefault("HERMES_HOME", os.path.join(os.path.expanduser("~"), ".hermes")) + +from tools.skills_hub import ( + GitHubAuth, + GitHubSource, + SkillsShSource, + OptionalSkillSource, + WellKnownSkillSource, + ClawHubSource, + ClaudeMarketplaceSource, + LobeHubSource, + SkillMeta, +) +import httpx + +OUTPUT_PATH = os.path.join(REPO_ROOT, "website", "static", "api", "skills-index.json") +INDEX_VERSION = 1 + + +def _meta_to_dict(meta: SkillMeta) -> dict: + """Convert a SkillMeta to a serializable dict.""" + return { + "name": meta.name, + "description": meta.description, + "source": meta.source, + "identifier": meta.identifier, + "trust_level": meta.trust_level, + "repo": meta.repo or "", + "path": meta.path or "", + "tags": meta.tags or [], + "extra": meta.extra or {}, + } + + +def crawl_source(source, source_name: str, limit: int) -> list: + """Crawl a single source and return skill dicts.""" + print(f" Crawling {source_name}...", flush=True) + start = time.time() + try: + results = source.search("", limit=limit) + except Exception as e: + print(f" Error crawling {source_name}: {e}", file=sys.stderr) + return [] + skills = [_meta_to_dict(m) for m in results] + elapsed = time.time() - start + print(f" {source_name}: {len(skills)} skills ({elapsed:.1f}s)", flush=True) + return skills + + +def crawl_skills_sh(source: SkillsShSource) -> list: + """Crawl skills.sh using popular queries for broad coverage.""" + print(" Crawling skills.sh (popular queries)...", flush=True) + start = time.time() + + queries = [ + "", # featured + "react", "python", "web", "api", "database", "docker", + "testing", "scraping", "design", "typescript", "git", + "aws", "security", "data", "ml", "ai", "devops", + "frontend", "backend", "mobile", "cli", "documentation", + "kubernetes", "terraform", "rust", "go", "java", + ] + + all_skills: dict[str, dict] = {} + for query in queries: + try: + results = source.search(query, limit=50) + for meta in results: + entry = _meta_to_dict(meta) + if entry["identifier"] not in all_skills: + all_skills[entry["identifier"]] = entry + except Exception as e: + print(f" Warning: skills.sh search '{query}' failed: {e}", + file=sys.stderr) + + elapsed = time.time() - start + print(f" skills.sh: {len(all_skills)} unique skills ({elapsed:.1f}s)", + flush=True) + return list(all_skills.values()) + + +def _fetch_repo_tree(repo: str, auth: GitHubAuth) -> list: + """Fetch the recursive tree for a repo. 
Returns list of tree entries.""" + headers = auth.get_headers() + try: + resp = httpx.get( + f"https://api.github.com/repos/{repo}", + headers=headers, timeout=15, follow_redirects=True, + ) + if resp.status_code != 200: + return [] + branch = resp.json().get("default_branch", "main") + + resp = httpx.get( + f"https://api.github.com/repos/{repo}/git/trees/{branch}", + params={"recursive": "1"}, + headers=headers, timeout=30, follow_redirects=True, + ) + if resp.status_code != 200: + return [] + data = resp.json() + if data.get("truncated"): + return [] + return data.get("tree", []) + except Exception: + return [] + + +def batch_resolve_paths(skills: list, auth: GitHubAuth) -> list: + """Resolve GitHub paths for skills.sh entries using batch tree lookups. + + Instead of resolving each skill individually (Nร—M API calls), we: + 1. Group skills by repo + 2. Fetch one tree per repo (2 API calls per repo) + 3. Find all SKILL.md files in the tree + 4. Match skills to their resolved paths + """ + # Filter to skills.sh entries that need resolution + skills_sh = [s for s in skills if s["source"] in ("skills.sh", "skills-sh")] + if not skills_sh: + return skills + + print(f" Resolving paths for {len(skills_sh)} skills.sh entries...", + flush=True) + start = time.time() + + # Group by repo + by_repo: dict[str, list] = defaultdict(list) + for s in skills_sh: + repo = s.get("repo", "") + if repo: + by_repo[repo].append(s) + + print(f" {len(by_repo)} unique repos to scan", flush=True) + + resolved_count = 0 + + # Fetch trees in parallel (up to 6 concurrent) + def _resolve_repo(repo: str, entries: list): + tree = _fetch_repo_tree(repo, auth) + if not tree: + return 0 + + # Find all SKILL.md paths in this repo + skill_paths = {} # skill_dir_name -> full_path + for item in tree: + if item.get("type") != "blob": + continue + path = item.get("path", "") + if path.endswith("/SKILL.md"): + skill_dir = path[: -len("/SKILL.md")] + dir_name = skill_dir.split("/")[-1] + skill_paths[dir_name.lower()] = f"{repo}/{skill_dir}" + + # Also check SKILL.md frontmatter name if we can match by path + # For now, just index by directory name + elif path == "SKILL.md": + # Root-level SKILL.md + skill_paths["_root_"] = f"{repo}" + + count = 0 + for entry in entries: + # Try to match the skill's name/path to a tree entry + skill_name = entry.get("name", "").lower() + skill_path = entry.get("path", "").lower() + identifier = entry.get("identifier", "") + + # Extract the skill token from the identifier + # e.g. 
"skills-sh/d4vinci/scrapling/scrapling-official" -> "scrapling-official" + parts = identifier.replace("skills-sh/", "").replace("skills.sh/", "") + skill_token = parts.split("/")[-1].lower() if "/" in parts else "" + + # Try matching in order of likelihood + for candidate in [skill_token, skill_name, skill_path]: + if not candidate: + continue + matched = skill_paths.get(candidate) + if matched: + entry["resolved_github_id"] = matched + count += 1 + break + else: + # Try fuzzy: skill_token with common transformations + for tree_name, tree_path in skill_paths.items(): + if (skill_token and ( + tree_name.replace("-", "") == skill_token.replace("-", "") + or skill_token in tree_name + or tree_name in skill_token + )): + entry["resolved_github_id"] = tree_path + count += 1 + break + + return count + + with ThreadPoolExecutor(max_workers=6) as pool: + futures = { + pool.submit(_resolve_repo, repo, entries): repo + for repo, entries in by_repo.items() + } + for future in as_completed(futures): + try: + resolved_count += future.result() + except Exception as e: + repo = futures[future] + print(f" Warning: {repo}: {e}", file=sys.stderr) + + elapsed = time.time() - start + print(f" Resolved {resolved_count}/{len(skills_sh)} paths ({elapsed:.1f}s)", + flush=True) + return skills + + +def main(): + print("Building Hermes Skills Index...", flush=True) + overall_start = time.time() + + auth = GitHubAuth() + print(f"GitHub auth: {auth.auth_method()}") + if auth.auth_method() == "anonymous": + print("WARNING: No GitHub authentication โ€” rate limit is 60/hr. " + "Set GITHUB_TOKEN for better results.", file=sys.stderr) + + skills_sh_source = SkillsShSource(auth=auth) + sources = { + "official": OptionalSkillSource(), + "well-known": WellKnownSkillSource(), + "github": GitHubSource(auth=auth), + "clawhub": ClawHubSource(), + "claude-marketplace": ClaudeMarketplaceSource(auth=auth), + "lobehub": LobeHubSource(), + } + + all_skills: list[dict] = [] + + # Crawl skills.sh + all_skills.extend(crawl_skills_sh(skills_sh_source)) + + # Crawl other sources in parallel + with ThreadPoolExecutor(max_workers=4) as pool: + futures = {} + for name, source in sources.items(): + futures[pool.submit(crawl_source, source, name, 500)] = name + for future in as_completed(futures): + try: + all_skills.extend(future.result()) + except Exception as e: + print(f" Error: {e}", file=sys.stderr) + + # Batch resolve GitHub paths for skills.sh entries + all_skills = batch_resolve_paths(all_skills, auth) + + # Deduplicate by identifier + seen: dict[str, dict] = {} + for skill in all_skills: + key = skill["identifier"] + if key not in seen: + seen[key] = skill + deduped = list(seen.values()) + + # Sort + source_order = {"official": 0, "skills-sh": 1, "skills.sh": 1, + "github": 2, "well-known": 3, "clawhub": 4, + "claude-marketplace": 5, "lobehub": 6} + deduped.sort(key=lambda s: (source_order.get(s["source"], 99), s["name"])) + + # Build index + index = { + "version": INDEX_VERSION, + "generated_at": datetime.now(timezone.utc).isoformat(), + "skill_count": len(deduped), + "skills": deduped, + } + + os.makedirs(os.path.dirname(OUTPUT_PATH), exist_ok=True) + with open(OUTPUT_PATH, "w") as f: + json.dump(index, f, separators=(",", ":"), ensure_ascii=False) + + elapsed = time.time() - overall_start + file_size = os.path.getsize(OUTPUT_PATH) + print(f"\nDone! 
{len(deduped)} skills indexed in {elapsed:.0f}s") + print(f"Output: {OUTPUT_PATH} ({file_size / 1024:.0f} KB)") + + from collections import Counter + by_source = Counter(s["source"] for s in deduped) + for src, count in sorted(by_source.items(), key=lambda x: -x[1]): + resolved = sum(1 for s in deduped + if s["source"] == src and s.get("resolved_github_id")) + extra = f" ({resolved} resolved)" if resolved else "" + print(f" {src}: {count}{extra}") + + +if __name__ == "__main__": + main() diff --git a/scripts/whatsapp-bridge/package-lock.json b/scripts/whatsapp-bridge/package-lock.json index 23ea30a09..570d8a735 100644 --- a/scripts/whatsapp-bridge/package-lock.json +++ b/scripts/whatsapp-bridge/package-lock.json @@ -15,9 +15,9 @@ } }, "node_modules/@borewit/text-codec": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.1.tgz", - "integrity": "sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.2.tgz", + "integrity": "sha512-DDaRehssg1aNrH4+2hnj1B7vnUGEjU6OIlyRdkMd0aUdIUvKXrJfXsy8LVtXAy7DRvYVluWbMspsRhz2lcW0mQ==", "license": "MIT", "funding": { "type": "github", @@ -1088,9 +1088,9 @@ } }, "node_modules/file-type": { - "version": "21.3.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.3.0.tgz", - "integrity": "sha512-8kPJMIGz1Yt/aPEwOsrR97ZyZaD1Iqm8PClb1nYFclUCkBi0Ma5IsYNQzvSFS9ib51lWyIw5mIT9rWzI/xjpzA==", + "version": "21.3.4", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.3.4.tgz", + "integrity": "sha512-Ievi/yy8DS3ygGvT47PjSfdFoX+2isQueoYP1cntFW1JLYAuS4GD7NUPGg4zv2iZfV52uDyk5w5Z0TdpRS6Q1g==", "license": "MIT", "dependencies": { "@tokenizer/inflate": "^0.4.1", @@ -1456,9 +1456,9 @@ "license": "MIT" }, "node_modules/music-metadata": { - "version": "11.12.1", - "resolved": "https://registry.npmjs.org/music-metadata/-/music-metadata-11.12.1.tgz", - "integrity": "sha512-j++ltLxHDb5VCXET9FzQ8bnueiLHwQKgCO7vcbkRH/3F7fRjPkv6qncGEJ47yFhmemcYtgvsOAlcQ1dRBTkDjg==", + "version": "11.12.3", + "resolved": "https://registry.npmjs.org/music-metadata/-/music-metadata-11.12.3.tgz", + "integrity": "sha512-n6hSTZkuD59qWgHh6IP5dtDlDZQXoxk/bcA85Jywg8Z1iFrlNgl2+GTFgjZyn52W5UgQpV42V4XqrQZZAMbZTQ==", "funding": [ { "type": "github", @@ -1471,11 +1471,11 @@ ], "license": "MIT", "dependencies": { - "@borewit/text-codec": "^0.2.1", + "@borewit/text-codec": "^0.2.2", "@tokenizer/token": "^0.3.0", "content-type": "^1.0.5", "debug": "^4.4.3", - "file-type": "^21.3.0", + "file-type": "^21.3.1", "media-typer": "^1.1.0", "strtok3": "^10.3.4", "token-types": "^6.1.2", @@ -1589,9 +1589,9 @@ } }, "node_modules/path-to-regexp": { - "version": "0.1.12", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", - "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz", + "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==", "license": "MIT" }, "node_modules/pino": { @@ -2002,9 +2002,9 @@ } }, "node_modules/strtok3": { - "version": "10.3.4", - "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.4.tgz", - "integrity": "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg==", + "version": 
"10.3.5", + "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.5.tgz", + "integrity": "sha512-ki4hZQfh5rX0QDLLkOCj+h+CVNkqmp/CMf8v8kZpkNVK6jGQooMytqzLZYUVYIZcFZ6yDB70EfD8POcFXiF5oA==", "license": "MIT", "dependencies": { "@tokenizer/token": "^0.3.0" diff --git a/skills/autonomous-ai-agents/hermes-agent/SKILL.md b/skills/autonomous-ai-agents/hermes-agent/SKILL.md index 6d8cd1c61..9e0b412f5 100644 --- a/skills/autonomous-ai-agents/hermes-agent/SKILL.md +++ b/skills/autonomous-ai-agents/hermes-agent/SKILL.md @@ -19,7 +19,7 @@ What makes Hermes different: - **Self-improving through skills** โ€” Hermes learns from experience by saving reusable procedures as skills. When it solves a complex problem, discovers a workflow, or gets corrected, it can persist that knowledge as a skill document that loads into future sessions. Skills accumulate over time, making the agent better at your specific tasks and environment. - **Persistent memory across sessions** โ€” remembers who you are, your preferences, environment details, and lessons learned. Pluggable memory backends (built-in, Honcho, Mem0, and more) let you choose how memory works. -- **Multi-platform gateway** โ€” the same agent runs on Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, and 8+ other platforms with full tool access, not just chat. +- **Multi-platform gateway** โ€” the same agent runs on Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, and 10+ other platforms with full tool access, not just chat. - **Provider-agnostic** โ€” swap models and providers mid-workflow without changing anything else. Credential pools rotate across multiple API keys automatically. - **Profiles** โ€” run multiple independent Hermes instances with isolated configs, sessions, skills, and memory. - **Extensible** โ€” plugins, MCP servers, custom tools, webhook triggers, cron scheduling, and the full Python ecosystem. @@ -148,7 +148,7 @@ hermes gateway status Check status hermes gateway setup Configure platforms ``` -Supported platforms: Telegram, Discord, Slack, WhatsApp, Signal, Email, SMS, Matrix, Mattermost, Home Assistant, DingTalk, Feishu, WeCom, API Server, Webhooks, Open WebUI. +Supported platforms: Telegram, Discord, Slack, WhatsApp, Signal, Email, SMS, Matrix, Mattermost, Home Assistant, DingTalk, Feishu, WeCom, BlueBubbles (iMessage), Weixin (WeChat), API Server, Webhooks. Open WebUI connects via the API Server adapter. Platform docs: https://hermes-agent.nousresearch.com/docs/user-guide/messaging/ @@ -215,7 +215,7 @@ hermes insights [--days N] Usage analytics hermes update Update to latest version hermes pairing list/approve/revoke DM authorization hermes plugins list/install/remove Plugin management -hermes honcho setup/status Honcho memory integration +hermes honcho setup/status Honcho memory integration (requires honcho plugin) hermes memory setup/status/off Memory provider config hermes completion bash|zsh Shell completions hermes acp ACP server (IDE integration) @@ -269,6 +269,28 @@ Type these during an interactive chat session. 
/plugins List plugins (CLI) ``` +### Gateway +``` +/approve Approve a pending command (gateway) +/deny Deny a pending command (gateway) +/restart Restart gateway (gateway) +/sethome Set current chat as home channel (gateway) +/update Update Hermes to latest (gateway) +/platforms (/gateway) Show platform connection status (gateway) +``` + +### Utility +``` +/branch (/fork) Branch the current session +/btw Ephemeral side question (doesn't interrupt main task) +/fast Toggle priority/fast processing +/browser Open CDP browser connection +/history Show conversation history (CLI) +/save Save conversation to file (CLI) +/paste Attach clipboard image (CLI) +/image Attach local image file (CLI) +``` + ### Info ``` /help Show commands @@ -311,11 +333,11 @@ Edit with `hermes config edit` or `hermes config set section.key value`. | `terminal` | `backend` (local/docker/ssh/modal), `cwd`, `timeout` (180) | | `compression` | `enabled`, `threshold` (0.50), `target_ratio` (0.20) | | `display` | `skin`, `tool_progress`, `show_reasoning`, `show_cost` | -| `stt` | `enabled`, `provider` (local/groq/openai) | -| `tts` | `provider` (edge/elevenlabs/openai/kokoro/fish) | +| `stt` | `enabled`, `provider` (local/groq/openai/mistral) | +| `tts` | `provider` (edge/elevenlabs/openai/minimax/mistral/neutts) | | `memory` | `memory_enabled`, `user_profile_enabled`, `provider` | | `security` | `tirith_enabled`, `website_blocklist` | -| `delegation` | `model`, `provider`, `max_iterations` (50) | +| `delegation` | `model`, `provider`, `base_url`, `api_key`, `max_iterations` (50), `reasoning_effort` | | `smart_model_routing` | `enabled`, `cheap_model` | | `checkpoints` | `enabled`, `max_snapshots` (50) | @@ -323,7 +345,7 @@ Full config reference: https://hermes-agent.nousresearch.com/docs/user-guide/con ### Providers -18 providers supported. Set via `hermes model` or `hermes setup`. +20+ providers supported. Set via `hermes model` or `hermes setup`. | Provider | Auth | Key env var | |----------|------|-------------| @@ -332,16 +354,23 @@ Full config reference: https://hermes-agent.nousresearch.com/docs/user-guide/con | Nous Portal | OAuth | `hermes login --provider nous` | | OpenAI Codex | OAuth | `hermes login --provider openai-codex` | | GitHub Copilot | Token | `COPILOT_GITHUB_TOKEN` | +| Google Gemini | API key | `GOOGLE_API_KEY` or `GEMINI_API_KEY` | | DeepSeek | API key | `DEEPSEEK_API_KEY` | +| xAI / Grok | API key | `XAI_API_KEY` | | Hugging Face | Token | `HF_TOKEN` | | Z.AI / GLM | API key | `GLM_API_KEY` | | MiniMax | API key | `MINIMAX_API_KEY` | +| MiniMax CN | API key | `MINIMAX_CN_API_KEY` | | Kimi / Moonshot | API key | `KIMI_API_KEY` | | Alibaba / DashScope | API key | `DASHSCOPE_API_KEY` | +| Xiaomi MiMo | API key | `XIAOMI_API_KEY` | | Kilo Code | API key | `KILOCODE_API_KEY` | +| AI Gateway (Vercel) | API key | `AI_GATEWAY_API_KEY` | +| OpenCode Zen | API key | `OPENCODE_ZEN_API_KEY` | +| OpenCode Go | API key | `OPENCODE_GO_API_KEY` | +| Qwen OAuth | OAuth | `hermes login --provider qwen-oauth` | | Custom endpoint | Config | `model.base_url` + `model.api_key` in config.yaml | - -Plus: AI Gateway, OpenCode Zen, OpenCode Go, MiniMax CN, GitHub Copilot ACP. 
+| GitHub Copilot ACP | External | `COPILOT_CLI_PATH` or Copilot CLI |
 
 Full provider docs: https://hermes-agent.nousresearch.com/docs/integrations/providers
@@ -365,6 +394,10 @@ Enable/disable via `hermes tools` (interactive) or `hermes tools enable/disable
 | `delegation` | Subagent task delegation |
 | `cronjob` | Scheduled task management |
 | `clarify` | Ask user clarifying questions |
+| `messaging` | Cross-platform message sending |
+| `search` | Web search only (subset of `web`) |
+| `todo` | In-session task planning and tracking |
+| `rl` | Reinforcement learning tools (off by default) |
 | `moa` | Mixture of Agents (off by default) |
 | `homeassistant` | Smart home control (off by default) |
@@ -382,12 +415,13 @@ Provider priority (auto-detected):
 1. **Local faster-whisper** — free, no API key: `pip install faster-whisper`
 2. **Groq Whisper** — free tier: set `GROQ_API_KEY`
 3. **OpenAI Whisper** — paid: set `VOICE_TOOLS_OPENAI_KEY`
+4. **Mistral Voxtral** — set `MISTRAL_API_KEY`
 
 Config:
 ```yaml
 stt:
   enabled: true
-  provider: local  # local, groq, openai
+  provider: local  # local, groq, openai, mistral
   local:
     model: base  # tiny, base, small, medium, large-v3
 ```
@@ -399,8 +433,9 @@
 | Edge TTS | None | Yes (default) |
 | ElevenLabs | `ELEVENLABS_API_KEY` | Free tier |
 | OpenAI | `VOICE_TOOLS_OPENAI_KEY` | Paid |
-| Kokoro (local) | None | Free |
-| Fish Audio | `FISH_AUDIO_API_KEY` | Free tier |
+| MiniMax | `MINIMAX_API_KEY` | Paid |
+| Mistral (Voxtral) | `MISTRAL_API_KEY` | Paid |
+| NeuTTS (local) | None (`pip install neutts[all]` + `espeak-ng`) | Free |
 
 Voice commands: `/voice on` (voice-to-voice), `/voice tts` (always voice), `/voice off`.
@@ -492,7 +527,7 @@ terminal(command="tmux new-session -d -s resumed 'hermes --resume 20260225_14305
 ### Voice not working
 1. Check `stt.enabled: true` in config.yaml
 2. Verify provider: `pip install faster-whisper` or set API key
-3. Restart gateway: `/restart`
+3. In gateway: `/restart`. In CLI: exit and relaunch.
 
 ### Tool not available
 1. `hermes tools` — check if toolset is enabled for your platform
@@ -503,10 +538,11 @@
 1. `hermes doctor` — check config and dependencies
 2. `hermes login` — re-authenticate OAuth providers
 3. Check `.env` has the right API key
+4. **Copilot 403**: `gh auth login` tokens do NOT work for the Copilot API. You must use the Copilot-specific OAuth device code flow via `hermes model` → GitHub Copilot.
 
 ### Changes not taking effect
 - **Tools/skills:** `/reset` starts a new session with updated toolset
-- **Config changes:** `/restart` reloads gateway config
+- **Config changes:** In gateway: `/restart`. In CLI: exit and relaunch.
 - **Code changes:** Restart the CLI or gateway process
 
 ### Skills not showing
@@ -520,6 +556,23 @@ Check logs first:
 ```
 grep -i "failed to send\|error" ~/.hermes/logs/gateway.log | tail -20
 ```
 
+Common gateway problems:
+- **Gateway dies on SSH logout**: Enable linger: `sudo loginctl enable-linger $USER`
+- **Gateway dies on WSL2 close**: WSL2 requires `systemd=true` in `/etc/wsl.conf` for systemd services to work. Without it, the gateway falls back to `nohup` (dies when the session closes).
+- **Gateway crash loop**: Reset the failed state: `systemctl --user reset-failed hermes-gateway`
+
+### Platform-specific issues
+- **Discord bot silent**: Must enable **Message Content Intent** in Bot → Privileged Gateway Intents.
+- **Slack bot only works in DMs**: Must subscribe to the `message.channels` event. Without it, the bot ignores public channels.
+- **Windows HTTP 400 "No models provided"**: Config file encoding issue (BOM). Ensure `config.yaml` is saved as UTF-8 without BOM.
+
+### Auxiliary models not working
+If `auxiliary` tasks (vision, compression, session_search) fail silently, the `auto` provider can't find a backend. Either set `OPENROUTER_API_KEY` or `GOOGLE_API_KEY`, or explicitly configure each auxiliary task's provider:
+```bash
+hermes config set auxiliary.vision.provider <provider>
+hermes config set auxiliary.vision.model <model>
+```
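+
+For example, to pin vision to an explicit backend (the values below are illustrative, not defaults):
+
+```yaml
+auxiliary:
+  vision:
+    provider: openrouter
+    model: google/gemini-3-flash-preview
+```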
+
 ---
 
 ## Where to Find Things
@@ -557,7 +610,7 @@ hermes-agent/
 ├── toolsets.py        # Toolset definitions
 ├── cli.py             # Interactive CLI (HermesCLI)
 ├── hermes_state.py    # SQLite session store
-├── agent/             # Prompt builder, compression, display, adapters
+├── agent/             # Prompt builder, context compression, memory, model routing, credential pooling, skill dispatch
 ├── hermes_cli/        # CLI subcommands, config, setup, commands
 │   ├── commands.py    # Slash command registry (CommandDef)
 │   ├── config.py      # DEFAULT_CONFIG, env var definitions
@@ -626,7 +679,6 @@ run_conversation():
 ### Testing
 ```bash
-source venv/bin/activate  # or .venv/bin/activate
 python -m pytest tests/ -o 'addopts=' -q        # Full suite
 python -m pytest tests/tools/ -q                # Specific area
 ```
diff --git a/skills/research/research-paper-writing/SKILL.md b/skills/research/research-paper-writing/SKILL.md
index e773e0987..f45ce7e2f 100644
--- a/skills/research/research-paper-writing/SKILL.md
+++ b/skills/research/research-paper-writing/SKILL.md
@@ -820,6 +820,24 @@ Every successful ML paper centers on what Neel Nanda calls "the narrative": a sh
 
 **If you cannot state your contribution in one sentence, you don't yet have a paper.**
 
+### The Sources Behind This Guidance
+
+This skill synthesizes writing philosophy from researchers who have published extensively at top venues. The writing philosophy layer was originally compiled by [Orchestra Research](https://github.com/orchestra-research) as the `ml-paper-writing` skill.
+
+| Source | Key Contribution | Link |
+|--------|-----------------|------|
+| **Neel Nanda** (Google DeepMind) | The Narrative Principle, What/Why/So What framework | [How to Write ML Papers](https://www.alignmentforum.org/posts/eJGptPbbFPZGLpjsp/highly-opinionated-advice-on-how-to-write-ml-papers) |
+| **Sebastian Farquhar** (DeepMind) | 5-sentence abstract formula | [How to Write ML Papers](https://sebastianfarquhar.com/on-research/2024/11/04/how_to_write_ml_papers/) |
+| **Gopen & Swan** | 7 principles of reader expectations | [Science of Scientific Writing](https://cseweb.ucsd.edu/~swanson/papers/science-of-writing.pdf) |
+| **Zachary Lipton** | Word choice, eliminating hedging | [Heuristics for Scientific Writing](https://www.approximatelycorrect.com/2018/01/29/heuristics-technical-scientific-writing-machine-learning-perspective/) |
+| **Jacob Steinhardt** (UC Berkeley) | Precision, consistent terminology | [Writing Tips](https://bounded-regret.ghost.io/) |
+| **Ethan Perez** (Anthropic) | Micro-level clarity tips | [Easy Paper Writing Tips](https://ethanperez.net/easy-paper-writing-tips/) |
+| **Andrej Karpathy** | Single contribution focus | Various lectures |
+
+**For deeper dives into any of these, see:**
+- [references/writing-guide.md](references/writing-guide.md) — Full explanations with examples
+- [references/sources.md](references/sources.md) — Complete bibliography
+
 ### Time Allocation
 
 Spend approximately **equal time** on each of:
diff --git a/skills/research/research-paper-writing/references/sources.md b/skills/research/research-paper-writing/references/sources.md
index 47d727353..9ffa95428 100644
--- a/skills/research/research-paper-writing/references/sources.md
+++ b/skills/research/research-paper-writing/references/sources.md
@@ -4,6 +4,12 @@ This document lists all authoritative sources used to build this skill, organize
 
 ---
 
+## Origin & Attribution
+
+The writing philosophy, citation verification workflow, and conference reference materials in this skill were originally compiled by **[Orchestra Research](https://github.com/orchestra-research)** as the `ml-paper-writing` skill (January 2026), drawing on Neel Nanda's blog post and other researcher guides listed below. The skill was integrated into hermes-agent by teknium (January 2026), then expanded into the current `research-paper-writing` pipeline by SHL0MS (April 2026, PR #4654), which added experiment design, execution monitoring, iterative refinement, and submission phases while preserving the original writing philosophy and reference files.
+ +--- + ## Writing Philosophy & Guides ### Primary Sources (Must-Read) diff --git a/tests/agent/test_auxiliary_client.py b/tests/agent/test_auxiliary_client.py index 77004c4e1..e6a9d1919 100644 --- a/tests/agent/test_auxiliary_client.py +++ b/tests/agent/test_auxiliary_client.py @@ -17,7 +17,6 @@ from agent.auxiliary_client import ( call_llm, async_call_llm, _read_codex_access_token, - _get_auxiliary_provider, _get_provider_chain, _is_payment_error, _try_payment_fallback, @@ -32,12 +31,6 @@ def _clean_env(monkeypatch): "OPENROUTER_API_KEY", "OPENAI_BASE_URL", "OPENAI_API_KEY", "OPENAI_MODEL", "LLM_MODEL", "NOUS_INFERENCE_BASE_URL", "ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN", - # Per-task provider/model/direct-endpoint overrides - "AUXILIARY_VISION_PROVIDER", "AUXILIARY_VISION_MODEL", - "AUXILIARY_VISION_BASE_URL", "AUXILIARY_VISION_API_KEY", - "AUXILIARY_WEB_EXTRACT_PROVIDER", "AUXILIARY_WEB_EXTRACT_MODEL", - "AUXILIARY_WEB_EXTRACT_BASE_URL", "AUXILIARY_WEB_EXTRACT_API_KEY", - "CONTEXT_COMPRESSION_PROVIDER", "CONTEXT_COMPRESSION_MODEL", ): monkeypatch.delenv(key, raising=False) @@ -568,29 +561,6 @@ class TestGetTextAuxiliaryClient: call_kwargs = mock_openai.call_args assert call_kwargs.kwargs["base_url"] == "http://localhost:1234/v1" - def test_task_direct_endpoint_override(self, monkeypatch): - monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_BASE_URL", "http://localhost:2345/v1") - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_API_KEY", "task-key") - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_MODEL", "task-model") - with patch("agent.auxiliary_client.OpenAI") as mock_openai: - client, model = get_text_auxiliary_client("web_extract") - assert model == "task-model" - assert mock_openai.call_args.kwargs["base_url"] == "http://localhost:2345/v1" - assert mock_openai.call_args.kwargs["api_key"] == "task-key" - - def test_task_direct_endpoint_without_openai_key_uses_placeholder(self, monkeypatch): - """Local endpoints without an API key should use 'no-key-required' placeholder.""" - monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_BASE_URL", "http://localhost:2345/v1") - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_MODEL", "task-model") - with patch("agent.auxiliary_client.OpenAI") as mock_openai: - client, model = get_text_auxiliary_client("web_extract") - assert client is not None - assert model == "task-model" - assert mock_openai.call_args.kwargs["api_key"] == "no-key-required" - assert mock_openai.call_args.kwargs["base_url"] == "http://localhost:2345/v1" - def test_custom_endpoint_uses_config_saved_base_url(self, monkeypatch): config = { "model": { @@ -879,73 +849,9 @@ class TestAuxiliaryPoolAwareness: -class TestGetAuxiliaryProvider: - """Tests for _get_auxiliary_provider env var resolution.""" - - def test_no_task_returns_auto(self): - assert _get_auxiliary_provider() == "auto" - assert _get_auxiliary_provider("") == "auto" - - def test_auxiliary_prefix_takes_priority(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "openrouter") - assert _get_auxiliary_provider("vision") == "openrouter" - - def test_context_prefix_fallback(self, monkeypatch): - monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous") - assert _get_auxiliary_provider("compression") == "nous" - - def test_auxiliary_prefix_over_context_prefix(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_COMPRESSION_PROVIDER", "openrouter") - monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous") - 
assert _get_auxiliary_provider("compression") == "openrouter" - - def test_auto_value_treated_as_auto(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "auto") - assert _get_auxiliary_provider("vision") == "auto" - - def test_whitespace_stripped(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", " openrouter ") - assert _get_auxiliary_provider("vision") == "openrouter" - - def test_case_insensitive(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "OpenRouter") - assert _get_auxiliary_provider("vision") == "openrouter" - - def test_main_provider(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_PROVIDER", "main") - assert _get_auxiliary_provider("web_extract") == "main" - - class TestTaskSpecificOverrides: """Integration tests for per-task provider routing via get_text_auxiliary_client(task=...).""" - def test_text_with_vision_provider_override(self, monkeypatch): - """AUXILIARY_VISION_PROVIDER should not affect text tasks.""" - monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "nous") - monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") - with patch("agent.auxiliary_client.OpenAI"): - client, model = get_text_auxiliary_client() # no task โ†’ auto - assert model == "google/gemini-3-flash-preview" # OpenRouter, not Nous - - def test_compression_task_reads_context_prefix(self, monkeypatch): - """Compression task should check CONTEXT_COMPRESSION_PROVIDER env var.""" - monkeypatch.setenv("CONTEXT_COMPRESSION_PROVIDER", "nous") - monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") # would win in auto - with patch("agent.auxiliary_client._read_nous_auth") as mock_nous, \ - patch("agent.auxiliary_client.OpenAI"): - mock_nous.return_value = {"access_token": "***"} - client, model = get_text_auxiliary_client("compression") - # Config-first: model comes from config.yaml summary_model default, - # but provider is forced to Nous via env var - assert client is not None - - def test_web_extract_task_override(self, monkeypatch): - monkeypatch.setenv("AUXILIARY_WEB_EXTRACT_PROVIDER", "openrouter") - monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") - with patch("agent.auxiliary_client.OpenAI"): - client, model = get_text_auxiliary_client("web_extract") - assert model == "google/gemini-3-flash-preview" - def test_task_direct_endpoint_from_config(self, monkeypatch, tmp_path): hermes_home = tmp_path / "hermes" hermes_home.mkdir(parents=True, exist_ok=True) @@ -979,8 +885,6 @@ class TestTaskSpecificOverrides: """model: default: glm-5.1 provider: opencode-go -compression: - summary_provider: auto """ ) monkeypatch.setenv("HERMES_HOME", str(hermes_home)) @@ -1039,24 +943,45 @@ model: "model": "gpt-5.4", } - def test_compression_summary_base_url_from_config(self, monkeypatch, tmp_path): - """compression.summary_base_url should produce a custom-endpoint client.""" - hermes_home = tmp_path / "hermes" - hermes_home.mkdir(parents=True, exist_ok=True) - (hermes_home / "config.yaml").write_text( - """compression: - summary_provider: custom - summary_model: glm-4.7 - summary_base_url: https://api.z.ai/api/coding/paas/v4 -""" - ) - monkeypatch.setenv("HERMES_HOME", str(hermes_home)) - # Custom endpoints need an API key to build the client - monkeypatch.setenv("OPENAI_API_KEY", "test-key") - with patch("agent.auxiliary_client.OpenAI") as mock_openai: - client, model = get_text_auxiliary_client("compression") - assert model == "glm-4.7" - assert mock_openai.call_args.kwargs["base_url"] == "https://api.z.ai/api/coding/paas/v4" + +def 
test_resolve_provider_client_supports_copilot_acp_external_process(): + fake_client = MagicMock() + + with patch("agent.auxiliary_client._read_main_model", return_value="gpt-5.4-mini"), \ + patch("agent.auxiliary_client.CodexAuxiliaryClient", MagicMock()), \ + patch("agent.copilot_acp_client.CopilotACPClient", return_value=fake_client) as mock_acp, \ + patch("hermes_cli.auth.resolve_external_process_provider_credentials", return_value={ + "provider": "copilot-acp", + "api_key": "copilot-acp", + "base_url": "acp://copilot", + "command": "/usr/bin/copilot", + "args": ["--acp", "--stdio"], + }): + client, model = resolve_provider_client("copilot-acp") + + assert client is fake_client + assert model == "gpt-5.4-mini" + assert mock_acp.call_args.kwargs["api_key"] == "copilot-acp" + assert mock_acp.call_args.kwargs["base_url"] == "acp://copilot" + assert mock_acp.call_args.kwargs["command"] == "/usr/bin/copilot" + assert mock_acp.call_args.kwargs["args"] == ["--acp", "--stdio"] + + +def test_resolve_provider_client_copilot_acp_requires_explicit_or_configured_model(): + with patch("agent.auxiliary_client._read_main_model", return_value=""), \ + patch("agent.copilot_acp_client.CopilotACPClient") as mock_acp, \ + patch("hermes_cli.auth.resolve_external_process_provider_credentials", return_value={ + "provider": "copilot-acp", + "api_key": "copilot-acp", + "base_url": "acp://copilot", + "command": "/usr/bin/copilot", + "args": ["--acp", "--stdio"], + }): + client, model = resolve_provider_client("copilot-acp") + + assert client is None + assert model is None + mock_acp.assert_not_called() class TestAuxiliaryMaxTokensParam: diff --git a/tests/agent/test_auxiliary_config_bridge.py b/tests/agent/test_auxiliary_config_bridge.py index 91dea15af..66350519b 100644 --- a/tests/agent/test_auxiliary_config_bridge.py +++ b/tests/agent/test_auxiliary_config_bridge.py @@ -273,18 +273,6 @@ class TestDefaultConfigShape: assert web["provider"] == "auto" assert web["model"] == "" - def test_compression_provider_default(self): - from hermes_cli.config import DEFAULT_CONFIG - compression = DEFAULT_CONFIG["compression"] - assert "summary_provider" in compression - assert compression["summary_provider"] == "auto" - - def test_compression_base_url_default(self): - from hermes_cli.config import DEFAULT_CONFIG - compression = DEFAULT_CONFIG["compression"] - assert "summary_base_url" in compression - assert compression["summary_base_url"] is None - # โ”€โ”€ CLI defaults parity โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ diff --git a/tests/agent/test_auxiliary_named_custom_providers.py b/tests/agent/test_auxiliary_named_custom_providers.py index 4c16bcb01..224910ac4 100644 --- a/tests/agent/test_auxiliary_named_custom_providers.py +++ b/tests/agent/test_auxiliary_named_custom_providers.py @@ -12,17 +12,6 @@ def _isolate(tmp_path, monkeypatch): hermes_home = tmp_path / ".hermes" hermes_home.mkdir() monkeypatch.setenv("HERMES_HOME", str(hermes_home)) - for env_var in ( - "AUXILIARY_VISION_PROVIDER", - "AUXILIARY_VISION_MODEL", - "AUXILIARY_VISION_BASE_URL", - "AUXILIARY_VISION_API_KEY", - "CONTEXT_VISION_PROVIDER", - "CONTEXT_VISION_MODEL", - "CONTEXT_VISION_BASE_URL", - "CONTEXT_VISION_API_KEY", - ): - monkeypatch.delenv(env_var, raising=False) # Write a minimal config so load_config doesn't fail (hermes_home / "config.yaml").write_text("model:\n default: test-model\n") @@ -69,6 +58,10 @@ class 
TestNormalizeVisionProvider: assert _normalize_vision_provider("beans") == "beans" assert _normalize_vision_provider("deepseek") == "deepseek" + def test_custom_colon_named_provider_preserved(self): + from agent.auxiliary_client import _normalize_vision_provider + assert _normalize_vision_provider("custom:beans") == "beans" + def test_codex_alias_still_works(self): from agent.auxiliary_client import _normalize_vision_provider assert _normalize_vision_provider("codex") == "openai-codex" @@ -240,3 +233,22 @@ class TestResolveVisionProviderClientModelNormalization: assert provider == "zai" assert client is not None assert model == "glm-5.1" + + +class TestVisionPathApiMode: + """Vision path should propagate api_mode to _get_cached_client.""" + + def test_explicit_provider_passes_api_mode(self, tmp_path): + _write_config(tmp_path, { + "model": {"default": "test-model"}, + "auxiliary": {"vision": {"api_mode": "chat_completions"}}, + }) + with patch("agent.auxiliary_client._get_cached_client") as mock_gcc: + mock_gcc.return_value = (MagicMock(), "test-model") + from agent.auxiliary_client import resolve_vision_provider_client + + provider, client, model = resolve_vision_provider_client(provider="deepseek") + + mock_gcc.assert_called_once() + _, kwargs = mock_gcc.call_args + assert kwargs.get("api_mode") == "chat_completions" diff --git a/tests/agent/test_minimax_provider.py b/tests/agent/test_minimax_provider.py index 1673bfd94..85c9c9520 100644 --- a/tests/agent/test_minimax_provider.py +++ b/tests/agent/test_minimax_provider.py @@ -308,6 +308,34 @@ class TestMinimaxPreserveDots: from run_agent import AIAgent assert AIAgent._anthropic_preserve_dots(agent) is False + def test_opencode_zen_provider_preserves_dots(self): + from types import SimpleNamespace + agent = SimpleNamespace(provider="opencode-zen", base_url="") + from run_agent import AIAgent + assert AIAgent._anthropic_preserve_dots(agent) is True + + def test_opencode_zen_url_preserves_dots(self): + from types import SimpleNamespace + agent = SimpleNamespace(provider="custom", base_url="https://opencode.ai/zen/v1") + from run_agent import AIAgent + assert AIAgent._anthropic_preserve_dots(agent) is True + + def test_zai_provider_preserves_dots(self): + from types import SimpleNamespace + agent = SimpleNamespace(provider="zai", base_url="") + from run_agent import AIAgent + assert AIAgent._anthropic_preserve_dots(agent) is True + + def test_bigmodel_cn_url_preserves_dots(self): + from types import SimpleNamespace + agent = SimpleNamespace(provider="custom", base_url="https://open.bigmodel.cn/api/paas/v4") + from run_agent import AIAgent + assert AIAgent._anthropic_preserve_dots(agent) is True + + def test_normalize_preserves_m25_free_dot(self): + from agent.anthropic_adapter import normalize_model_name + assert normalize_model_name("minimax-m2.5-free", preserve_dots=True) == "minimax-m2.5-free" + def test_normalize_preserves_m27_dot(self): from agent.anthropic_adapter import normalize_model_name assert normalize_model_name("MiniMax-M2.7", preserve_dots=True) == "MiniMax-M2.7" diff --git a/tests/agent/test_model_metadata_local_ctx.py b/tests/agent/test_model_metadata_local_ctx.py index e5ad0dc58..6852a82cc 100644 --- a/tests/agent/test_model_metadata_local_ctx.py +++ b/tests/agent/test_model_metadata_local_ctx.py @@ -70,6 +70,44 @@ class TestQueryLocalContextLengthOllama: assert result == 32768 + def test_ollama_num_ctx_wins_over_model_info(self): + """When both num_ctx (Modelfile) and model_info (GGUF) are present, + num_ctx wins because 
it's the *runtime* context Ollama actually + allocates KV cache for. The GGUF model_info.context_length is the + training max โ€” using it would let Hermes grow conversations past + the runtime limit and Ollama would silently truncate. + + Concrete example: hermes-brain:qwen3-14b-ctx32k is a Modelfile + derived from qwen3:14b with `num_ctx 32768`, but the underlying + GGUF reports `qwen3.context_length: 40960` (training max). If + Hermes used 40960 it would let the conversation grow past 32768 + before compressing, and Ollama would truncate the prefix. + """ + from agent.model_metadata import _query_local_context_length + + show_resp = self._make_resp(200, { + "model_info": {"qwen3.context_length": 40960}, + "parameters": "num_ctx 32768\ntemperature 0.6\n", + }) + models_resp = self._make_resp(404, {}) + + client_mock = MagicMock() + client_mock.__enter__ = lambda s: client_mock + client_mock.__exit__ = MagicMock(return_value=False) + client_mock.post.return_value = show_resp + client_mock.get.return_value = models_resp + + with patch("agent.model_metadata.detect_local_server_type", return_value="ollama"), \ + patch("httpx.Client", return_value=client_mock): + result = _query_local_context_length( + "hermes-brain:qwen3-14b-ctx32k", "http://100.77.243.5:11434/v1" + ) + + assert result == 32768, ( + f"Expected num_ctx (32768) to win over model_info (40960), got {result}. " + "If Hermes uses the GGUF training max, conversations will silently truncate." + ) + def test_ollama_show_404_falls_through(self): """When /api/show returns 404, falls through to /v1/models/{model}.""" from agent.model_metadata import _query_local_context_length diff --git a/tests/cli/test_cli_save_config_value.py b/tests/cli/test_cli_save_config_value.py index 7d030c03c..e48119414 100644 --- a/tests/cli/test_cli_save_config_value.py +++ b/tests/cli/test_cli_save_config_value.py @@ -51,10 +51,10 @@ class TestSaveConfigValueAtomic: def test_creates_nested_keys(self, config_env): """Dot-separated paths create intermediate dicts as needed.""" from cli import save_config_value - save_config_value("compression.summary_model", "google/gemini-3-flash-preview") + save_config_value("auxiliary.compression.model", "google/gemini-3-flash-preview") result = yaml.safe_load(config_env.read_text()) - assert result["compression"]["summary_model"] == "google/gemini-3-flash-preview" + assert result["auxiliary"]["compression"]["model"] == "google/gemini-3-flash-preview" def test_overwrites_existing_value(self, config_env): """Updating an existing key replaces the value.""" diff --git a/tests/cli/test_resume_display.py b/tests/cli/test_resume_display.py index d0c156d13..d183e48b2 100644 --- a/tests/cli/test_resume_display.py +++ b/tests/cli/test_resume_display.py @@ -180,33 +180,71 @@ class TestDisplayResumedHistory: assert 200 <= a_count <= 310 # roughly 300 chars (ยฑpanel padding) def test_long_assistant_message_truncated(self): + """Non-last assistant messages are still truncated.""" cli = _make_cli() long_text = "B" * 400 cli.conversation_history = [ {"role": "user", "content": "Tell me a lot."}, {"role": "assistant", "content": long_text}, + {"role": "user", "content": "And more?"}, + {"role": "assistant", "content": "Short final reply."}, ] output = self._capture_display(cli) - assert "..." in output + # The non-last assistant message should be truncated assert "B" * 400 not in output + # The last assistant message shown in full + assert "Short final reply." 
diff --git a/tests/cli/test_cli_save_config_value.py b/tests/cli/test_cli_save_config_value.py
index 7d030c03c..e48119414 100644
--- a/tests/cli/test_cli_save_config_value.py
+++ b/tests/cli/test_cli_save_config_value.py
@@ -51,10 +51,10 @@ class TestSaveConfigValueAtomic:
     def test_creates_nested_keys(self, config_env):
         """Dot-separated paths create intermediate dicts as needed."""
         from cli import save_config_value
-        save_config_value("compression.summary_model", "google/gemini-3-flash-preview")
+        save_config_value("auxiliary.compression.model", "google/gemini-3-flash-preview")
 
         result = yaml.safe_load(config_env.read_text())
-        assert result["compression"]["summary_model"] == "google/gemini-3-flash-preview"
+        assert result["auxiliary"]["compression"]["model"] == "google/gemini-3-flash-preview"
 
     def test_overwrites_existing_value(self, config_env):
         """Updating an existing key replaces the value."""
diff --git a/tests/cli/test_resume_display.py b/tests/cli/test_resume_display.py
index d0c156d13..d183e48b2 100644
--- a/tests/cli/test_resume_display.py
+++ b/tests/cli/test_resume_display.py
@@ -180,33 +180,71 @@ class TestDisplayResumedHistory:
         assert 200 <= a_count <= 310  # roughly 300 chars (±panel padding)
 
     def test_long_assistant_message_truncated(self):
+        """Non-last assistant messages are still truncated."""
         cli = _make_cli()
         long_text = "B" * 400
         cli.conversation_history = [
             {"role": "user", "content": "Tell me a lot."},
             {"role": "assistant", "content": long_text},
+            {"role": "user", "content": "And more?"},
+            {"role": "assistant", "content": "Short final reply."},
         ]
 
         output = self._capture_display(cli)
-        assert "..." in output
+        # The non-last assistant message should be truncated
         assert "B" * 400 not in output
+        # The last assistant message is shown in full
+        assert "Short final reply." in output
 
     def test_multiline_assistant_truncated(self):
+        """Non-last multiline assistant messages are truncated to 3 lines."""
         cli = _make_cli()
         multi = "\n".join([f"Line {i}" for i in range(20)])
         cli.conversation_history = [
             {"role": "user", "content": "Show me lines."},
             {"role": "assistant", "content": multi},
+            {"role": "user", "content": "What else?"},
+            {"role": "assistant", "content": "Done."},
         ]
 
         output = self._capture_display(cli)
-        # First 3 lines should be there
+        # First 3 lines of non-last assistant should be there
         assert "Line 0" in output
         assert "Line 1" in output
         assert "Line 2" in output
-        # Line 19 should NOT be there (truncated after 3 lines)
+        # Line 19 should NOT be in the truncated message
        assert "Line 19" not in output
 
+    def test_last_assistant_response_shown_in_full(self):
+        """The last assistant response is shown un-truncated so the user
+        knows where they left off without wasting tokens re-asking."""
+        cli = _make_cli()
+        long_text = "X" * 500
+        cli.conversation_history = [
+            {"role": "user", "content": "Tell me everything."},
+            {"role": "assistant", "content": long_text},
+        ]
+        output = self._capture_display(cli)
+
+        # Full 500-char text should be present (may be line-wrapped by Rich)
+        x_count = output.count("X")
+        assert x_count >= 490  # allow small Rich formatting variance
+
+    def test_last_assistant_multiline_shown_in_full(self):
+        """The last assistant response shows all lines, not just 3."""
+        cli = _make_cli()
+        multi = "\n".join([f"Line {i}" for i in range(20)])
+        cli.conversation_history = [
+            {"role": "user", "content": "Show me everything."},
+            {"role": "assistant", "content": multi},
+        ]
+        output = self._capture_display(cli)
+
+        # All 20 lines should be present since it's the last response
+        assert "Line 0" in output
+        assert "Line 10" in output
+        assert "Line 19" in output
+
     def test_large_history_shows_truncation_indicator(self):
         cli = _make_cli()
         cli.conversation_history = _large_history(n_exchanges=15)
diff --git a/tests/gateway/restart_test_helpers.py b/tests/gateway/restart_test_helpers.py
index 54dcd69b9..8b4897467 100644
--- a/tests/gateway/restart_test_helpers.py
+++ b/tests/gateway/restart_test_helpers.py
@@ -35,6 +35,7 @@ def make_restart_source(chat_id: str = "123456", chat_type: str = "dm") -> Sessi
         platform=Platform.TELEGRAM,
         chat_id=chat_id,
         chat_type=chat_type,
+        user_id="u1",
     )
diff --git a/tests/gateway/test_api_server_normalize.py b/tests/gateway/test_api_server_normalize.py
new file mode 100644
index 000000000..2dd2c70f7
--- /dev/null
+++ b/tests/gateway/test_api_server_normalize.py
@@ -0,0 +1,87 @@
+"""Tests for _normalize_chat_content in the API server adapter."""
+
+from gateway.platforms.api_server import _normalize_chat_content
+
+
+class TestNormalizeChatContent:
+    """Content normalization converts array-based content parts to plain text."""
+
+    def test_none_returns_empty_string(self):
+        assert _normalize_chat_content(None) == ""
+
+    def test_plain_string_returned_as_is(self):
+        assert _normalize_chat_content("hello world") == "hello world"
+
+    def test_empty_string_returned_as_is(self):
+        assert _normalize_chat_content("") == ""
+
+    def test_text_content_part(self):
+        content = [{"type": "text", "text": "hello"}]
+        assert _normalize_chat_content(content) == "hello"
+
+    def test_input_text_content_part(self):
+        content = [{"type": "input_text", "text": "user input"}]
+        assert _normalize_chat_content(content) == "user input"
+
+    def test_output_text_content_part(self):
+        content = [{"type": "output_text", "text": "assistant output"}]
+        assert _normalize_chat_content(content) == "assistant output"
+
+    def test_multiple_text_parts_joined_with_newline(self):
+        content = [
+            {"type": "text", "text": "first"},
+            {"type": "text", "text": "second"},
+        ]
+        assert _normalize_chat_content(content) == "first\nsecond"
+
+    def test_mixed_string_and_dict_parts(self):
+        content = ["plain string", {"type": "text", "text": "dict part"}]
+        assert _normalize_chat_content(content) == "plain string\ndict part"
+
+    def test_image_url_parts_silently_skipped(self):
+        content = [
+            {"type": "text", "text": "check this:"},
+            {"type": "image_url", "image_url": {"url": "https://example.com/img.png"}},
+        ]
+        assert _normalize_chat_content(content) == "check this:"
+
+    def test_integer_content_converted(self):
+        assert _normalize_chat_content(42) == "42"
+
+    def test_boolean_content_converted(self):
+        assert _normalize_chat_content(True) == "True"
+
+    def test_deeply_nested_list_respects_depth_limit(self):
+        """Nesting beyond max_depth returns empty string."""
+        content = [[[[[[[[[[[["deep"]]]]]]]]]]]]
+        result = _normalize_chat_content(content)
+        # The deep nesting should be truncated, not crash
+        assert isinstance(result, str)
+
+    def test_large_list_capped(self):
+        """Lists beyond MAX_CONTENT_LIST_SIZE are truncated."""
+        content = [{"type": "text", "text": f"item{i}"} for i in range(2000)]
+        result = _normalize_chat_content(content)
+        # Should not contain all 2000 items
+        assert result.count("item") <= 1000
+
+    def test_oversized_string_truncated(self):
+        """Strings beyond 64KB are truncated."""
+        huge = "x" * 100_000
+        result = _normalize_chat_content(huge)
+        assert len(result) == 65_536
+
+    def test_empty_text_parts_filtered(self):
+        content = [
+            {"type": "text", "text": ""},
+            {"type": "text", "text": "actual"},
+            {"type": "text", "text": ""},
+        ]
+        assert _normalize_chat_content(content) == "actual"
+
+    def test_dict_without_type_skipped(self):
+        content = [{"foo": "bar"}, {"type": "text", "text": "real"}]
+        assert _normalize_chat_content(content) == "real"
+
+    def test_empty_list_returns_empty(self):
+        assert _normalize_chat_content([]) == ""
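Taken together, these tests fully pin down the normalization contract. As a rough sketch of code that would satisfy it (the constants and recursion shape are inferred from the tests, not copied from gateway/platforms/api_server.py; treat names and limits as assumptions):

```python
# Hypothetical normalizer matching the behavior the tests above describe.
MAX_CONTENT_LIST_SIZE = 1000  # implied by test_large_list_capped
MAX_TEXT_LEN = 65_536         # 64 KiB, from test_oversized_string_truncated
_TEXT_TYPES = {"text", "input_text", "output_text"}


def normalize_chat_content(content, max_depth: int = 8) -> str:
    """Flatten OpenAI-style content (string, list of parts, or scalar) to text."""
    if content is None:
        return ""
    if isinstance(content, str):
        return content[:MAX_TEXT_LEN]
    if isinstance(content, list):
        if max_depth <= 0:
            return ""  # give up on absurd nesting instead of recursing forever
        parts = []
        for item in content[:MAX_CONTENT_LIST_SIZE]:
            if isinstance(item, dict):
                # Only text-bearing parts survive; image_url etc. are skipped.
                if item.get("type") in _TEXT_TYPES and item.get("text"):
                    parts.append(str(item["text"]))
            else:
                text = normalize_chat_content(item, max_depth - 1)
                if text:
                    parts.append(text)
        return "\n".join(parts)[:MAX_TEXT_LEN]
    return str(content)  # ints, bools, ... become "42", "True"
```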
diff --git a/tests/gateway/test_discord_free_response.py b/tests/gateway/test_discord_free_response.py
index 29f65efc6..c2ef286d8 100644
--- a/tests/gateway/test_discord_free_response.py
+++ b/tests/gateway/test_discord_free_response.py
@@ -359,3 +359,44 @@ async def test_discord_thread_participation_tracked_on_dispatch(adapter, monkeyp
     await adapter._handle_message(message)
 
     assert "777" in adapter._threads
+
+
+@pytest.mark.asyncio
+async def test_discord_voice_linked_channel_skips_mention_requirement_and_auto_thread(adapter, monkeypatch):
+    """Active voice-linked text channels should behave like free-response channels."""
+    monkeypatch.setenv("DISCORD_REQUIRE_MENTION", "true")
+    monkeypatch.delenv("DISCORD_FREE_RESPONSE_CHANNELS", raising=False)
+    monkeypatch.delenv("DISCORD_AUTO_THREAD", raising=False)
+
+    adapter._voice_text_channels[111] = 789
+    adapter._auto_create_thread = AsyncMock()
+
+    message = make_message(
+        channel=FakeTextChannel(channel_id=789),
+        content="follow-up from voice text chat",
+    )
+
+    await adapter._handle_message(message)
+
+    adapter._auto_create_thread.assert_not_awaited()
+    adapter.handle_message.assert_awaited_once()
+    event = adapter.handle_message.await_args.args[0]
+    assert event.text == "follow-up from voice text chat"
+    assert event.source.chat_type == "group"
+
+
+@pytest.mark.asyncio
+async def 
test_discord_voice_linked_parent_thread_still_requires_mention(adapter, monkeypatch): + """Threads under a voice-linked channel should still require @mention.""" + monkeypatch.setenv("DISCORD_REQUIRE_MENTION", "true") + monkeypatch.delenv("DISCORD_FREE_RESPONSE_CHANNELS", raising=False) + + adapter._voice_text_channels[111] = 789 + message = make_message( + channel=FakeThread(channel_id=790, parent=FakeTextChannel(channel_id=789)), + content="thread reply without mention", + ) + + await adapter._handle_message(message) + + adapter.handle_message.assert_not_awaited() diff --git a/tests/gateway/test_discord_reply_mode.py b/tests/gateway/test_discord_reply_mode.py index 5a9bb9cd1..2346d086f 100644 --- a/tests/gateway/test_discord_reply_mode.py +++ b/tests/gateway/test_discord_reply_mode.py @@ -124,7 +124,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_off_mode_no_reply_reference(self): adapter, channel, ref_msg = _make_discord_adapter("off") - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -137,7 +137,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_first_mode_only_first_chunk_references(self): adapter, channel, ref_msg = _make_discord_adapter("first") - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -152,7 +152,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_all_mode_all_chunks_reference(self): adapter, channel, ref_msg = _make_discord_adapter("all") - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -165,7 +165,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_no_reply_to_param_no_reference(self): adapter, channel, ref_msg = _make_discord_adapter("all") - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"] await adapter.send("12345", "test content", reply_to=None) @@ -176,7 +176,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_single_chunk_respects_first_mode(self): adapter, channel, ref_msg = _make_discord_adapter("first") - adapter.truncate_message = lambda content, max_len: ["single chunk"] + adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"] await adapter.send("12345", "test", reply_to="999") @@ -187,7 +187,7 @@ class TestSendWithReplyToMode: @pytest.mark.asyncio async def test_single_chunk_off_mode(self): adapter, channel, ref_msg = _make_discord_adapter("off") - adapter.truncate_message = lambda content, max_len: ["single chunk"] + adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"] await adapter.send("12345", "test", reply_to="999") @@ -200,7 +200,7 @@ class TestSendWithReplyToMode: async def test_invalid_mode_falls_back_to_first_behavior(self): """Invalid mode behaves like 'first' โ€” only first chunk gets reference.""" adapter, channel, ref_msg = _make_discord_adapter("banana") - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] + 
adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"] await adapter.send("12345", "test", reply_to="999") diff --git a/tests/gateway/test_display_config.py b/tests/gateway/test_display_config.py index 4dd73ebd2..c9ad51280 100644 --- a/tests/gateway/test_display_config.py +++ b/tests/gateway/test_display_config.py @@ -189,14 +189,14 @@ class TestPlatformDefaults: """Slack, Mattermost, Matrix default to 'new' tool progress.""" from gateway.display_config import resolve_display_setting - for plat in ("slack", "mattermost", "matrix", "feishu"): + for plat in ("slack", "mattermost", "matrix", "feishu", "whatsapp"): assert resolve_display_setting({}, plat, "tool_progress") == "new", plat def test_low_tier_platforms(self): - """Signal, WhatsApp, etc. default to 'off' tool progress.""" + """Signal, BlueBubbles, etc. default to 'off' tool progress.""" from gateway.display_config import resolve_display_setting - for plat in ("signal", "whatsapp", "bluebubbles", "weixin", "wecom", "dingtalk"): + for plat in ("signal", "bluebubbles", "weixin", "wecom", "dingtalk"): assert resolve_display_setting({}, plat, "tool_progress") == "off", plat def test_minimal_tier_platforms(self): diff --git a/tests/gateway/test_feishu_onboard.py b/tests/gateway/test_feishu_onboard.py new file mode 100644 index 000000000..1ba1a64aa --- /dev/null +++ b/tests/gateway/test_feishu_onboard.py @@ -0,0 +1,438 @@ +"""Tests for gateway.platforms.feishu โ€” Feishu scan-to-create registration.""" + +import json +from unittest.mock import patch, MagicMock +import pytest + + +def _mock_urlopen(response_data, status=200): + """Create a mock for urllib.request.urlopen that returns JSON response_data.""" + mock_response = MagicMock() + mock_response.read.return_value = json.dumps(response_data).encode("utf-8") + mock_response.status = status + mock_response.__enter__ = lambda s: s + mock_response.__exit__ = MagicMock(return_value=False) + return mock_response + + +class TestPostRegistration: + """Tests for the low-level HTTP helper.""" + + @patch("gateway.platforms.feishu.urlopen") + def test_post_registration_returns_parsed_json(self, mock_urlopen_fn): + from gateway.platforms.feishu import _post_registration + + mock_urlopen_fn.return_value = _mock_urlopen({"nonce": "abc", "supported_auth_methods": ["client_secret"]}) + result = _post_registration("https://accounts.feishu.cn", {"action": "init"}) + assert result["nonce"] == "abc" + assert "client_secret" in result["supported_auth_methods"] + + @patch("gateway.platforms.feishu.urlopen") + def test_post_registration_sends_form_encoded_body(self, mock_urlopen_fn): + from gateway.platforms.feishu import _post_registration + + mock_urlopen_fn.return_value = _mock_urlopen({}) + _post_registration("https://accounts.feishu.cn", {"action": "init", "key": "val"}) + call_args = mock_urlopen_fn.call_args + request = call_args[0][0] + body = request.data.decode("utf-8") + assert "action=init" in body + assert "key=val" in body + assert request.get_header("Content-type") == "application/x-www-form-urlencoded" + + +class TestInitRegistration: + """Tests for the init step.""" + + @patch("gateway.platforms.feishu.urlopen") + def test_init_succeeds_when_client_secret_supported(self, mock_urlopen_fn): + from gateway.platforms.feishu import _init_registration + + mock_urlopen_fn.return_value = _mock_urlopen({ + "nonce": "abc", + "supported_auth_methods": ["client_secret"], + }) + _init_registration("feishu") + + @patch("gateway.platforms.feishu.urlopen") + def 
test_init_raises_when_client_secret_not_supported(self, mock_urlopen_fn): + from gateway.platforms.feishu import _init_registration + + mock_urlopen_fn.return_value = _mock_urlopen({ + "nonce": "abc", + "supported_auth_methods": ["other_method"], + }) + with pytest.raises(RuntimeError, match="client_secret"): + _init_registration("feishu") + + @patch("gateway.platforms.feishu.urlopen") + def test_init_uses_lark_url_for_lark_domain(self, mock_urlopen_fn): + from gateway.platforms.feishu import _init_registration + + mock_urlopen_fn.return_value = _mock_urlopen({ + "nonce": "abc", + "supported_auth_methods": ["client_secret"], + }) + _init_registration("lark") + call_args = mock_urlopen_fn.call_args + request = call_args[0][0] + assert "larksuite.com" in request.full_url + + +class TestBeginRegistration: + """Tests for the begin step.""" + + @patch("gateway.platforms.feishu.urlopen") + def test_begin_returns_device_code_and_qr_url(self, mock_urlopen_fn): + from gateway.platforms.feishu import _begin_registration + + mock_urlopen_fn.return_value = _mock_urlopen({ + "device_code": "dc_123", + "verification_uri_complete": "https://accounts.feishu.cn/qr/abc", + "user_code": "ABCD-1234", + "interval": 5, + "expire_in": 600, + }) + result = _begin_registration("feishu") + assert result["device_code"] == "dc_123" + assert "qr_url" in result + assert "accounts.feishu.cn" in result["qr_url"] + assert result["user_code"] == "ABCD-1234" + assert result["interval"] == 5 + assert result["expire_in"] == 600 + + @patch("gateway.platforms.feishu.urlopen") + def test_begin_sends_correct_archetype(self, mock_urlopen_fn): + from gateway.platforms.feishu import _begin_registration + + mock_urlopen_fn.return_value = _mock_urlopen({ + "device_code": "dc_123", + "verification_uri_complete": "https://example.com/qr", + "user_code": "X", + "interval": 5, + "expire_in": 600, + }) + _begin_registration("feishu") + request = mock_urlopen_fn.call_args[0][0] + body = request.data.decode("utf-8") + assert "archetype=PersonalAgent" in body + assert "auth_method=client_secret" in body + + +class TestPollRegistration: + """Tests for the poll step.""" + + @patch("gateway.platforms.feishu.time") + @patch("gateway.platforms.feishu.urlopen") + def test_poll_returns_credentials_on_success(self, mock_urlopen_fn, mock_time): + from gateway.platforms.feishu import _poll_registration + + mock_time.time.side_effect = [0, 1] + mock_time.sleep = MagicMock() + + mock_urlopen_fn.return_value = _mock_urlopen({ + "client_id": "cli_app123", + "client_secret": "secret456", + "user_info": {"open_id": "ou_owner", "tenant_brand": "feishu"}, + }) + result = _poll_registration( + device_code="dc_123", interval=1, expire_in=60, domain="feishu" + ) + assert result is not None + assert result["app_id"] == "cli_app123" + assert result["app_secret"] == "secret456" + assert result["domain"] == "feishu" + assert result["open_id"] == "ou_owner" + + @patch("gateway.platforms.feishu.time") + @patch("gateway.platforms.feishu.urlopen") + def test_poll_switches_domain_on_lark_tenant_brand(self, mock_urlopen_fn, mock_time): + from gateway.platforms.feishu import _poll_registration + + mock_time.time.side_effect = [0, 1, 2] + mock_time.sleep = MagicMock() + + pending_resp = _mock_urlopen({ + "error": "authorization_pending", + "user_info": {"tenant_brand": "lark"}, + }) + success_resp = _mock_urlopen({ + "client_id": "cli_lark", + "client_secret": "secret_lark", + "user_info": {"open_id": "ou_lark", "tenant_brand": "lark"}, + }) + mock_urlopen_fn.side_effect = 
[pending_resp, success_resp] + + result = _poll_registration( + device_code="dc_123", interval=0, expire_in=60, domain="feishu" + ) + assert result is not None + assert result["domain"] == "lark" + + @patch("gateway.platforms.feishu.time") + @patch("gateway.platforms.feishu.urlopen") + def test_poll_success_with_lark_brand_in_same_response(self, mock_urlopen_fn, mock_time): + """Credentials and lark tenant_brand in one response must not be discarded.""" + from gateway.platforms.feishu import _poll_registration + + mock_time.time.side_effect = [0, 1] + mock_time.sleep = MagicMock() + + mock_urlopen_fn.return_value = _mock_urlopen({ + "client_id": "cli_lark_direct", + "client_secret": "secret_lark_direct", + "user_info": {"open_id": "ou_lark_direct", "tenant_brand": "lark"}, + }) + result = _poll_registration( + device_code="dc_123", interval=1, expire_in=60, domain="feishu" + ) + assert result is not None + assert result["app_id"] == "cli_lark_direct" + assert result["domain"] == "lark" + assert result["open_id"] == "ou_lark_direct" + + @patch("gateway.platforms.feishu.time") + @patch("gateway.platforms.feishu.urlopen") + def test_poll_returns_none_on_access_denied(self, mock_urlopen_fn, mock_time): + from gateway.platforms.feishu import _poll_registration + + mock_time.time.side_effect = [0, 1] + mock_time.sleep = MagicMock() + + mock_urlopen_fn.return_value = _mock_urlopen({ + "error": "access_denied", + }) + result = _poll_registration( + device_code="dc_123", interval=1, expire_in=60, domain="feishu" + ) + assert result is None + + @patch("gateway.platforms.feishu.time") + @patch("gateway.platforms.feishu.urlopen") + def test_poll_returns_none_on_timeout(self, mock_urlopen_fn, mock_time): + from gateway.platforms.feishu import _poll_registration + + mock_time.time.side_effect = [0, 999] + mock_time.sleep = MagicMock() + + mock_urlopen_fn.return_value = _mock_urlopen({ + "error": "authorization_pending", + }) + result = _poll_registration( + device_code="dc_123", interval=1, expire_in=1, domain="feishu" + ) + assert result is None + + +class TestRenderQr: + """Tests for QR code terminal rendering.""" + + @patch("gateway.platforms.feishu._qrcode_mod", create=True) + def test_render_qr_returns_true_on_success(self, mock_qrcode_mod): + from gateway.platforms.feishu import _render_qr + + mock_qr = MagicMock() + mock_qrcode_mod.QRCode.return_value = mock_qr + assert _render_qr("https://example.com/qr") is True + mock_qr.add_data.assert_called_once_with("https://example.com/qr") + mock_qr.make.assert_called_once_with(fit=True) + mock_qr.print_ascii.assert_called_once() + + def test_render_qr_returns_false_when_qrcode_missing(self): + from gateway.platforms.feishu import _render_qr + + with patch("gateway.platforms.feishu._qrcode_mod", None): + assert _render_qr("https://example.com/qr") is False + + +class TestProbeBot: + """Tests for bot connectivity verification.""" + + @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", True) + def test_probe_returns_bot_info_on_success(self): + from gateway.platforms.feishu import probe_bot + + with patch("gateway.platforms.feishu._probe_bot_sdk") as mock_sdk: + mock_sdk.return_value = {"bot_name": "TestBot", "bot_open_id": "ou_bot123"} + result = probe_bot("cli_app", "secret", "feishu") + + assert result is not None + assert result["bot_name"] == "TestBot" + assert result["bot_open_id"] == "ou_bot123" + + @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", True) + def test_probe_returns_none_on_failure(self): + from gateway.platforms.feishu import 
probe_bot + + with patch("gateway.platforms.feishu._probe_bot_sdk") as mock_sdk: + mock_sdk.return_value = None + result = probe_bot("bad_id", "bad_secret", "feishu") + + assert result is None + + @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", False) + @patch("gateway.platforms.feishu.urlopen") + def test_http_fallback_when_sdk_unavailable(self, mock_urlopen_fn): + """Without lark_oapi, probe falls back to raw HTTP.""" + from gateway.platforms.feishu import probe_bot + + token_resp = _mock_urlopen({"code": 0, "tenant_access_token": "t-123"}) + bot_resp = _mock_urlopen({"code": 0, "bot": {"bot_name": "HttpBot", "open_id": "ou_http"}}) + mock_urlopen_fn.side_effect = [token_resp, bot_resp] + + result = probe_bot("cli_app", "secret", "feishu") + assert result is not None + assert result["bot_name"] == "HttpBot" + + @patch("gateway.platforms.feishu.FEISHU_AVAILABLE", False) + @patch("gateway.platforms.feishu.urlopen") + def test_http_fallback_returns_none_on_network_error(self, mock_urlopen_fn): + from gateway.platforms.feishu import probe_bot + from urllib.error import URLError + + mock_urlopen_fn.side_effect = URLError("connection refused") + result = probe_bot("cli_app", "secret", "feishu") + assert result is None + + +class TestQrRegister: + """Tests for the public qr_register entry point.""" + + @patch("gateway.platforms.feishu.probe_bot") + @patch("gateway.platforms.feishu._render_qr") + @patch("gateway.platforms.feishu._poll_registration") + @patch("gateway.platforms.feishu._begin_registration") + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_success_flow( + self, mock_init, mock_begin, mock_poll, mock_render, mock_probe + ): + from gateway.platforms.feishu import qr_register + + mock_begin.return_value = { + "device_code": "dc_123", + "qr_url": "https://example.com/qr", + "user_code": "ABCD", + "interval": 1, + "expire_in": 60, + } + mock_poll.return_value = { + "app_id": "cli_app", + "app_secret": "secret", + "domain": "feishu", + "open_id": "ou_owner", + } + mock_probe.return_value = {"bot_name": "MyBot", "bot_open_id": "ou_bot"} + + result = qr_register() + assert result is not None + assert result["app_id"] == "cli_app" + assert result["app_secret"] == "secret" + assert result["bot_name"] == "MyBot" + mock_init.assert_called_once() + mock_render.assert_called_once() + + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_returns_none_on_init_failure(self, mock_init): + from gateway.platforms.feishu import qr_register + + mock_init.side_effect = RuntimeError("not supported") + result = qr_register() + assert result is None + + @patch("gateway.platforms.feishu._render_qr") + @patch("gateway.platforms.feishu._poll_registration") + @patch("gateway.platforms.feishu._begin_registration") + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_returns_none_on_poll_failure( + self, mock_init, mock_begin, mock_poll, mock_render + ): + from gateway.platforms.feishu import qr_register + + mock_begin.return_value = { + "device_code": "dc_123", + "qr_url": "https://example.com/qr", + "user_code": "ABCD", + "interval": 1, + "expire_in": 60, + } + mock_poll.return_value = None + + result = qr_register() + assert result is None + + # -- Contract: expected errors โ†’ None, unexpected errors โ†’ propagate -- + + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_returns_none_on_network_error(self, mock_init): + """URLError (network down) is an expected failure โ†’ None.""" + from 
gateway.platforms.feishu import qr_register + from urllib.error import URLError + + mock_init.side_effect = URLError("DNS resolution failed") + result = qr_register() + assert result is None + + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_returns_none_on_json_error(self, mock_init): + """Malformed server response is an expected failure โ†’ None.""" + from gateway.platforms.feishu import qr_register + + mock_init.side_effect = json.JSONDecodeError("bad json", "", 0) + result = qr_register() + assert result is None + + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_propagates_unexpected_errors(self, mock_init): + """Bugs (e.g. AttributeError) must not be swallowed โ€” they propagate.""" + from gateway.platforms.feishu import qr_register + + mock_init.side_effect = AttributeError("some internal bug") + with pytest.raises(AttributeError, match="some internal bug"): + qr_register() + + # -- Negative paths: partial/malformed server responses -- + + @patch("gateway.platforms.feishu._render_qr") + @patch("gateway.platforms.feishu._begin_registration") + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_returns_none_when_begin_missing_device_code( + self, mock_init, mock_begin, mock_render + ): + """Server returns begin response without device_code โ†’ RuntimeError โ†’ None.""" + from gateway.platforms.feishu import qr_register + + mock_begin.side_effect = RuntimeError("Feishu registration did not return a device_code") + result = qr_register() + assert result is None + + @patch("gateway.platforms.feishu.probe_bot") + @patch("gateway.platforms.feishu._render_qr") + @patch("gateway.platforms.feishu._poll_registration") + @patch("gateway.platforms.feishu._begin_registration") + @patch("gateway.platforms.feishu._init_registration") + def test_qr_register_succeeds_even_when_probe_fails( + self, mock_init, mock_begin, mock_poll, mock_render, mock_probe + ): + """Registration succeeds but probe fails โ†’ result with bot_name=None.""" + from gateway.platforms.feishu import qr_register + + mock_begin.return_value = { + "device_code": "dc_123", + "qr_url": "https://example.com/qr", + "user_code": "ABCD", + "interval": 1, + "expire_in": 60, + } + mock_poll.return_value = { + "app_id": "cli_app", + "app_secret": "secret", + "domain": "feishu", + "open_id": "ou_owner", + } + mock_probe.return_value = None # probe failed + + result = qr_register() + assert result is not None + assert result["app_id"] == "cli_app" + assert result["bot_name"] is None + assert result["bot_open_id"] is None diff --git a/tests/gateway/test_matrix_mention.py b/tests/gateway/test_matrix_mention.py index 873b873c2..b5db0da7c 100644 --- a/tests/gateway/test_matrix_mention.py +++ b/tests/gateway/test_matrix_mention.py @@ -48,6 +48,7 @@ def _make_event( room_id="!room1:example.org", formatted_body=None, thread_id=None, + mention_user_ids=None, ): """Create a fake room message event. 
@@ -60,6 +61,9 @@ def _make_event( content["formatted_body"] = formatted_body content["format"] = "org.matrix.custom.html" + if mention_user_ids is not None: + content["m.mentions"] = {"user_ids": mention_user_ids} + relates_to = {} if thread_id: relates_to["rel_type"] = "m.thread" @@ -108,6 +112,44 @@ class TestIsBotMentioned: # "hermesbot" should not match word-boundary check for "hermes" assert not self.adapter._is_bot_mentioned("hermesbot is here") + # m.mentions.user_ids โ€” MSC3952 / Matrix v1.7 authoritative mentions + # Ported from openclaw/openclaw#64796 + + def test_m_mentions_user_ids_authoritative(self): + """m.mentions.user_ids alone is sufficient โ€” no body text needed.""" + assert self.adapter._is_bot_mentioned( + "please reply", # no @hermes anywhere in body + mention_user_ids=["@hermes:example.org"], + ) + + def test_m_mentions_user_ids_with_body_mention(self): + """Both m.mentions and body mention โ€” should still be True.""" + assert self.adapter._is_bot_mentioned( + "hey @hermes:example.org help", + mention_user_ids=["@hermes:example.org"], + ) + + def test_m_mentions_user_ids_other_user_only(self): + """m.mentions with a different user โ€” bot is NOT mentioned.""" + assert not self.adapter._is_bot_mentioned( + "hello", + mention_user_ids=["@alice:example.org"], + ) + + def test_m_mentions_user_ids_empty_list(self): + """Empty user_ids list โ€” falls through to text detection.""" + assert not self.adapter._is_bot_mentioned( + "hello everyone", + mention_user_ids=[], + ) + + def test_m_mentions_user_ids_none(self): + """None mention_user_ids โ€” falls through to text detection.""" + assert not self.adapter._is_bot_mentioned( + "hello everyone", + mention_user_ids=None, + ) + class TestStripMention: def setup_method(self): @@ -176,6 +218,44 @@ async def test_require_mention_html_pill(monkeypatch): adapter.handle_message.assert_awaited_once() +@pytest.mark.asyncio +async def test_require_mention_m_mentions_user_ids(monkeypatch): + """m.mentions.user_ids is authoritative per MSC3952 โ€” no body mention needed. + + Ported from openclaw/openclaw#64796. + """ + monkeypatch.delenv("MATRIX_REQUIRE_MENTION", raising=False) + monkeypatch.delenv("MATRIX_FREE_RESPONSE_ROOMS", raising=False) + monkeypatch.setenv("MATRIX_AUTO_THREAD", "false") + + adapter = _make_adapter() + # Body has NO mention, but m.mentions.user_ids includes the bot. 
+ event = _make_event( + "please reply", + mention_user_ids=["@hermes:example.org"], + ) + + await adapter._on_room_message(event) + adapter.handle_message.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_require_mention_m_mentions_other_user_ignored(monkeypatch): + """m.mentions.user_ids mentioning another user should NOT activate the bot.""" + monkeypatch.delenv("MATRIX_REQUIRE_MENTION", raising=False) + monkeypatch.delenv("MATRIX_FREE_RESPONSE_ROOMS", raising=False) + monkeypatch.setenv("MATRIX_AUTO_THREAD", "false") + + adapter = _make_adapter() + event = _make_event( + "hey alice check this", + mention_user_ids=["@alice:example.org"], + ) + + await adapter._on_room_message(event) + adapter.handle_message.assert_not_awaited() + + @pytest.mark.asyncio async def test_require_mention_dm_always_responds(monkeypatch): """DMs always respond regardless of mention setting.""" diff --git a/tests/gateway/test_platform_base.py b/tests/gateway/test_platform_base.py index f2d133ea2..690a82095 100644 --- a/tests/gateway/test_platform_base.py +++ b/tests/gateway/test_platform_base.py @@ -9,6 +9,8 @@ from gateway.platforms.base import ( MessageEvent, MessageType, safe_url_for_log, + utf16_len, + _prefix_within_utf16_limit, ) @@ -448,3 +450,135 @@ class TestGetHumanDelay: with patch.dict(os.environ, env): delay = BasePlatformAdapter._get_human_delay() assert 0.1 <= delay <= 0.2 + + +# --------------------------------------------------------------------------- +# utf16_len / _prefix_within_utf16_limit / truncate_message with len_fn +# --------------------------------------------------------------------------- +# Ported from nearai/ironclaw#2304 โ€” Telegram counts message length in UTF-16 +# code units, not Unicode code-points. Astral-plane characters (emoji, CJK +# Extension B) are surrogate pairs: 1 Python char but 2 UTF-16 units. 
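For orientation, here is a minimal sketch of the two helpers these tests exercise. It is consistent with the behavior pinned below, but it is an illustration rather than the actual code in gateway/platforms/base.py:

```python
# Sketch only: UTF-16-aware length and prefix helpers, assuming nothing
# beyond the stdlib. Not the gateway's real implementation.

def utf16_len(s: str) -> int:
    # Every UTF-16 code unit is exactly 2 bytes in "utf-16-le" (no BOM).
    return len(s.encode("utf-16-le")) // 2


def _prefix_within_utf16_limit(s: str, limit: int) -> str:
    if utf16_len(s) <= limit:
        return s  # fast path: the whole string already fits
    units, end = 0, 0
    for ch in s:
        width = 2 if ord(ch) > 0xFFFF else 1  # astral char = surrogate pair
        if units + width > limit:
            break  # stopping at a char boundary can never split a pair
        units, end = units + width, end + 1
    return s[:end]


assert utf16_len("hi😀") == 4
assert _prefix_within_utf16_limit("a😀b", 2) == "a"
```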
+
+
+class TestUtf16Len:
+    """Verify the UTF-16 length helper."""
+
+    def test_ascii(self):
+        assert utf16_len("hello") == 5
+
+    def test_bmp_cjk(self):
+        # CJK ideographs in the BMP are 1 code unit each
+        assert utf16_len("你好") == 2
+
+    def test_emoji_surrogate_pair(self):
+        # 😀 (U+1F600) is outside BMP → 2 UTF-16 code units
+        assert utf16_len("😀") == 2
+
+    def test_mixed(self):
+        # "hi😀" = 2 + 2 = 4 UTF-16 units
+        assert utf16_len("hi😀") == 4
+
+    def test_musical_symbol(self):
+        # 𝄞 (U+1D11E) — Musical Symbol G Clef, surrogate pair
+        assert utf16_len("𝄞") == 2
+
+    def test_empty(self):
+        assert utf16_len("") == 0
+
+
+class TestPrefixWithinUtf16Limit:
+    """Verify UTF-16-aware prefix truncation."""
+
+    def test_fits_entirely(self):
+        assert _prefix_within_utf16_limit("hello", 10) == "hello"
+
+    def test_ascii_truncation(self):
+        result = _prefix_within_utf16_limit("hello world", 5)
+        assert result == "hello"
+        assert utf16_len(result) <= 5
+
+    def test_does_not_split_surrogate_pair(self):
+        # "a😀b" = 1 + 2 + 1 = 4 UTF-16 units; limit 2 should give "a"
+        result = _prefix_within_utf16_limit("a😀b", 2)
+        assert result == "a"
+        assert utf16_len(result) <= 2
+
+    def test_emoji_at_limit(self):
+        # "😀" = 2 UTF-16 units; limit 2 should include it
+        result = _prefix_within_utf16_limit("😀x", 2)
+        assert result == "😀"
+
+    def test_all_emoji(self):
+        msg = "😀" * 10  # 20 UTF-16 units
+        result = _prefix_within_utf16_limit(msg, 6)
+        assert result == "😀😀😀"
+        assert utf16_len(result) == 6
+
+    def test_empty(self):
+        assert _prefix_within_utf16_limit("", 5) == ""
+
+
+class TestTruncateMessageUtf16:
+    """Verify truncate_message respects UTF-16 lengths when len_fn=utf16_len."""
+
+    def test_short_emoji_message_no_split(self):
+        """A short message under the UTF-16 limit should not be split."""
+        msg = "Hello 😀 world"
+        chunks = BasePlatformAdapter.truncate_message(msg, 4096, len_fn=utf16_len)
+        assert len(chunks) == 1
+        assert chunks[0] == msg
+
+    def test_emoji_near_limit_triggers_split(self):
+        """A message at 4096 codepoints but >4096 UTF-16 units must split."""
+        # 2049 emoji = 2049 codepoints but 4098 UTF-16 units → exceeds 4096
+        msg = "😀" * 2049
+        assert len(msg) == 2049  # Python len sees 2049 chars
+        assert utf16_len(msg) == 4098  # but it's 4098 UTF-16 units
+
+        # Without UTF-16 awareness, this would NOT split (2049 < 4096)
+        chunks_naive = BasePlatformAdapter.truncate_message(msg, 4096)
+        assert len(chunks_naive) == 1, "Without len_fn, no split expected"
+
+        # With UTF-16 awareness, it MUST split
+        chunks = BasePlatformAdapter.truncate_message(msg, 4096, len_fn=utf16_len)
+        assert len(chunks) > 1, "With utf16_len, message should be split"
+
+        # Each chunk must fit within the UTF-16 limit
+        for i, chunk in enumerate(chunks):
+            assert utf16_len(chunk) <= 4096, (
+                f"Chunk {i} exceeds 4096 UTF-16 units: {utf16_len(chunk)}"
+            )
+
+    def test_each_utf16_chunk_within_limit(self):
+        """All chunks produced with utf16_len must fit the limit (with small
+        slack allowed for chunking overhead)."""
+        # Mix of BMP and astral-plane characters
+        msg = ("Hello 😀 world 🎵 test 𝄞 " * 200).strip()
+        max_len = 200
+        chunks = BasePlatformAdapter.truncate_message(msg, max_len, len_fn=utf16_len)
+        for i, chunk in enumerate(chunks):
+            u16_len = utf16_len(chunk)
+            assert u16_len <= max_len + 20, (
+                f"Chunk {i} UTF-16 length {u16_len} exceeds limit {max_len + 20}"
+            )
+
+    def test_all_content_preserved(self):
+        """Splitting with utf16_len must not lose content."""
+        words = ["emoji😀", "music🎵", "cjk你好",
"plain"] * 100 + msg = " ".join(words) + chunks = BasePlatformAdapter.truncate_message(msg, 200, len_fn=utf16_len) + reassembled = " ".join(chunks) + for word in words: + assert word in reassembled, f"Word '{word}' lost during UTF-16 split" + + def test_code_blocks_preserved_with_utf16(self): + """Code block fence handling should work with utf16_len too.""" + msg = "Before\n```python\n" + "x = '๐Ÿ˜€'\n" * 200 + "```\nAfter" + chunks = BasePlatformAdapter.truncate_message(msg, 300, len_fn=utf16_len) + assert len(chunks) > 1 + # Each chunk should have balanced fences + for i, chunk in enumerate(chunks): + fence_count = chunk.count("```") + assert fence_count % 2 == 0, ( + f"Chunk {i} has unbalanced fences ({fence_count})" + ) + diff --git a/tests/gateway/test_restart_notification.py b/tests/gateway/test_restart_notification.py new file mode 100644 index 000000000..c92659649 --- /dev/null +++ b/tests/gateway/test_restart_notification.py @@ -0,0 +1,215 @@ +"""Tests for /restart notification โ€” the gateway notifies the requester on comeback.""" + +import asyncio +import json +from pathlib import Path +from unittest.mock import AsyncMock, MagicMock + +import pytest + +import gateway.run as gateway_run +from gateway.config import Platform +from gateway.platforms.base import MessageEvent, MessageType +from gateway.session import build_session_key +from tests.gateway.restart_test_helpers import ( + make_restart_runner, + make_restart_source, +) + + +# โ”€โ”€ _handle_restart_command writes .restart_notify.json โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + +@pytest.mark.asyncio +async def test_restart_command_writes_notify_file(tmp_path, monkeypatch): + """When /restart fires, the requester's routing info is persisted to disk.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + runner, _adapter = make_restart_runner() + runner.request_restart = MagicMock(return_value=True) + + source = make_restart_source(chat_id="42") + event = MessageEvent( + text="/restart", + message_type=MessageType.TEXT, + source=source, + message_id="m1", + ) + + result = await runner._handle_restart_command(event) + assert "Restarting" in result + + notify_path = tmp_path / ".restart_notify.json" + assert notify_path.exists() + data = json.loads(notify_path.read_text()) + assert data["platform"] == "telegram" + assert data["chat_id"] == "42" + assert "thread_id" not in data # no thread โ†’ omitted + + +@pytest.mark.asyncio +async def test_restart_command_uses_service_restart_under_systemd(tmp_path, monkeypatch): + """Under systemd (INVOCATION_ID set), /restart uses via_service=True.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + monkeypatch.setenv("INVOCATION_ID", "abc123") + + runner, _adapter = make_restart_runner() + runner.request_restart = MagicMock(return_value=True) + + source = make_restart_source(chat_id="42") + event = MessageEvent( + text="/restart", + message_type=MessageType.TEXT, + source=source, + message_id="m1", + ) + + await runner._handle_restart_command(event) + runner.request_restart.assert_called_once_with(detached=False, via_service=True) + + +@pytest.mark.asyncio +async def test_restart_command_uses_detached_without_systemd(tmp_path, monkeypatch): + """Without systemd, /restart uses the detached subprocess approach.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + monkeypatch.delenv("INVOCATION_ID", raising=False) + + runner, _adapter = make_restart_runner() + runner.request_restart = MagicMock(return_value=True) + + source = 
make_restart_source(chat_id="42") + event = MessageEvent( + text="/restart", + message_type=MessageType.TEXT, + source=source, + message_id="m1", + ) + + await runner._handle_restart_command(event) + runner.request_restart.assert_called_once_with(detached=True, via_service=False) + + +@pytest.mark.asyncio +async def test_restart_command_preserves_thread_id(tmp_path, monkeypatch): + """Thread ID is saved when the requester is in a threaded chat.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + runner, _adapter = make_restart_runner() + runner.request_restart = MagicMock(return_value=True) + + source = make_restart_source(chat_id="99") + source.thread_id = "topic_7" + + event = MessageEvent( + text="/restart", + message_type=MessageType.TEXT, + source=source, + message_id="m2", + ) + + await runner._handle_restart_command(event) + + data = json.loads((tmp_path / ".restart_notify.json").read_text()) + assert data["thread_id"] == "topic_7" + + +# โ”€โ”€ _send_restart_notification โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + +@pytest.mark.asyncio +async def test_send_restart_notification_delivers_and_cleans_up(tmp_path, monkeypatch): + """On startup, the notification is sent and the file is removed.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + notify_path = tmp_path / ".restart_notify.json" + notify_path.write_text(json.dumps({ + "platform": "telegram", + "chat_id": "42", + })) + + runner, adapter = make_restart_runner() + adapter.send = AsyncMock() + + await runner._send_restart_notification() + + adapter.send.assert_called_once() + call_args = adapter.send.call_args + assert call_args[0][0] == "42" # chat_id + assert "restarted" in call_args[0][1].lower() + assert call_args[1].get("metadata") is None # no thread + assert not notify_path.exists() + + +@pytest.mark.asyncio +async def test_send_restart_notification_with_thread(tmp_path, monkeypatch): + """Thread ID is passed as metadata so the message lands in the right topic.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + notify_path = tmp_path / ".restart_notify.json" + notify_path.write_text(json.dumps({ + "platform": "telegram", + "chat_id": "99", + "thread_id": "topic_7", + })) + + runner, adapter = make_restart_runner() + adapter.send = AsyncMock() + + await runner._send_restart_notification() + + call_args = adapter.send.call_args + assert call_args[1]["metadata"] == {"thread_id": "topic_7"} + assert not notify_path.exists() + + +@pytest.mark.asyncio +async def test_send_restart_notification_noop_when_no_file(tmp_path, monkeypatch): + """Nothing happens if there's no pending restart notification.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + runner, adapter = make_restart_runner() + adapter.send = AsyncMock() + + await runner._send_restart_notification() + + adapter.send.assert_not_called() + + +@pytest.mark.asyncio +async def test_send_restart_notification_skips_when_adapter_missing(tmp_path, monkeypatch): + """If the requester's platform isn't connected, clean up without crashing.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + notify_path = tmp_path / ".restart_notify.json" + notify_path.write_text(json.dumps({ + "platform": "discord", # runner only has telegram adapter + "chat_id": "42", + })) + + runner, _adapter = make_restart_runner() + + await runner._send_restart_notification() + + # File cleaned up even though we couldn't send + assert 
not notify_path.exists() + + +@pytest.mark.asyncio +async def test_send_restart_notification_cleans_up_on_send_failure( + tmp_path, monkeypatch +): + """If the adapter.send() raises, the file is still cleaned up.""" + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + + notify_path = tmp_path / ".restart_notify.json" + notify_path.write_text(json.dumps({ + "platform": "telegram", + "chat_id": "42", + })) + + runner, adapter = make_restart_runner() + adapter.send = AsyncMock(side_effect=RuntimeError("network down")) + + await runner._send_restart_notification() + + assert not notify_path.exists() # cleaned up despite error diff --git a/tests/gateway/test_run_progress_topics.py b/tests/gateway/test_run_progress_topics.py index 6b1d46567..c1dda60b5 100644 --- a/tests/gateway/test_run_progress_topics.py +++ b/tests/gateway/test_run_progress_topics.py @@ -396,6 +396,27 @@ class QueuedCommentaryAgent: } +class VerboseAgent: + """Agent that emits a tool call with args whose JSON exceeds 200 chars.""" + LONG_CODE = "x" * 300 + + def __init__(self, **kwargs): + self.tool_progress_callback = kwargs.get("tool_progress_callback") + self.tools = [] + + def run_conversation(self, message, conversation_history=None, task_id=None): + self.tool_progress_callback( + "tool.started", "execute_code", None, + {"code": self.LONG_CODE}, + ) + time.sleep(0.35) + return { + "final_response": "done", + "messages": [], + "api_calls": 1, + } + + async def _run_with_agent( monkeypatch, tmp_path, @@ -575,3 +596,45 @@ async def test_run_agent_queued_message_does_not_treat_commentary_as_final(monke assert result["final_response"] == "final response 2" assert "I'll inspect the repo first." in sent_texts assert "final response 1" in sent_texts + + +@pytest.mark.asyncio +async def test_verbose_mode_does_not_truncate_args_by_default(monkeypatch, tmp_path): + """Verbose mode with default tool_preview_length (0) should NOT truncate args. + + Previously, verbose mode capped args at 200 chars when tool_preview_length + was 0 (default). The user explicitly opted into verbose โ€” show full detail. + """ + adapter, result = await _run_with_agent( + monkeypatch, + tmp_path, + VerboseAgent, + session_id="sess-verbose-no-truncate", + config_data={"display": {"tool_progress": "verbose", "tool_preview_length": 0}}, + ) + + assert result["final_response"] == "done" + # The full 300-char 'x' string should be present, not truncated to 200 + all_content = " ".join(call["content"] for call in adapter.sent) + all_content += " ".join(call["content"] for call in adapter.edits) + assert VerboseAgent.LONG_CODE in all_content + + +@pytest.mark.asyncio +async def test_verbose_mode_respects_explicit_tool_preview_length(monkeypatch, tmp_path): + """When tool_preview_length is set to a positive value, verbose truncates to that.""" + adapter, result = await _run_with_agent( + monkeypatch, + tmp_path, + VerboseAgent, + session_id="sess-verbose-explicit-cap", + config_data={"display": {"tool_progress": "verbose", "tool_preview_length": 50}}, + ) + + assert result["final_response"] == "done" + all_content = " ".join(call["content"] for call in adapter.sent) + all_content += " ".join(call["content"] for call in adapter.edits) + # Should be truncated โ€” full 300-char string NOT present + assert VerboseAgent.LONG_CODE not in all_content + # But should still contain the truncated portion with "..." + assert "..." 
in all_content diff --git a/tests/gateway/test_session.py b/tests/gateway/test_session.py index b86d18575..50bc7c046 100644 --- a/tests/gateway/test_session.py +++ b/tests/gateway/test_session.py @@ -552,6 +552,45 @@ class TestLoadTranscriptPreferLongerSource: assert result[0]["content"] == "db-q" +class TestSessionStoreSwitchSession: + """Regression coverage for gateway /resume session switching semantics.""" + + def test_switch_session_reopens_target_session_in_db(self, tmp_path): + from hermes_state import SessionDB + + config = GatewayConfig() + with patch("gateway.session.SessionStore._ensure_loaded"): + store = SessionStore(sessions_dir=tmp_path / "sessions", config=config) + db = SessionDB(db_path=tmp_path / "state.db") + store._db = db + store._loaded = True + + source = SessionSource( + platform=Platform.FEISHU, + chat_id="chat-1", + chat_type="dm", + user_id="user-1", + user_name="tester", + ) + current_entry = store.get_or_create_session(source) + current_session_id = current_entry.session_id + + target_session_id = "old_session_abc" + db.create_session(target_session_id, source="feishu", user_id="user-1") + db.end_session(target_session_id, end_reason="user_exit") + assert db.get_session(target_session_id)["ended_at"] is not None + + switched = store.switch_session(current_entry.session_key, target_session_id) + + assert switched is not None + assert switched.session_id == target_session_id + assert db.get_session(current_session_id)["end_reason"] == "session_switch" + resumed = db.get_session(target_session_id) + assert resumed["ended_at"] is None + assert resumed["end_reason"] is None + db.close() + + class TestWhatsAppDMSessionKeyConsistency: """Regression: all session-key construction must go through build_session_key so DMs are isolated by chat_id across platforms.""" diff --git a/tests/gateway/test_session_race_guard.py b/tests/gateway/test_session_race_guard.py index 7a4f6f101..fcfaba784 100644 --- a/tests/gateway/test_session_race_guard.py +++ b/tests/gateway/test_session_race_guard.py @@ -60,7 +60,8 @@ def _make_runner(): def _make_event(text="hello", chat_id="12345"): source = SessionSource( - platform=Platform.TELEGRAM, chat_id=chat_id, chat_type="dm" + platform=Platform.TELEGRAM, chat_id=chat_id, chat_type="dm", + user_id="u1", ) return MessageEvent(text=text, message_type=MessageType.TEXT, source=source) @@ -192,7 +193,8 @@ async def test_command_messages_do_not_leave_sentinel(): _handle_message. They must NOT leave a sentinel behind.""" runner = _make_runner() source = SessionSource( - platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm" + platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", + user_id="u1", ) event = MessageEvent( text="/help", message_type=MessageType.TEXT, source=source @@ -240,9 +242,7 @@ async def test_stop_during_sentinel_force_cleans_session(): stop_event = _make_event(text="/stop") result = await runner._handle_message(stop_event) assert result is not None, "/stop during sentinel should return a message" - assert "force-stopped" in result.lower() or "unlocked" in result.lower() - - # Sentinel must be cleaned up + assert "stopped" in result.lower() assert session_key not in runner._running_agents, ( "/stop must remove sentinel so the session is unlocked" ) @@ -268,7 +268,7 @@ async def test_stop_hard_kills_running_agent(): forever โ€” showing 'writing...' 
but never producing output.""" runner = _make_runner() session_key = build_session_key( - SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm") + SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1") ) # Simulate a running (possibly hung) agent @@ -289,7 +289,7 @@ async def test_stop_hard_kills_running_agent(): # Must return a confirmation assert result is not None - assert "force-stopped" in result.lower() or "unlocked" in result.lower() + assert "stopped" in result.lower() # ------------------------------------------------------------------ @@ -301,7 +301,7 @@ async def test_stop_clears_pending_messages(): queued during the run must be discarded.""" runner = _make_runner() session_key = build_session_key( - SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm") + SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1") ) fake_agent = MagicMock() diff --git a/tests/gateway/test_setup_feishu.py b/tests/gateway/test_setup_feishu.py new file mode 100644 index 000000000..26165528e --- /dev/null +++ b/tests/gateway/test_setup_feishu.py @@ -0,0 +1,279 @@ +"""Tests for _setup_feishu() in hermes_cli/gateway.py. + +Verifies that the interactive setup writes env vars that correctly drive the +Feishu adapter: credentials, connection mode, DM policy, and group policy. +""" + +import os +from unittest.mock import patch + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _run_setup_feishu( + *, + qr_result=None, + prompt_yes_no_responses=None, + prompt_choice_responses=None, + prompt_responses=None, + existing_env=None, +): + """Run _setup_feishu() with mocked I/O and return the env vars that were saved. + + Returns a dict of {env_var_name: value} for all save_env_value calls. 
+ """ + existing_env = existing_env or {} + prompt_yes_no_responses = list(prompt_yes_no_responses or [True]) + # QR path: method(0), dm(0), group(0) โ€” 3 choices (no connection mode) + # Manual path: method(1), domain(0), connection(0), dm(0), group(0) โ€” 5 choices + prompt_choice_responses = list(prompt_choice_responses or [0, 0, 0]) + prompt_responses = list(prompt_responses or [""]) + + saved_env = {} + + def mock_save(name, value): + saved_env[name] = value + + def mock_get(name): + return existing_env.get(name, "") + + with patch("hermes_cli.gateway.save_env_value", side_effect=mock_save), \ + patch("hermes_cli.gateway.get_env_value", side_effect=mock_get), \ + patch("hermes_cli.gateway.prompt_yes_no", side_effect=prompt_yes_no_responses), \ + patch("hermes_cli.gateway.prompt_choice", side_effect=prompt_choice_responses), \ + patch("hermes_cli.gateway.prompt", side_effect=prompt_responses), \ + patch("hermes_cli.gateway.print_info"), \ + patch("hermes_cli.gateway.print_success"), \ + patch("hermes_cli.gateway.print_warning"), \ + patch("hermes_cli.gateway.print_error"), \ + patch("hermes_cli.gateway.color", side_effect=lambda t, c: t), \ + patch("gateway.platforms.feishu.qr_register", return_value=qr_result): + + from hermes_cli.gateway import _setup_feishu + _setup_feishu() + + return saved_env + + +# --------------------------------------------------------------------------- +# QR scan-to-create path +# --------------------------------------------------------------------------- + +class TestSetupFeishuQrPath: + """Tests for the QR scan-to-create happy path.""" + + def test_qr_success_saves_core_credentials(self): + env = _run_setup_feishu( + qr_result={ + "app_id": "cli_test", + "app_secret": "secret_test", + "domain": "feishu", + "open_id": "ou_owner", + "bot_name": "TestBot", + "bot_open_id": "ou_bot", + }, + prompt_yes_no_responses=[True], # Start QR + prompt_choice_responses=[0, 0, 0], # method=QR, dm=pairing, group=open + prompt_responses=[""], # home channel: skip + ) + assert env["FEISHU_APP_ID"] == "cli_test" + assert env["FEISHU_APP_SECRET"] == "secret_test" + assert env["FEISHU_DOMAIN"] == "feishu" + + def test_qr_success_does_not_persist_bot_identity(self): + """Bot identity is discovered at runtime by _hydrate_bot_identity โ€” not persisted + in env, so it stays fresh if the user renames the bot later.""" + env = _run_setup_feishu( + qr_result={ + "app_id": "cli_test", + "app_secret": "secret_test", + "domain": "feishu", + "open_id": "ou_owner", + "bot_name": "TestBot", + "bot_open_id": "ou_bot", + }, + prompt_yes_no_responses=[True], + prompt_choice_responses=[0, 0, 0], + prompt_responses=[""], + ) + assert "FEISHU_BOT_OPEN_ID" not in env + assert "FEISHU_BOT_NAME" not in env + + +# --------------------------------------------------------------------------- +# Connection mode +# --------------------------------------------------------------------------- + +class TestSetupFeishuConnectionMode: + """Connection mode: QR always websocket, manual path lets user choose.""" + + def test_qr_path_defaults_to_websocket(self): + env = _run_setup_feishu( + qr_result={ + "app_id": "cli_test", "app_secret": "s", "domain": "feishu", + "open_id": None, "bot_name": None, "bot_open_id": None, + }, + prompt_choice_responses=[0, 0, 0], # method=QR, dm=pairing, group=open + prompt_responses=[""], + ) + assert env["FEISHU_CONNECTION_MODE"] == "websocket" + + @patch("gateway.platforms.feishu.probe_bot", return_value=None) + def test_manual_path_websocket(self, _mock_probe): + env = 
_run_setup_feishu( + qr_result=None, + prompt_choice_responses=[1, 0, 0, 0, 0], # method=manual, domain=feishu, connection=ws, dm=pairing, group=open + prompt_responses=["cli_manual", "secret_manual", ""], # app_id, app_secret, home_channel + ) + assert env["FEISHU_CONNECTION_MODE"] == "websocket" + + @patch("gateway.platforms.feishu.probe_bot", return_value=None) + def test_manual_path_webhook(self, _mock_probe): + env = _run_setup_feishu( + qr_result=None, + prompt_choice_responses=[1, 0, 1, 0, 0], # method=manual, domain=feishu, connection=webhook, dm=pairing, group=open + prompt_responses=["cli_manual", "secret_manual", ""], # app_id, app_secret, home_channel + ) + assert env["FEISHU_CONNECTION_MODE"] == "webhook" + + +# --------------------------------------------------------------------------- +# DM security policy +# --------------------------------------------------------------------------- + +class TestSetupFeishuDmPolicy: + """DM policy must use platform-scoped FEISHU_ALLOW_ALL_USERS, not the global flag.""" + + def _run_with_dm_choice(self, dm_choice_idx, prompt_responses=None): + return _run_setup_feishu( + qr_result={ + "app_id": "cli_test", "app_secret": "s", "domain": "feishu", + "open_id": "ou_owner", "bot_name": None, "bot_open_id": None, + }, + prompt_yes_no_responses=[True], + prompt_choice_responses=[0, dm_choice_idx, 0], # method=QR, dm=, group=open + prompt_responses=prompt_responses or [""], + ) + + def test_pairing_sets_feishu_allow_all_false(self): + env = self._run_with_dm_choice(0) + assert env["FEISHU_ALLOW_ALL_USERS"] == "false" + assert env["FEISHU_ALLOWED_USERS"] == "" + assert "GATEWAY_ALLOW_ALL_USERS" not in env + + def test_allow_all_sets_feishu_allow_all_true(self): + env = self._run_with_dm_choice(1) + assert env["FEISHU_ALLOW_ALL_USERS"] == "true" + assert env["FEISHU_ALLOWED_USERS"] == "" + assert "GATEWAY_ALLOW_ALL_USERS" not in env + + def test_allowlist_sets_feishu_allow_all_false_with_list(self): + env = self._run_with_dm_choice(2, prompt_responses=["ou_user1,ou_user2", ""]) + assert env["FEISHU_ALLOW_ALL_USERS"] == "false" + assert env["FEISHU_ALLOWED_USERS"] == "ou_user1,ou_user2" + assert "GATEWAY_ALLOW_ALL_USERS" not in env + + def test_allowlist_prepopulates_with_scan_owner_open_id(self): + """When open_id is available from QR scan, it should be the default allowlist value.""" + # We return the owner's open_id from prompt (+ empty home channel). 
+        env = self._run_with_dm_choice(2, prompt_responses=["ou_owner", ""])
+        assert env["FEISHU_ALLOWED_USERS"] == "ou_owner"
+
+
+# ---------------------------------------------------------------------------
+# Group policy
+# ---------------------------------------------------------------------------
+
+class TestSetupFeishuGroupPolicy:
+
+    def test_open_with_mention(self):
+        env = _run_setup_feishu(
+            qr_result={
+                "app_id": "cli_test", "app_secret": "s", "domain": "feishu",
+                "open_id": None, "bot_name": None, "bot_open_id": None,
+            },
+            prompt_yes_no_responses=[True],
+            prompt_choice_responses=[0, 0, 0],  # method=QR, dm=pairing, group=open
+            prompt_responses=[""],
+        )
+        assert env["FEISHU_GROUP_POLICY"] == "open"
+
+    def test_disabled(self):
+        env = _run_setup_feishu(
+            qr_result={
+                "app_id": "cli_test", "app_secret": "s", "domain": "feishu",
+                "open_id": None, "bot_name": None, "bot_open_id": None,
+            },
+            prompt_yes_no_responses=[True],
+            prompt_choice_responses=[0, 0, 1],  # method=QR, dm=pairing, group=disabled
+            prompt_responses=[""],
+        )
+        assert env["FEISHU_GROUP_POLICY"] == "disabled"
+
+
+# ---------------------------------------------------------------------------
+# Adapter integration: env vars → FeishuAdapterSettings
+# ---------------------------------------------------------------------------
+
+class TestSetupFeishuAdapterIntegration:
+    """Verify that env vars written by _setup_feishu() produce a valid adapter config.
+
+    This bridges the gap between 'setup wrote the right env vars' and
+    'the adapter will actually initialize correctly from those vars'.
+    """
+
+    def _make_env_from_setup(self, dm_idx=0, group_idx=0):
+        """Run _setup_feishu via QR path and return the env vars it would write."""
+        return _run_setup_feishu(
+            qr_result={
+                "app_id": "cli_test_app",
+                "app_secret": "test_secret_value",
+                "domain": "feishu",
+                "open_id": "ou_owner",
+                "bot_name": "IntegrationBot",
+                "bot_open_id": "ou_bot_integration",
+            },
+            prompt_yes_no_responses=[True],
+            prompt_choice_responses=[0, dm_idx, group_idx],  # method=QR, dm, group
+            prompt_responses=[""],
+        )
+
+    @patch.dict(os.environ, {}, clear=True)
+    def test_qr_env_produces_valid_adapter_settings(self):
+        """QR setup → adapter initializes with websocket mode."""
+        env = self._make_env_from_setup()
+
+        with patch.dict(os.environ, env, clear=True):
+            from gateway.config import PlatformConfig
+            from gateway.platforms.feishu import FeishuAdapter
+            adapter = FeishuAdapter(PlatformConfig())
+            assert adapter._app_id == "cli_test_app"
+            assert adapter._app_secret == "test_secret_value"
+            assert adapter._domain_name == "feishu"
+            assert adapter._connection_mode == "websocket"
+
+    @patch.dict(os.environ, {}, clear=True)
+    def test_open_dm_env_sets_correct_adapter_state(self):
+        """Setup with 'allow all DMs' → adapter sees allow-all flag."""
+        env = self._make_env_from_setup(dm_idx=1)
+
+        with patch.dict(os.environ, env, clear=True):
+            from gateway.platforms.feishu import FeishuAdapter
+            from gateway.config import PlatformConfig
+            # Verify adapter initializes without error and env var is correct.
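+            # (Assumption: allow-all DM gating is applied per incoming message
+            # rather than cached as an adapter attribute, hence the assertion
+            # on the env var itself.)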
+            FeishuAdapter(PlatformConfig())
+            assert os.getenv("FEISHU_ALLOW_ALL_USERS") == "true"
+
+    @patch.dict(os.environ, {}, clear=True)
+    def test_group_open_env_sets_adapter_group_policy(self):
+        """Setup with 'open groups' → adapter group_policy is 'open'."""
+        env = self._make_env_from_setup(group_idx=0)
+
+        with patch.dict(os.environ, env, clear=True):
+            from gateway.config import PlatformConfig
+            from gateway.platforms.feishu import FeishuAdapter
+            adapter = FeishuAdapter(PlatformConfig())
+            assert adapter._group_policy == "open"
diff --git a/tests/gateway/test_status.py b/tests/gateway/test_status.py
index 16d4bfc5e..4b9675e72 100644
--- a/tests/gateway/test_status.py
+++ b/tests/gateway/test_status.py
@@ -209,6 +209,33 @@ class TestScopedLocks:
         assert payload["pid"] == os.getpid()
         assert payload["metadata"]["platform"] == "telegram"
 
+    def test_acquire_scoped_lock_recovers_empty_lock_file(self, tmp_path, monkeypatch):
+        """Empty lock file (0 bytes) left by a crashed process should be treated as stale."""
+        monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))
+        lock_path = tmp_path / "locks" / "slack-app-token-2bb80d537b1da3e3.lock"
+        lock_path.parent.mkdir(parents=True, exist_ok=True)
+        lock_path.write_text("")  # simulate crash between O_CREAT and json.dump
+
+        acquired, existing = status.acquire_scoped_lock("slack-app-token", "secret", metadata={"platform": "slack"})
+
+        assert acquired is True
+        payload = json.loads(lock_path.read_text())
+        assert payload["pid"] == os.getpid()
+        assert payload["metadata"]["platform"] == "slack"
+
+    def test_acquire_scoped_lock_recovers_corrupt_lock_file(self, tmp_path, monkeypatch):
+        """Lock file with invalid JSON should be treated as stale."""
+        monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))
+        lock_path = tmp_path / "locks" / "slack-app-token-2bb80d537b1da3e3.lock"
+        lock_path.parent.mkdir(parents=True, exist_ok=True)
+        lock_path.write_text("{truncated")  # simulate partial write
+
+        acquired, existing = status.acquire_scoped_lock("slack-app-token", "secret", metadata={"platform": "slack"})
+
+        assert acquired is True
+        payload = json.loads(lock_path.read_text())
+        assert payload["pid"] == os.getpid()
+
     def test_release_scoped_lock_only_removes_current_owner(self, tmp_path, monkeypatch):
         monkeypatch.setenv("HERMES_GATEWAY_LOCK_DIR", str(tmp_path / "locks"))
diff --git a/tests/gateway/test_telegram_photo_interrupts.py b/tests/gateway/test_telegram_photo_interrupts.py
index 9235e539d..e808e68db 100644
--- a/tests/gateway/test_telegram_photo_interrupts.py
+++ b/tests/gateway/test_telegram_photo_interrupts.py
@@ -29,7 +29,7 @@ def _make_runner():
 @pytest.mark.asyncio
 async def test_handle_message_does_not_priority_interrupt_photo_followup():
     runner = _make_runner()
-    source = SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm")
+    source = SessionSource(platform=Platform.TELEGRAM, chat_id="12345", chat_type="dm", user_id="u1")
     session_key = build_session_key(source)
     running_agent = MagicMock()
     runner._running_agents[session_key] = running_agent
diff --git a/tests/gateway/test_telegram_reply_mode.py b/tests/gateway/test_telegram_reply_mode.py
index 1218afa0c..a433b1801 100644
--- a/tests/gateway/test_telegram_reply_mode.py
+++ b/tests/gateway/test_telegram_reply_mode.py
@@ -121,7 +121,7 @@ class TestSendWithReplyToMode:
         adapter = adapter_factory(reply_to_mode="off")
         adapter._bot = MagicMock()
         adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1))
-
adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -133,7 +133,7 @@ class TestSendWithReplyToMode: adapter = adapter_factory(reply_to_mode="first") adapter._bot = MagicMock() adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -148,7 +148,7 @@ class TestSendWithReplyToMode: adapter = adapter_factory(reply_to_mode="all") adapter._bot = MagicMock() adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2", "chunk3"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2", "chunk3"] await adapter.send("12345", "test content", reply_to="999") @@ -162,7 +162,7 @@ class TestSendWithReplyToMode: adapter = adapter_factory(reply_to_mode="all") adapter._bot = MagicMock() adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) - adapter.truncate_message = lambda content, max_len: ["chunk1", "chunk2"] + adapter.truncate_message = lambda content, max_len, **kw: ["chunk1", "chunk2"] await adapter.send("12345", "test content", reply_to=None) @@ -175,7 +175,7 @@ class TestSendWithReplyToMode: adapter = adapter_factory(reply_to_mode="first") adapter._bot = MagicMock() adapter._bot.send_message = AsyncMock(return_value=MagicMock(message_id=1)) - adapter.truncate_message = lambda content, max_len: ["single chunk"] + adapter.truncate_message = lambda content, max_len, **kw: ["single chunk"] await adapter.send("12345", "test", reply_to="999") diff --git a/tests/gateway/test_voice_command.py b/tests/gateway/test_voice_command.py index 0638452f0..f0c3171d6 100644 --- a/tests/gateway/test_voice_command.py +++ b/tests/gateway/test_voice_command.py @@ -417,6 +417,7 @@ class TestDiscordPlayTtsSkip: adapter.config = config adapter._voice_clients = {} adapter._voice_text_channels = {} + adapter._voice_sources = {} adapter._voice_timeout_tasks = {} adapter._voice_receivers = {} adapter._voice_listen_tasks = {} @@ -702,13 +703,18 @@ class TestVoiceChannelCommands: mock_adapter.join_voice_channel = AsyncMock(return_value=True) mock_adapter.get_user_voice_channel = AsyncMock(return_value=mock_channel) mock_adapter._voice_text_channels = {} + mock_adapter._voice_sources = {} mock_adapter._voice_input_callback = None event = self._make_discord_event() + event.source.chat_type = "group" + event.source.chat_name = "Hermes Server / #general" runner.adapters[event.source.platform] = mock_adapter result = await runner._handle_voice_channel_join(event) assert "joined" in result.lower() assert "General" in result assert runner._voice_mode["123"] == "all" + assert mock_adapter._voice_sources[111]["chat_id"] == "123" + assert mock_adapter._voice_sources[111]["chat_type"] == "group" @pytest.mark.asyncio async def test_join_failure(self, runner): @@ -815,6 +821,7 @@ class TestVoiceChannelCommands: from gateway.config import Platform mock_adapter = AsyncMock() mock_adapter._voice_text_channels = {111: 123} + mock_adapter._voice_sources = {} mock_channel = AsyncMock() mock_adapter._client = MagicMock() mock_adapter._client.get_channel = 
MagicMock(return_value=mock_channel) @@ -828,12 +835,45 @@ class TestVoiceChannelCommands: assert event.source.chat_id == "123" assert event.source.chat_type == "channel" + @pytest.mark.asyncio + async def test_input_reuses_bound_source_metadata(self, runner): + """Voice input should share the linked text channel session metadata.""" + from gateway.config import Platform + + bound_source = SessionSource( + chat_id="123", + chat_name="Hermes Server / #general", + chat_type="group", + user_id="user1", + user_name="user1", + platform=Platform.DISCORD, + ) + + mock_adapter = AsyncMock() + mock_adapter._voice_text_channels = {111: 123} + mock_adapter._voice_sources = {111: bound_source.to_dict()} + mock_channel = AsyncMock() + mock_adapter._client = MagicMock() + mock_adapter._client.get_channel = MagicMock(return_value=mock_channel) + mock_adapter.handle_message = AsyncMock() + runner.adapters[Platform.DISCORD] = mock_adapter + + await runner._handle_voice_channel_input(111, 42, "Hello from VC") + + mock_adapter.handle_message.assert_called_once() + event = mock_adapter.handle_message.call_args[0][0] + assert event.source.chat_id == "123" + assert event.source.chat_type == "group" + assert event.source.chat_name == "Hermes Server / #general" + assert event.source.user_id == "42" + @pytest.mark.asyncio async def test_input_posts_transcript_in_text_channel(self, runner): """Voice input sends transcript message to text channel.""" from gateway.config import Platform mock_adapter = AsyncMock() mock_adapter._voice_text_channels = {111: 123} + mock_adapter._voice_sources = {} mock_channel = AsyncMock() mock_adapter._client = MagicMock() mock_adapter._client.get_channel = MagicMock(return_value=mock_channel) @@ -892,6 +932,7 @@ class TestDiscordVoiceChannelMethods: adapter._client = MagicMock() adapter._voice_clients = {} adapter._voice_text_channels = {} + adapter._voice_sources = {} adapter._voice_timeout_tasks = {} adapter._voice_receivers = {} adapter._voice_listen_tasks = {} @@ -926,6 +967,7 @@ class TestDiscordVoiceChannelMethods: mock_vc.disconnect = AsyncMock() adapter._voice_clients[111] = mock_vc adapter._voice_text_channels[111] = 123 + adapter._voice_sources[111] = {"chat_id": "123", "chat_type": "group"} mock_receiver = MagicMock() adapter._voice_receivers[111] = mock_receiver @@ -944,6 +986,7 @@ class TestDiscordVoiceChannelMethods: mock_timeout.cancel.assert_called_once() assert 111 not in adapter._voice_clients assert 111 not in adapter._voice_text_channels + assert 111 not in adapter._voice_sources assert 111 not in adapter._voice_receivers @pytest.mark.asyncio @@ -1670,6 +1713,7 @@ class TestVoiceTimeoutCleansRunnerState: adapter.config = config adapter._voice_clients = {} adapter._voice_text_channels = {} + adapter._voice_sources = {} adapter._voice_timeout_tasks = {} adapter._voice_receivers = {} adapter._voice_listen_tasks = {} @@ -1759,6 +1803,7 @@ class TestPlaybackTimeout: adapter.config = config adapter._voice_clients = {} adapter._voice_text_channels = {} + adapter._voice_sources = {} adapter._voice_timeout_tasks = {} adapter._voice_receivers = {} adapter._voice_listen_tasks = {} @@ -1939,6 +1984,7 @@ class TestVoiceChannelAwareness: adapter = object.__new__(DiscordAdapter) adapter._voice_clients = {} adapter._voice_text_channels = {} + adapter._voice_sources = {} adapter._voice_receivers = {} adapter._client = MagicMock() adapter._client.user = SimpleNamespace(id=99999, name="HermesBot") @@ -2408,6 +2454,7 @@ class TestVoiceTTSPlayback: adapter.config = config 
        adapter._voice_clients = {}
         adapter._voice_text_channels = {}
+        adapter._voice_sources = {}
         adapter._voice_receivers = {}
         return adapter
@@ -2587,6 +2634,7 @@ class TestUDPKeepalive:
         adapter.config = config
         adapter._voice_clients = {}
         adapter._voice_text_channels = {}
+        adapter._voice_sources = {}
         adapter._voice_receivers = {}
         adapter._voice_listen_tasks = {}
diff --git a/tests/gateway/test_weak_credential_guard.py b/tests/gateway/test_weak_credential_guard.py
new file mode 100644
index 000000000..7d6ea84b3
--- /dev/null
+++ b/tests/gateway/test_weak_credential_guard.py
@@ -0,0 +1,141 @@
+"""Tests for gateway weak credential rejection at startup.
+
+Ported from openclaw/openclaw#64586: rejects known-weak placeholder
+tokens at gateway startup instead of letting them silently fail
+against platform APIs.
+"""
+
+import logging
+
+import pytest
+
+from gateway.config import PlatformConfig, Platform, _validate_gateway_config
+
+
+# ---------------------------------------------------------------------------
+# Helper: create a minimal GatewayConfig with one enabled platform
+# ---------------------------------------------------------------------------
+
+
+def _make_gateway_config(platform, token, enabled=True, **extra_kwargs):
+    """Create a minimal GatewayConfig-like object for validation testing."""
+    from gateway.config import GatewayConfig
+
+    config = GatewayConfig(platforms={})
+    pconfig = PlatformConfig(enabled=enabled, token=token, **extra_kwargs)
+    config.platforms[platform] = pconfig
+    return config
+
+
+def _validate_and_return(config):
+    """Call _validate_gateway_config and return the config (mutated in place)."""
+    _validate_gateway_config(config)
+    return config
+
+
+# ---------------------------------------------------------------------------
+# Unit tests: platform token placeholder rejection
+# ---------------------------------------------------------------------------
+
+
+class TestPlatformTokenPlaceholderGuard:
+    """Verify that _validate_gateway_config disables platforms with placeholder tokens."""
+
+    def test_rejects_triple_asterisk(self, caplog):
+        """'***' is the .env.example placeholder — should be rejected."""
+        config = _make_gateway_config(Platform.TELEGRAM, "***")
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.TELEGRAM].enabled is False
+        assert "placeholder" in caplog.text.lower()
+
+    def test_rejects_changeme(self, caplog):
+        config = _make_gateway_config(Platform.DISCORD, "changeme")
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.DISCORD].enabled is False
+
+    def test_rejects_your_api_key(self, caplog):
+        config = _make_gateway_config(Platform.SLACK, "your_api_key")
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.SLACK].enabled is False
+
+    def test_rejects_placeholder(self, caplog):
+        config = _make_gateway_config(Platform.MATRIX, "placeholder")
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.MATRIX].enabled is False
+
+    def test_accepts_real_token(self, caplog):
+        """A real-looking bot token should pass validation."""
+        config = _make_gateway_config(
+            Platform.TELEGRAM, "7123456789:AAHdqTcvCH1vGWJxfSeOfSAs0K5PALDsaw"
+        )
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.TELEGRAM].enabled is True
+        assert "placeholder" not in caplog.text.lower()
+
+    def test_accepts_empty_token_without_error(self, caplog):
+        """Empty tokens get a warning (existing behavior), not a placeholder error."""
+        config = _make_gateway_config(Platform.TELEGRAM, "")
+        with caplog.at_level(logging.WARNING):
+            _validate_and_return(config)
+        # Empty token doesn't trigger placeholder rejection — enabled stays True
+        # (the existing empty-token warning is separate)
+        assert config.platforms[Platform.TELEGRAM].enabled is True
+
+    def test_disabled_platform_not_checked(self, caplog):
+        """Disabled platforms should not be validated."""
+        config = _make_gateway_config(Platform.TELEGRAM, "***", enabled=False)
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert "placeholder" not in caplog.text.lower()
+
+    def test_rejects_whitespace_padded_placeholder(self, caplog):
+        """Whitespace-padded placeholders should still be caught."""
+        config = _make_gateway_config(Platform.TELEGRAM, " *** ")
+        with caplog.at_level(logging.ERROR):
+            _validate_and_return(config)
+        assert config.platforms[Platform.TELEGRAM].enabled is False
+
+
+# ---------------------------------------------------------------------------
+# Integration test: API server placeholder key on network-accessible host
+# ---------------------------------------------------------------------------
+
+
+class TestAPIServerPlaceholderKeyGuard:
+    """Verify that the API server rejects placeholder keys on network hosts."""
+
+    @pytest.mark.asyncio
+    async def test_refuses_wildcard_with_placeholder_key(self):
+        from gateway.platforms.api_server import APIServerAdapter
+
+        adapter = APIServerAdapter(
+            PlatformConfig(enabled=True, extra={"host": "0.0.0.0", "key": "changeme"})
+        )
+        result = await adapter.connect()
+        assert result is False
+
+    @pytest.mark.asyncio
+    async def test_refuses_wildcard_with_asterisk_key(self):
+        from gateway.platforms.api_server import APIServerAdapter
+
+        adapter = APIServerAdapter(
+            PlatformConfig(enabled=True, extra={"host": "0.0.0.0", "key": "***"})
+        )
+        result = await adapter.connect()
+        assert result is False
+
+    def test_allows_loopback_with_placeholder_key(self):
+        """Loopback with a placeholder key is fine — not network-exposed."""
+        from gateway.platforms.api_server import APIServerAdapter
+        from gateway.platforms.base import is_network_accessible
+
+        adapter = APIServerAdapter(
+            PlatformConfig(enabled=True, extra={"host": "127.0.0.1", "key": "changeme"})
+        )
+        # On loopback the placeholder guard doesn't fire
+        assert is_network_accessible(adapter._host) is False
diff --git a/tests/gateway/test_weixin.py b/tests/gateway/test_weixin.py
index f2afe1049..4633171fe 100644
--- a/tests/gateway/test_weixin.py
+++ b/tests/gateway/test_weixin.py
@@ -30,7 +30,7 @@ class TestWeixinFormatting:
 
         assert (
             adapter.format_message(content)
-            == "【Title】\n\n**Plan**\n\nUse **bold** and [docs](https://example.com)."
+            == "【Title】\n\n**Plan**\n\nUse **bold** and docs (https://example.com)."
        )
 
     def test_format_message_rewrites_markdown_tables(self):
@@ -374,3 +374,149 @@ class TestWeixinRemoteMediaSafety:
             assert "Blocked unsafe URL" in str(exc)
         else:
             raise AssertionError("expected ValueError for unsafe URL")
+
+
+class TestWeixinMarkdownLinks:
+    """Markdown links should be converted to plaintext since WeChat can't render them."""
+
+    def test_format_message_converts_markdown_links_to_plain_text(self):
+        adapter = _make_adapter()
+
+        content = "Check [the docs](https://example.com) and [GitHub](https://github.com) for details"
+        assert (
+            adapter.format_message(content)
+            == "Check the docs (https://example.com) and GitHub (https://github.com) for details"
+        )
+
+    def test_format_message_preserves_links_inside_code_blocks(self):
+        adapter = _make_adapter()
+
+        content = "See below:\n\n```\n[link](https://example.com)\n```\n\nDone."
+        result = adapter.format_message(content)
+        assert "[link](https://example.com)" in result
+
+
+class TestWeixinBlankMessagePrevention:
+    """Regression tests for the blank-bubble bugs.
+
+    Three separate guards now prevent a blank WeChat message from ever being
+    dispatched:
+
+    1. ``_split_text_for_weixin_delivery("")`` returns ``[]`` — not ``[""]``.
+    2. ``send()`` filters out empty/whitespace-only chunks before calling
+       ``_send_text_chunk``.
+    3. ``_send_message()`` raises ``ValueError`` for empty text as a last-resort
+       safety net.
+    """
+
+    def test_split_text_returns_empty_list_for_empty_string(self):
+        adapter = _make_adapter()
+        assert adapter._split_text("") == []
+
+    def test_split_text_returns_empty_list_for_empty_string_split_per_line(self):
+        adapter = WeixinAdapter(
+            PlatformConfig(
+                enabled=True,
+                extra={
+                    "account_id": "acct",
+                    "token": "test-tok",
+                    "split_multiline_messages": True,
+                },
+            )
+        )
+        assert adapter._split_text("") == []
+
+    @patch("gateway.platforms.weixin._send_message", new_callable=AsyncMock)
+    def test_send_empty_content_does_not_call_send_message(self, send_message_mock):
+        adapter = _make_adapter()
+        adapter._session = object()
+        adapter._token = "test-token"
+        adapter._base_url = "https://weixin.example.com"
+        adapter._token_store.get = lambda account_id, chat_id: "ctx-token"
+
+        result = asyncio.run(adapter.send("wxid_test123", ""))
+        # Empty content → no chunks → no _send_message calls
+        assert result.success is True
+        send_message_mock.assert_not_awaited()
+
+    def test_send_message_rejects_empty_text(self):
+        """_send_message raises ValueError for empty/whitespace text."""
+        import pytest
+        with pytest.raises(ValueError, match="text must not be empty"):
+            asyncio.run(
+                weixin._send_message(
+                    AsyncMock(),
+                    base_url="https://example.com",
+                    token="tok",
+                    to="wxid_test",
+                    text="",
+                    context_token=None,
+                    client_id="cid",
+                )
+            )
+
+
+class TestWeixinStreamingCursorSuppression:
+    """WeChat doesn't support message editing — cursor must be suppressed."""
+
+    def test_supports_message_editing_is_false(self):
+        adapter = _make_adapter()
+        assert adapter.SUPPORTS_MESSAGE_EDITING is False
+
+
+class TestWeixinMediaBuilder:
+    """Media builder uses base64(hex_key), not base64(raw_bytes) for aes_key."""
+
+    def test_image_builder_aes_key_is_base64_of_hex(self):
+        import base64
+        adapter = _make_adapter()
+        media_type, builder = adapter._outbound_media_builder("photo.jpg")
+        assert media_type == weixin.MEDIA_IMAGE
+
+        fake_hex_key = "0123456789abcdef0123456789abcdef"
+        expected_aes = base64.b64encode(fake_hex_key.encode("ascii")).decode("ascii")
+        item = builder(
+            encrypt_query_param="eq",
+            aes_key_for_api=expected_aes,
+            ciphertext_size=1024,
+            plaintext_size=1000,
+            filename="photo.jpg",
+            rawfilemd5="abc123",
+        )
+        assert item["image_item"]["media"]["aes_key"] == expected_aes
+
+    def test_video_builder_includes_md5(self):
+        adapter = _make_adapter()
+        media_type, builder = adapter._outbound_media_builder("clip.mp4")
+        assert media_type == weixin.MEDIA_VIDEO
+
+        item = builder(
+            encrypt_query_param="eq",
+            aes_key_for_api="fakekey",
+            ciphertext_size=2048,
+            plaintext_size=2000,
+            filename="clip.mp4",
+            rawfilemd5="deadbeef",
+        )
+        assert item["video_item"]["video_md5"] == "deadbeef"
+
+    def test_voice_builder_for_audio_files(self):
+        adapter = _make_adapter()
+        media_type, builder = adapter._outbound_media_builder("note.mp3")
+        assert media_type == weixin.MEDIA_VOICE
+
+        item = builder(
+            encrypt_query_param="eq",
+            aes_key_for_api="fakekey",
+            ciphertext_size=512,
+            plaintext_size=500,
+            filename="note.mp3",
+            rawfilemd5="abc",
+        )
+        assert item["type"] == weixin.ITEM_VOICE
+        assert "voice_item" in item
+
+    def test_voice_builder_for_silk_files(self):
+        adapter = _make_adapter()
+        media_type, builder = adapter._outbound_media_builder("recording.silk")
+        assert media_type == weixin.MEDIA_VOICE
diff --git a/tests/gateway/test_whatsapp_formatting.py b/tests/gateway/test_whatsapp_formatting.py
new file mode 100644
index 000000000..129384783
--- /dev/null
+++ b/tests/gateway/test_whatsapp_formatting.py
@@ -0,0 +1,271 @@
+"""Tests for WhatsApp message formatting and chunking.
+
+Covers:
+- format_message(): markdown → WhatsApp syntax conversion
+- send(): message chunking for long responses
+- MAX_MESSAGE_LENGTH: practical UX limit
+"""
+
+import asyncio
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from gateway.config import Platform, PlatformConfig
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+def _make_adapter():
+    """Create a WhatsAppAdapter with test attributes (bypass __init__)."""
+    from gateway.platforms.whatsapp import WhatsAppAdapter
+
+    adapter = WhatsAppAdapter.__new__(WhatsAppAdapter)
+    adapter.platform = Platform.WHATSAPP
+    adapter.config = MagicMock()
+    adapter.config.extra = {}
+    adapter._bridge_port = 3000
+    adapter._bridge_script = "/tmp/test-bridge.js"
+    adapter._session_path = MagicMock()
+    adapter._bridge_log_fh = None
+    adapter._bridge_log = None
+    adapter._bridge_process = None
+    adapter._reply_prefix = None
+    adapter._running = True
+    adapter._message_handler = None
+    adapter._fatal_error_code = None
+    adapter._fatal_error_message = None
+    adapter._fatal_error_retryable = True
+    adapter._fatal_error_handler = None
+    adapter._active_sessions = {}
+    adapter._pending_messages = {}
+    adapter._background_tasks = set()
+    adapter._auto_tts_disabled_chats = set()
+    adapter._message_queue = asyncio.Queue()
+    adapter._http_session = MagicMock()
+    adapter._mention_patterns = []
+    return adapter
+
+
+class _AsyncCM:
+    """Minimal async context manager returning a fixed value."""
+
+    def __init__(self, value):
+        self.value = value
+
+    async def __aenter__(self):
+        return self.value
+
+    async def __aexit__(self, *exc):
+        return False
+
+
+# ---------------------------------------------------------------------------
+# format_message tests
+# ---------------------------------------------------------------------------
+
+class TestFormatMessage:
+    """WhatsApp markdown conversion."""
+
+    def
test_bold_double_asterisk(self): + adapter = _make_adapter() + assert adapter.format_message("**hello**") == "*hello*" + + def test_bold_double_underscore(self): + adapter = _make_adapter() + assert adapter.format_message("__hello__") == "*hello*" + + def test_strikethrough(self): + adapter = _make_adapter() + assert adapter.format_message("~~deleted~~") == "~deleted~" + + def test_headers_converted_to_bold(self): + adapter = _make_adapter() + assert adapter.format_message("# Title") == "*Title*" + assert adapter.format_message("## Subtitle") == "*Subtitle*" + assert adapter.format_message("### Deep") == "*Deep*" + + def test_links_converted(self): + adapter = _make_adapter() + result = adapter.format_message("[click here](https://example.com)") + assert result == "click here (https://example.com)" + + def test_code_blocks_protected(self): + """Code blocks should not have their content reformatted.""" + adapter = _make_adapter() + content = "before **bold** ```python\n**not bold**\n``` after **bold**" + result = adapter.format_message(content) + assert "```python\n**not bold**\n```" in result + assert result.startswith("before *bold*") + assert result.endswith("after *bold*") + + def test_inline_code_protected(self): + """Inline code should not have its content reformatted.""" + adapter = _make_adapter() + content = "use `**raw**` here" + result = adapter.format_message(content) + assert "`**raw**`" in result + assert result.startswith("use ") + + def test_empty_content(self): + adapter = _make_adapter() + assert adapter.format_message("") == "" + assert adapter.format_message(None) is None + + def test_plain_text_unchanged(self): + adapter = _make_adapter() + assert adapter.format_message("hello world") == "hello world" + + def test_already_whatsapp_italic(self): + """Single *italic* should pass through unchanged.""" + adapter = _make_adapter() + # After bold conversion, *text* is WhatsApp italic + assert adapter.format_message("*italic*") == "*italic*" + + def test_multiline_mixed(self): + adapter = _make_adapter() + content = "# Header\n\n**Bold text** and ~~strike~~\n\n```\ncode\n```" + result = adapter.format_message(content) + assert "*Header*" in result + assert "*Bold text*" in result + assert "~strike~" in result + assert "```\ncode\n```" in result + + +# --------------------------------------------------------------------------- +# MAX_MESSAGE_LENGTH tests +# --------------------------------------------------------------------------- + +class TestMessageLimits: + """WhatsApp message length limits.""" + + def test_max_message_length_is_practical(self): + from gateway.platforms.whatsapp import WhatsAppAdapter + assert WhatsAppAdapter.MAX_MESSAGE_LENGTH == 4096 + + +# --------------------------------------------------------------------------- +# send() chunking tests +# --------------------------------------------------------------------------- + +class TestSendChunking: + """WhatsApp send() splits long messages into chunks.""" + + @pytest.mark.asyncio + async def test_short_message_single_send(self): + adapter = _make_adapter() + resp = MagicMock(status=200) + resp.json = AsyncMock(return_value={"messageId": "msg1"}) + adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp)) + + result = await adapter.send("chat1", "short message") + assert result.success + # Only one call to bridge /send + assert adapter._http_session.post.call_count == 1 + + @pytest.mark.asyncio + async def test_long_message_chunked(self): + adapter = _make_adapter() + resp = MagicMock(status=200) + 
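+        # Stubbed bridge reply, per the contract these tests assume: HTTP 200
+        # with a JSON body carrying "messageId" counts as a successful /send.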
resp.json = AsyncMock(return_value={"messageId": "msg1"}) + adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp)) + + # Create a message longer than MAX_MESSAGE_LENGTH (4096) + long_msg = "a " * 3000 # ~6000 chars + + result = await adapter.send("chat1", long_msg) + assert result.success + # Should have made multiple calls + assert adapter._http_session.post.call_count > 1 + + @pytest.mark.asyncio + async def test_empty_message_no_send(self): + adapter = _make_adapter() + result = await adapter.send("chat1", "") + assert result.success + assert adapter._http_session.post.call_count == 0 + + @pytest.mark.asyncio + async def test_whitespace_only_no_send(self): + adapter = _make_adapter() + result = await adapter.send("chat1", " \n ") + assert result.success + assert adapter._http_session.post.call_count == 0 + + @pytest.mark.asyncio + async def test_format_applied_before_send(self): + """Markdown should be converted to WhatsApp format before sending.""" + adapter = _make_adapter() + resp = MagicMock(status=200) + resp.json = AsyncMock(return_value={"messageId": "msg1"}) + adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp)) + + await adapter.send("chat1", "**bold text**") + + # Check the payload sent to the bridge + call_args = adapter._http_session.post.call_args + payload = call_args.kwargs.get("json") or call_args[1].get("json") + assert payload["message"] == "*bold text*" + + @pytest.mark.asyncio + async def test_reply_to_only_on_first_chunk(self): + """reply_to should only be set on the first chunk.""" + adapter = _make_adapter() + resp = MagicMock(status=200) + resp.json = AsyncMock(return_value={"messageId": "msg1"}) + adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp)) + + long_msg = "word " * 2000 # ~10000 chars, multiple chunks + + await adapter.send("chat1", long_msg, reply_to="orig123") + + calls = adapter._http_session.post.call_args_list + assert len(calls) > 1 + + # First chunk should have replyTo + first_payload = calls[0].kwargs.get("json") or calls[0][1].get("json") + assert first_payload.get("replyTo") == "orig123" + + # Subsequent chunks should NOT have replyTo + for call in calls[1:]: + payload = call.kwargs.get("json") or call[1].get("json") + assert "replyTo" not in payload + + @pytest.mark.asyncio + async def test_bridge_error_returns_failure(self): + adapter = _make_adapter() + resp = MagicMock(status=500) + resp.text = AsyncMock(return_value="Internal Server Error") + adapter._http_session.post = MagicMock(return_value=_AsyncCM(resp)) + + result = await adapter.send("chat1", "hello") + assert not result.success + assert "Internal Server Error" in result.error + + @pytest.mark.asyncio + async def test_not_connected_returns_failure(self): + adapter = _make_adapter() + adapter._running = False + + result = await adapter.send("chat1", "hello") + assert not result.success + assert "Not connected" in result.error + + +# --------------------------------------------------------------------------- +# display_config tier classification +# --------------------------------------------------------------------------- + +class TestWhatsAppTier: + """WhatsApp should be classified as TIER_MEDIUM.""" + + def test_whatsapp_streaming_follows_global(self): + from gateway.display_config import resolve_display_setting + # TIER_MEDIUM has streaming: None (follow global), not False + assert resolve_display_setting({}, "whatsapp", "streaming") is None + + def test_whatsapp_tool_progress_is_new(self): + from gateway.display_config import 
resolve_display_setting + assert resolve_display_setting({}, "whatsapp", "tool_progress") == "new" diff --git a/tests/hermes_cli/test_api_key_providers.py b/tests/hermes_cli/test_api_key_providers.py index 039799d42..0e1183471 100644 --- a/tests/hermes_cli/test_api_key_providers.py +++ b/tests/hermes_cli/test_api_key_providers.py @@ -23,9 +23,9 @@ from hermes_cli.auth import ( get_auth_status, AuthError, KIMI_CODE_BASE_URL, - _try_gh_cli_token, _resolve_kimi_base_url, ) +from hermes_cli.copilot_auth import _try_gh_cli_token # ============================================================================= @@ -68,7 +68,7 @@ class TestProviderRegistry: def test_copilot_env_vars(self): pconfig = PROVIDER_REGISTRY["copilot"] assert pconfig.api_key_env_vars == ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN") - assert pconfig.base_url_env_var == "" + assert pconfig.base_url_env_var == "COPILOT_API_BASE_URL" def test_kimi_env_vars(self): pconfig = PROVIDER_REGISTRY["kimi-coding"] @@ -381,13 +381,13 @@ class TestResolveApiKeyProviderCredentials: assert creds["source"] == "gh auth token" def test_try_gh_cli_token_uses_homebrew_path_when_not_on_path(self, monkeypatch): - monkeypatch.setattr("hermes_cli.auth.shutil.which", lambda command: None) + monkeypatch.setattr("hermes_cli.copilot_auth.shutil.which", lambda command: None) monkeypatch.setattr( - "hermes_cli.auth.os.path.isfile", + "hermes_cli.copilot_auth.os.path.isfile", lambda path: path == "/opt/homebrew/bin/gh", ) monkeypatch.setattr( - "hermes_cli.auth.os.access", + "hermes_cli.copilot_auth.os.access", lambda path, mode: path == "/opt/homebrew/bin/gh" and mode == os.X_OK, ) @@ -397,11 +397,11 @@ class TestResolveApiKeyProviderCredentials: returncode = 0 stdout = "gh-cli-secret\n" - def _fake_run(cmd, capture_output, text, timeout): + def _fake_run(cmd, **kwargs): calls.append(cmd) return _Result() - monkeypatch.setattr("hermes_cli.auth.subprocess.run", _fake_run) + monkeypatch.setattr("hermes_cli.copilot_auth.subprocess.run", _fake_run) assert _try_gh_cli_token() == "gh-cli-secret" assert calls == [["/opt/homebrew/bin/gh", "auth", "token"]] diff --git a/tests/hermes_cli/test_backup.py b/tests/hermes_cli/test_backup.py index 8ef385896..b4589dc91 100644 --- a/tests/hermes_cli/test_backup.py +++ b/tests/hermes_cli/test_backup.py @@ -1,6 +1,8 @@ """Tests for hermes backup and import commands.""" +import json import os +import sqlite3 import zipfile from argparse import Namespace from pathlib import Path @@ -232,6 +234,44 @@ class TestBackup: assert len(zips) == 1 +# --------------------------------------------------------------------------- +# _validate_backup_zip tests +# --------------------------------------------------------------------------- + +class TestValidateBackupZip: + def _make_zip(self, zip_path: Path, filenames: list[str]) -> None: + with zipfile.ZipFile(zip_path, "w") as zf: + for name in filenames: + zf.writestr(name, "dummy") + + def test_state_db_passes(self, tmp_path): + """A zip containing state.db is accepted as a valid Hermes backup.""" + from hermes_cli.backup import _validate_backup_zip + zip_path = tmp_path / "backup.zip" + self._make_zip(zip_path, ["state.db", "sessions/abc.json"]) + with zipfile.ZipFile(zip_path, "r") as zf: + ok, reason = _validate_backup_zip(zf) + assert ok, reason + + def test_old_wrong_db_name_fails(self, tmp_path): + """A zip with only hermes_state.db (old wrong name) is rejected.""" + from hermes_cli.backup import _validate_backup_zip + zip_path = tmp_path / "old.zip" + self._make_zip(zip_path, 
["hermes_state.db", "memory_store.db"]) + with zipfile.ZipFile(zip_path, "r") as zf: + ok, reason = _validate_backup_zip(zf) + assert not ok + + def test_config_yaml_passes(self, tmp_path): + """A zip containing config.yaml is accepted (existing behaviour preserved).""" + from hermes_cli.backup import _validate_backup_zip + zip_path = tmp_path / "backup.zip" + self._make_zip(zip_path, ["config.yaml", "skills/x/SKILL.md"]) + with zipfile.ZipFile(zip_path, "r") as zf: + ok, reason = _validate_backup_zip(zf) + assert ok, reason + + # --------------------------------------------------------------------------- # Import tests # --------------------------------------------------------------------------- @@ -895,3 +935,181 @@ class TestProfileRestoration: # Files should still be restored even if wrappers can't be created assert (hermes_home / "profiles" / "coder" / "config.yaml").exists() + + +# --------------------------------------------------------------------------- +# SQLite safe copy tests +# --------------------------------------------------------------------------- + +class TestSafeCopyDb: + def test_copies_valid_database(self, tmp_path): + from hermes_cli.backup import _safe_copy_db + src = tmp_path / "test.db" + dst = tmp_path / "copy.db" + + conn = sqlite3.connect(str(src)) + conn.execute("CREATE TABLE t (x INTEGER)") + conn.execute("INSERT INTO t VALUES (42)") + conn.commit() + conn.close() + + result = _safe_copy_db(src, dst) + assert result is True + + conn = sqlite3.connect(str(dst)) + rows = conn.execute("SELECT x FROM t").fetchall() + conn.close() + assert rows == [(42,)] + + def test_copies_wal_mode_database(self, tmp_path): + from hermes_cli.backup import _safe_copy_db + src = tmp_path / "wal.db" + dst = tmp_path / "copy.db" + + conn = sqlite3.connect(str(src)) + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("CREATE TABLE t (x TEXT)") + conn.execute("INSERT INTO t VALUES ('wal-test')") + conn.commit() + conn.close() + + result = _safe_copy_db(src, dst) + assert result is True + + conn = sqlite3.connect(str(dst)) + rows = conn.execute("SELECT x FROM t").fetchall() + conn.close() + assert rows == [("wal-test",)] + + +# --------------------------------------------------------------------------- +# Quick state snapshot tests +# --------------------------------------------------------------------------- + +class TestQuickSnapshot: + @pytest.fixture + def hermes_home(self, tmp_path): + """Create a fake HERMES_HOME with critical state files.""" + home = tmp_path / ".hermes" + home.mkdir() + (home / "config.yaml").write_text("model:\n provider: openrouter\n") + (home / ".env").write_text("OPENROUTER_API_KEY=test-key-123\n") + (home / "auth.json").write_text('{"providers": {}}\n') + (home / "cron").mkdir() + (home / "cron" / "jobs.json").write_text('{"jobs": []}\n') + + # Real SQLite database + db_path = home / "state.db" + conn = sqlite3.connect(str(db_path)) + conn.execute("CREATE TABLE sessions (id TEXT PRIMARY KEY, data TEXT)") + conn.execute("INSERT INTO sessions VALUES ('s1', 'hello world')") + conn.commit() + conn.close() + return home + + def test_creates_snapshot(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + assert snap_id is not None + snap_dir = hermes_home / "state-snapshots" / snap_id + assert snap_dir.is_dir() + assert (snap_dir / "manifest.json").exists() + + def test_label_in_id(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot + snap_id = 
create_quick_snapshot(label="before-upgrade", hermes_home=hermes_home) + assert "before-upgrade" in snap_id + + def test_state_db_safely_copied(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + db_copy = hermes_home / "state-snapshots" / snap_id / "state.db" + assert db_copy.exists() + + conn = sqlite3.connect(str(db_copy)) + rows = conn.execute("SELECT * FROM sessions").fetchall() + conn.close() + assert len(rows) == 1 + assert rows[0] == ("s1", "hello world") + + def test_copies_nested_files(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + assert (hermes_home / "state-snapshots" / snap_id / "cron" / "jobs.json").exists() + + def test_missing_files_skipped(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + with open(hermes_home / "state-snapshots" / snap_id / "manifest.json") as f: + meta = json.load(f) + # gateway_state.json etc. don't exist in fixture + assert "gateway_state.json" not in meta["files"] + + def test_empty_home_returns_none(self, tmp_path): + from hermes_cli.backup import create_quick_snapshot + empty = tmp_path / "empty" + empty.mkdir() + assert create_quick_snapshot(hermes_home=empty) is None + + def test_list_snapshots(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots + id1 = create_quick_snapshot(label="first", hermes_home=hermes_home) + id2 = create_quick_snapshot(label="second", hermes_home=hermes_home) + + snaps = list_quick_snapshots(hermes_home=hermes_home) + assert len(snaps) == 2 + assert snaps[0]["id"] == id2 # most recent first + assert snaps[1]["id"] == id1 + + def test_list_limit(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots + for i in range(5): + create_quick_snapshot(label=f"s{i}", hermes_home=hermes_home) + snaps = list_quick_snapshots(limit=3, hermes_home=hermes_home) + assert len(snaps) == 3 + + def test_restore_config(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, restore_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + + (hermes_home / "config.yaml").write_text("model:\n provider: anthropic\n") + assert "anthropic" in (hermes_home / "config.yaml").read_text() + + result = restore_quick_snapshot(snap_id, hermes_home=hermes_home) + assert result is True + assert "openrouter" in (hermes_home / "config.yaml").read_text() + + def test_restore_state_db(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, restore_quick_snapshot + snap_id = create_quick_snapshot(hermes_home=hermes_home) + + conn = sqlite3.connect(str(hermes_home / "state.db")) + conn.execute("INSERT INTO sessions VALUES ('s2', 'new')") + conn.commit() + conn.close() + + restore_quick_snapshot(snap_id, hermes_home=hermes_home) + + conn = sqlite3.connect(str(hermes_home / "state.db")) + rows = conn.execute("SELECT * FROM sessions").fetchall() + conn.close() + assert len(rows) == 1 + + def test_restore_nonexistent(self, hermes_home): + from hermes_cli.backup import restore_quick_snapshot + assert restore_quick_snapshot("nonexistent", hermes_home=hermes_home) is False + + def test_auto_prune(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, list_quick_snapshots, _QUICK_DEFAULT_KEEP + for i in range(_QUICK_DEFAULT_KEEP + 5): + 
create_quick_snapshot(label=f"snap-{i:03d}", hermes_home=hermes_home) + snaps = list_quick_snapshots(limit=100, hermes_home=hermes_home) + assert len(snaps) <= _QUICK_DEFAULT_KEEP + + def test_manual_prune(self, hermes_home): + from hermes_cli.backup import create_quick_snapshot, prune_quick_snapshots, list_quick_snapshots + for i in range(10): + create_quick_snapshot(label=f"s{i}", hermes_home=hermes_home) + deleted = prune_quick_snapshots(keep=3, hermes_home=hermes_home) + assert deleted == 7 + assert len(list_quick_snapshots(hermes_home=hermes_home)) == 3 diff --git a/tests/hermes_cli/test_claw.py b/tests/hermes_cli/test_claw.py index d7528890e..e32c4a1df 100644 --- a/tests/hermes_cli/test_claw.py +++ b/tests/hermes_cli/test_claw.py @@ -1,6 +1,7 @@ """Tests for hermes claw commands.""" from argparse import Namespace +import subprocess from types import ModuleType from unittest.mock import MagicMock, patch @@ -197,6 +198,11 @@ class TestClawCommand: class TestCmdMigrate: """Test the migrate command handler.""" + @pytest.fixture(autouse=True) + def _mock_openclaw_running(self): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=[]): + yield + def test_error_when_source_missing(self, tmp_path, capsys): args = Namespace( source=str(tmp_path / "nonexistent"), @@ -626,3 +632,120 @@ class TestPrintMigrationReport: claw_mod._print_migration_report(report, dry_run=False) captured = capsys.readouterr() assert "Nothing to migrate" in captured.out + + +class TestDetectOpenclawProcesses: + def test_returns_match_when_pgrep_finds_openclaw(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "linux" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + # systemd check misses, pgrep finds openclaw + mock_subprocess.run.side_effect = [ + MagicMock(returncode=1, stdout=""), # systemctl + MagicMock(returncode=0, stdout="1234\n"), # pgrep + ] + mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired + result = claw_mod._detect_openclaw_processes() + assert len(result) == 1 + assert "1234" in result[0] + + def test_returns_empty_when_pgrep_finds_nothing(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "darwin" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + mock_subprocess.run.side_effect = [ + MagicMock(returncode=1, stdout=""), # systemctl (not found) + MagicMock(returncode=1, stdout=""), # pgrep + ] + mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired + result = claw_mod._detect_openclaw_processes() + assert result == [] + + def test_detects_systemd_service(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "linux" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + mock_subprocess.run.side_effect = [ + MagicMock(returncode=0, stdout="active\n"), # systemctl + MagicMock(returncode=1, stdout=""), # pgrep + ] + mock_subprocess.TimeoutExpired = subprocess.TimeoutExpired + result = claw_mod._detect_openclaw_processes() + assert len(result) == 1 + assert "systemd" in result[0] + + def test_returns_match_on_windows_when_openclaw_exe_running(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "win32" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + mock_subprocess.run.side_effect = [ + MagicMock(returncode=0, stdout="openclaw.exe 1234 Console 1 45,056 K\n"), + ] + result = claw_mod._detect_openclaw_processes() + assert len(result) >= 1 + assert any("openclaw.exe" in r for r in result) + + def 
test_returns_match_on_windows_when_node_exe_has_openclaw_in_cmdline(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "win32" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + mock_subprocess.run.side_effect = [ + MagicMock(returncode=0, stdout=""), # tasklist openclaw.exe + MagicMock(returncode=0, stdout=""), # tasklist clawd.exe + MagicMock(returncode=0, stdout="1234\n"), # PowerShell + ] + result = claw_mod._detect_openclaw_processes() + assert len(result) >= 1 + assert any("node.exe" in r for r in result) + + def test_returns_empty_on_windows_when_nothing_found(self): + with patch.object(claw_mod, "sys") as mock_sys: + mock_sys.platform = "win32" + with patch.object(claw_mod, "subprocess") as mock_subprocess: + mock_subprocess.run.side_effect = [ + MagicMock(returncode=0, stdout=""), + MagicMock(returncode=0, stdout=""), + MagicMock(returncode=0, stdout=""), + ] + result = claw_mod._detect_openclaw_processes() + assert result == [] + + +class TestWarnIfOpenclawRunning: + def test_noop_when_not_running(self, capsys): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=[]): + claw_mod._warn_if_openclaw_running(auto_yes=False) + captured = capsys.readouterr() + assert captured.out == "" + + def test_warns_and_exits_when_running_and_user_declines(self, capsys): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]): + with patch.object(claw_mod, "prompt_yes_no", return_value=False): + with patch.object(claw_mod.sys.stdin, "isatty", return_value=True): + with pytest.raises(SystemExit) as exc_info: + claw_mod._warn_if_openclaw_running(auto_yes=False) + assert exc_info.value.code == 0 + captured = capsys.readouterr() + assert "OpenClaw appears to be running" in captured.out + + def test_warns_and_continues_when_running_and_user_accepts(self, capsys): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]): + with patch.object(claw_mod, "prompt_yes_no", return_value=True): + with patch.object(claw_mod.sys.stdin, "isatty", return_value=True): + claw_mod._warn_if_openclaw_running(auto_yes=False) + captured = capsys.readouterr() + assert "OpenClaw appears to be running" in captured.out + + def test_warns_and_continues_in_auto_yes_mode(self, capsys): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]): + claw_mod._warn_if_openclaw_running(auto_yes=True) + captured = capsys.readouterr() + assert "OpenClaw appears to be running" in captured.out + + def test_warns_and_continues_in_non_interactive_session(self, capsys): + with patch.object(claw_mod, "_detect_openclaw_processes", return_value=["openclaw process(es) (PIDs: 1234)"]): + with patch.object(claw_mod.sys.stdin, "isatty", return_value=False): + claw_mod._warn_if_openclaw_running(auto_yes=False) + captured = capsys.readouterr() + assert "OpenClaw appears to be running" in captured.out + assert "Non-interactive session" in captured.out diff --git a/tests/hermes_cli/test_config.py b/tests/hermes_cli/test_config.py index d934a8012..397027d3a 100644 --- a/tests/hermes_cli/test_config.py +++ b/tests/hermes_cli/test_config.py @@ -10,6 +10,7 @@ from hermes_cli.config import ( DEFAULT_CONFIG, get_hermes_home, ensure_hermes_home, + get_compatible_custom_providers, load_config, load_env, migrate_config, @@ -424,6 +425,146 @@ class TestAnthropicTokenMigration: assert load_env().get("ANTHROPIC_TOKEN") == "current-token" 
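+
+# Note: the tests below exercise both config schemas (the legacy
+# "custom_providers" list and the v12+ "providers" dict); the compat helper
+# is expected to resolve either source into one de-duplicated list.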
+class TestCustomProviderCompatibility:
+    """Custom provider compatibility across legacy and v12+ config schemas."""
+
+    def test_v11_upgrade_moves_custom_providers_into_providers(self, tmp_path):
+        config_path = tmp_path / "config.yaml"
+        config_path.write_text(
+            yaml.safe_dump(
+                {
+                    "_config_version": 11,
+                    "model": {
+                        "default": "openai/gpt-5.4",
+                        "provider": "openrouter",
+                    },
+                    "custom_providers": [
+                        {
+                            "name": "OpenAI Direct",
+                            "base_url": "https://api.openai.com/v1",
+                            "api_key": "test-key",
+                            "api_mode": "codex_responses",
+                            "model": "gpt-5-mini",
+                        }
+                    ],
+                    "fallback_providers": [
+                        {"provider": "openai-direct", "model": "gpt-5-mini"}
+                    ],
+                }
+            ),
+            encoding="utf-8",
+        )
+
+        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
+            migrate_config(interactive=False, quiet=True)
+            raw = yaml.safe_load(config_path.read_text(encoding="utf-8"))
+
+        assert raw["_config_version"] == 17
+        assert raw["providers"]["openai-direct"] == {
+            "api": "https://api.openai.com/v1",
+            "api_key": "test-key",
+            "default_model": "gpt-5-mini",
+            "name": "OpenAI Direct",
+            "transport": "codex_responses",
+        }
+        # custom_providers removed by migration — runtime reads via compat layer
+        assert "custom_providers" not in raw
+
+    def test_providers_dict_resolves_at_runtime(self, tmp_path):
+        """After migration deleted custom_providers, get_compatible_custom_providers
+        still finds entries from the providers dict."""
+        config_path = tmp_path / "config.yaml"
+        config_path.write_text(
+            yaml.safe_dump(
+                {
+                    "_config_version": 17,
+                    "providers": {
+                        "openai-direct": {
+                            "api": "https://api.openai.com/v1",
+                            "api_key": "test-key",
+                            "default_model": "gpt-5-mini",
+                            "name": "OpenAI Direct",
+                            "transport": "codex_responses",
+                        }
+                    },
+                }
+            ),
+            encoding="utf-8",
+        )
+
+        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
+            compatible = get_compatible_custom_providers()
+
+        assert len(compatible) == 1
+        assert compatible[0]["name"] == "OpenAI Direct"
+        assert compatible[0]["base_url"] == "https://api.openai.com/v1"
+        assert compatible[0]["provider_key"] == "openai-direct"
+        assert compatible[0]["api_mode"] == "codex_responses"
+
+    def test_compatible_custom_providers_prefers_api_then_url_then_base_url(self, tmp_path):
+        config_path = tmp_path / "config.yaml"
+        config_path.write_text(
+            yaml.safe_dump(
+                {
+                    "_config_version": 17,
+                    "providers": {
+                        "my-provider": {
+                            "name": "My Provider",
+                            "api": "https://api.example.com/v1",
+                            "url": "https://url.example.com/v1",
+                            "base_url": "https://base.example.com/v1",
+                        }
+                    },
+                }
+            ),
+            encoding="utf-8",
+        )
+
+        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
+            compatible = get_compatible_custom_providers()
+
+        assert compatible == [
+            {
+                "name": "My Provider",
+                "base_url": "https://api.example.com/v1",
+                "provider_key": "my-provider",
+            }
+        ]
+
+    def test_dedup_across_legacy_and_providers(self, tmp_path):
+        """Same name+url in both schemas should not produce duplicates."""
+        config_path = tmp_path / "config.yaml"
+        config_path.write_text(
+            yaml.safe_dump(
+                {
+                    "_config_version": 17,
+                    "custom_providers": [
+                        {
+                            "name": "OpenAI Direct",
+                            "base_url": "https://api.openai.com/v1",
+                            "api_key": "legacy-key",
+                        }
+                    ],
+                    "providers": {
+                        "openai-direct": {
+                            "api": "https://api.openai.com/v1",
+                            "api_key": "new-key",
+                            "name": "OpenAI Direct",
+                        }
+                    },
+                }
+            ),
+            encoding="utf-8",
+        )
+
+        with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
+            compatible = get_compatible_custom_providers()
+
+        assert len(compatible) == 1
+        # Legacy
entry wins (read first) + assert compatible[0]["api_key"] == "legacy-key" + + class TestInterimAssistantMessageConfig: """Test the explicit gateway interim-message config gate.""" @@ -441,6 +582,6 @@ class TestInterimAssistantMessageConfig: migrate_config(interactive=False, quiet=True) raw = yaml.safe_load(config_path.read_text(encoding="utf-8")) - assert raw["_config_version"] == 16 + assert raw["_config_version"] == 17 assert raw["display"]["tool_progress"] == "off" assert raw["display"]["interim_assistant_messages"] is True diff --git a/tests/hermes_cli/test_container_aware_cli.py b/tests/hermes_cli/test_container_aware_cli.py index 9e21c0b8d..4422df845 100644 --- a/tests/hermes_cli/test_container_aware_cli.py +++ b/tests/hermes_cli/test_container_aware_cli.py @@ -12,49 +12,10 @@ from unittest.mock import MagicMock, patch import pytest from hermes_cli.config import ( - _is_inside_container, get_container_exec_info, ) -# ============================================================================= -# _is_inside_container -# ============================================================================= - - -def test_is_inside_container_dockerenv(): - """Detects /.dockerenv marker file.""" - with patch("os.path.exists") as mock_exists: - mock_exists.side_effect = lambda p: p == "/.dockerenv" - assert _is_inside_container() is True - - -def test_is_inside_container_containerenv(): - """Detects Podman's /run/.containerenv marker.""" - with patch("os.path.exists") as mock_exists: - mock_exists.side_effect = lambda p: p == "/run/.containerenv" - assert _is_inside_container() is True - - -def test_is_inside_container_cgroup_docker(): - """Detects 'docker' in /proc/1/cgroup.""" - with patch("os.path.exists", return_value=False), \ - patch("builtins.open", create=True) as mock_open: - mock_open.return_value.__enter__ = lambda s: s - mock_open.return_value.__exit__ = MagicMock(return_value=False) - mock_open.return_value.read = MagicMock( - return_value="12:memory:/docker/abc123\n" - ) - assert _is_inside_container() is True - - -def test_is_inside_container_false_on_host(): - """Returns False when none of the container indicators are present.""" - with patch("os.path.exists", return_value=False), \ - patch("builtins.open", side_effect=OSError("no such file")): - assert _is_inside_container() is False - - # ============================================================================= # get_container_exec_info # ============================================================================= @@ -81,7 +42,7 @@ def container_env(tmp_path, monkeypatch): def test_get_container_exec_info_returns_metadata(container_env): """Reads .container-mode and returns all fields including exec_user.""" - with patch("hermes_cli.config._is_inside_container", return_value=False): + with patch("hermes_constants.is_container", return_value=False): info = get_container_exec_info() assert info is not None @@ -93,7 +54,7 @@ def test_get_container_exec_info_returns_metadata(container_env): def test_get_container_exec_info_none_inside_container(container_env): """Returns None when we're already inside a container.""" - with patch("hermes_cli.config._is_inside_container", return_value=True): + with patch("hermes_constants.is_container", return_value=True): info = get_container_exec_info() assert info is None @@ -106,7 +67,7 @@ def test_get_container_exec_info_none_without_file(tmp_path, monkeypatch): monkeypatch.setenv("HERMES_HOME", str(hermes_home)) monkeypatch.delenv("HERMES_DEV", raising=False) - with 
patch("hermes_cli.config._is_inside_container", return_value=False): + with patch("hermes_constants.is_container", return_value=False): info = get_container_exec_info() assert info is None @@ -116,7 +77,7 @@ def test_get_container_exec_info_skipped_when_hermes_dev(container_env, monkeypa """Returns None when HERMES_DEV=1 is set (dev mode bypass).""" monkeypatch.setenv("HERMES_DEV", "1") - with patch("hermes_cli.config._is_inside_container", return_value=False): + with patch("hermes_constants.is_container", return_value=False): info = get_container_exec_info() assert info is None @@ -126,7 +87,7 @@ def test_get_container_exec_info_not_skipped_when_hermes_dev_zero(container_env, """HERMES_DEV=0 does NOT trigger bypass โ€” only '1' does.""" monkeypatch.setenv("HERMES_DEV", "0") - with patch("hermes_cli.config._is_inside_container", return_value=False): + with patch("hermes_constants.is_container", return_value=False): info = get_container_exec_info() assert info is not None @@ -143,7 +104,7 @@ def test_get_container_exec_info_defaults(): "# minimal file with no keys\n" ) - with patch("hermes_cli.config._is_inside_container", return_value=False), \ + with patch("hermes_constants.is_container", return_value=False), \ patch("hermes_cli.config.get_hermes_home", return_value=hermes_home), \ patch.dict(os.environ, {}, clear=False): os.environ.pop("HERMES_DEV", None) @@ -165,7 +126,7 @@ def test_get_container_exec_info_docker_backend(container_env): "hermes_bin=/opt/hermes/bin/hermes\n" ) - with patch("hermes_cli.config._is_inside_container", return_value=False): + with patch("hermes_constants.is_container", return_value=False): info = get_container_exec_info() assert info["backend"] == "docker" @@ -176,7 +137,7 @@ def test_get_container_exec_info_docker_backend(container_env): def test_get_container_exec_info_crashes_on_permission_error(container_env): """PermissionError propagates instead of being silently swallowed.""" - with patch("hermes_cli.config._is_inside_container", return_value=False), \ + with patch("hermes_constants.is_container", return_value=False), \ patch("builtins.open", side_effect=PermissionError("permission denied")): with pytest.raises(PermissionError): get_container_exec_info() diff --git a/tests/hermes_cli/test_debug.py b/tests/hermes_cli/test_debug.py new file mode 100644 index 000000000..f733c8ab6 --- /dev/null +++ b/tests/hermes_cli/test_debug.py @@ -0,0 +1,461 @@ +"""Tests for ``hermes debug`` CLI command and debug utilities.""" + +import os +import sys +import urllib.error +from pathlib import Path +from unittest.mock import MagicMock, patch, call + +import pytest + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def hermes_home(tmp_path, monkeypatch): + """Set up an isolated HERMES_HOME with minimal logs.""" + home = tmp_path / ".hermes" + home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(home)) + + # Create log files + logs_dir = home / "logs" + logs_dir.mkdir() + (logs_dir / "agent.log").write_text( + "2026-04-12 17:00:00 INFO agent: session started\n" + "2026-04-12 17:00:01 INFO tools.terminal: running ls\n" + "2026-04-12 17:00:02 WARNING agent: high token usage\n" + ) + (logs_dir / "errors.log").write_text( + "2026-04-12 17:00:05 ERROR gateway.run: connection lost\n" + ) + (logs_dir / "gateway.log").write_text( + "2026-04-12 17:00:10 INFO gateway.run: started\n" + ) + + return home + + +# 
--------------------------------------------------------------------------- +# Unit tests for upload helpers +# --------------------------------------------------------------------------- + +class TestUploadPasteRs: + """Test paste.rs upload path.""" + + def test_upload_paste_rs_success(self): + from hermes_cli.debug import _upload_paste_rs + + mock_resp = MagicMock() + mock_resp.read.return_value = b"https://paste.rs/abc123\n" + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + + with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp): + url = _upload_paste_rs("hello world") + + assert url == "https://paste.rs/abc123" + + def test_upload_paste_rs_bad_response(self): + from hermes_cli.debug import _upload_paste_rs + + mock_resp = MagicMock() + mock_resp.read.return_value = b"error" + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + + with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp): + with pytest.raises(ValueError, match="Unexpected response"): + _upload_paste_rs("test") + + def test_upload_paste_rs_network_error(self): + from hermes_cli.debug import _upload_paste_rs + + with patch( + "hermes_cli.debug.urllib.request.urlopen", + side_effect=urllib.error.URLError("connection refused"), + ): + with pytest.raises(urllib.error.URLError): + _upload_paste_rs("test") + + +class TestUploadDpasteCom: + """Test dpaste.com fallback upload path.""" + + def test_upload_dpaste_com_success(self): + from hermes_cli.debug import _upload_dpaste_com + + mock_resp = MagicMock() + mock_resp.read.return_value = b"https://dpaste.com/ABCDEFG\n" + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + + with patch("hermes_cli.debug.urllib.request.urlopen", return_value=mock_resp): + url = _upload_dpaste_com("hello world", expiry_days=7) + + assert url == "https://dpaste.com/ABCDEFG" + + +class TestUploadToPastebin: + """Test the combined upload with fallback.""" + + def test_tries_paste_rs_first(self): + from hermes_cli.debug import upload_to_pastebin + + with patch("hermes_cli.debug._upload_paste_rs", + return_value="https://paste.rs/test") as prs: + url = upload_to_pastebin("content") + + assert url == "https://paste.rs/test" + prs.assert_called_once() + + def test_falls_back_to_dpaste_com(self): + from hermes_cli.debug import upload_to_pastebin + + with patch("hermes_cli.debug._upload_paste_rs", + side_effect=Exception("down")), \ + patch("hermes_cli.debug._upload_dpaste_com", + return_value="https://dpaste.com/TEST") as dp: + url = upload_to_pastebin("content") + + assert url == "https://dpaste.com/TEST" + dp.assert_called_once() + + def test_raises_when_both_fail(self): + from hermes_cli.debug import upload_to_pastebin + + with patch("hermes_cli.debug._upload_paste_rs", + side_effect=Exception("err1")), \ + patch("hermes_cli.debug._upload_dpaste_com", + side_effect=Exception("err2")): + with pytest.raises(RuntimeError, match="Failed to upload"): + upload_to_pastebin("content") + + +# --------------------------------------------------------------------------- +# Log reading +# --------------------------------------------------------------------------- + +class TestReadFullLog: + """Test _read_full_log for standalone log uploads.""" + + def test_reads_small_file(self, hermes_home): + from hermes_cli.debug import _read_full_log + + content = _read_full_log("agent") + assert content is not None + assert "session started" in content + + def 
test_returns_none_for_missing(self, tmp_path, monkeypatch): + home = tmp_path / ".hermes" + home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(home)) + + from hermes_cli.debug import _read_full_log + assert _read_full_log("agent") is None + + def test_returns_none_for_empty(self, hermes_home): + # Truncate agent.log to empty + (hermes_home / "logs" / "agent.log").write_text("") + + from hermes_cli.debug import _read_full_log + assert _read_full_log("agent") is None + + def test_truncates_large_file(self, hermes_home): + """Files larger than max_bytes get tail-truncated.""" + from hermes_cli.debug import _read_full_log + + # Write a file larger than 1KB + big_content = "x" * 100 + "\n" + (hermes_home / "logs" / "agent.log").write_text(big_content * 200) + + content = _read_full_log("agent", max_bytes=1024) + assert content is not None + assert "truncated" in content + + def test_unknown_log_returns_none(self, hermes_home): + from hermes_cli.debug import _read_full_log + assert _read_full_log("nonexistent") is None + + def test_falls_back_to_rotated_file(self, hermes_home): + """When gateway.log doesn't exist, falls back to gateway.log.1.""" + from hermes_cli.debug import _read_full_log + + logs_dir = hermes_home / "logs" + # Remove the primary (if any) and create a .1 rotation + (logs_dir / "gateway.log").unlink(missing_ok=True) + (logs_dir / "gateway.log.1").write_text( + "2026-04-12 10:00:00 INFO gateway.run: rotated content\n" + ) + + content = _read_full_log("gateway") + assert content is not None + assert "rotated content" in content + + def test_prefers_primary_over_rotated(self, hermes_home): + """Primary log is used when it exists, even if .1 also exists.""" + from hermes_cli.debug import _read_full_log + + logs_dir = hermes_home / "logs" + (logs_dir / "gateway.log").write_text("primary content\n") + (logs_dir / "gateway.log.1").write_text("rotated content\n") + + content = _read_full_log("gateway") + assert "primary content" in content + assert "rotated" not in content + + def test_falls_back_when_primary_empty(self, hermes_home): + """Empty primary log falls back to .1 rotation.""" + from hermes_cli.debug import _read_full_log + + logs_dir = hermes_home / "logs" + (logs_dir / "agent.log").write_text("") + (logs_dir / "agent.log.1").write_text("rotated agent data\n") + + content = _read_full_log("agent") + assert content is not None + assert "rotated agent data" in content + + +# --------------------------------------------------------------------------- +# Debug report collection +# --------------------------------------------------------------------------- + +class TestCollectDebugReport: + """Test the debug report builder.""" + + def test_report_includes_dump_output(self, hermes_home): + from hermes_cli.debug import collect_debug_report + + with patch("hermes_cli.dump.run_dump") as mock_dump: + mock_dump.side_effect = lambda args: print( + "--- hermes dump ---\nversion: 0.8.0\n--- end dump ---" + ) + report = collect_debug_report(log_lines=50) + + assert "--- hermes dump ---" in report + assert "version: 0.8.0" in report + + def test_report_includes_agent_log(self, hermes_home): + from hermes_cli.debug import collect_debug_report + + with patch("hermes_cli.dump.run_dump"): + report = collect_debug_report(log_lines=50) + + assert "--- agent.log" in report + assert "session started" in report + + def test_report_includes_errors_log(self, hermes_home): + from hermes_cli.debug import collect_debug_report + + with patch("hermes_cli.dump.run_dump"): + report = 
collect_debug_report(log_lines=50) + + assert "--- errors.log" in report + assert "connection lost" in report + + def test_report_includes_gateway_log(self, hermes_home): + from hermes_cli.debug import collect_debug_report + + with patch("hermes_cli.dump.run_dump"): + report = collect_debug_report(log_lines=50) + + assert "--- gateway.log" in report + + def test_missing_logs_handled(self, tmp_path, monkeypatch): + home = tmp_path / ".hermes" + home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(home)) + + from hermes_cli.debug import collect_debug_report + + with patch("hermes_cli.dump.run_dump"): + report = collect_debug_report(log_lines=50) + + assert "(file not found)" in report + + +# --------------------------------------------------------------------------- +# CLI entry point — run_debug_share +# --------------------------------------------------------------------------- + +class TestRunDebugShare: + """Test the run_debug_share CLI handler.""" + + def test_local_flag_prints_full_logs(self, hermes_home, capsys): + """--local prints the report plus full log contents.""" + from hermes_cli.debug import run_debug_share + + args = MagicMock() + args.lines = 50 + args.expire = 7 + args.local = True + + with patch("hermes_cli.dump.run_dump"): + run_debug_share(args) + + out = capsys.readouterr().out + assert "--- agent.log" in out + assert "FULL agent.log" in out + assert "FULL gateway.log" in out + + def test_share_uploads_three_pastes(self, hermes_home, capsys): + """Successful share uploads report + agent.log + gateway.log.""" + from hermes_cli.debug import run_debug_share + + args = MagicMock() + args.lines = 50 + args.expire = 7 + args.local = False + + call_count = [0] + uploaded_content = [] + def _mock_upload(content, expiry_days=7): + call_count[0] += 1 + uploaded_content.append(content) + return f"https://paste.rs/paste{call_count[0]}" + + with patch("hermes_cli.dump.run_dump") as mock_dump, \ + patch("hermes_cli.debug.upload_to_pastebin", + side_effect=_mock_upload): + mock_dump.side_effect = lambda a: print("--- hermes dump ---\nversion: test\n--- end dump ---") + run_debug_share(args) + + out = capsys.readouterr().out + # Should have 3 uploads: report, agent.log, gateway.log + assert call_count[0] == 3 + assert "paste.rs/paste1" in out # Report + assert "paste.rs/paste2" in out # agent.log + assert "paste.rs/paste3" in out # gateway.log + assert "Report" in out + assert "agent.log" in out + assert "gateway.log" in out + + # Each log paste should start with the dump header + agent_paste = uploaded_content[1] + assert "--- hermes dump ---" in agent_paste + assert "--- full agent.log ---" in agent_paste + gateway_paste = uploaded_content[2] + assert "--- hermes dump ---" in gateway_paste + assert "--- full gateway.log ---" in gateway_paste + + def test_share_skips_missing_logs(self, tmp_path, monkeypatch, capsys): + """Only uploads logs that exist.""" + home = tmp_path / ".hermes" + home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(home)) + + from hermes_cli.debug import run_debug_share + + args = MagicMock() + args.lines = 50 + args.expire = 7 + args.local = False + + call_count = [0] + def _mock_upload(content, expiry_days=7): + call_count[0] += 1 + return f"https://paste.rs/paste{call_count[0]}" + + with patch("hermes_cli.dump.run_dump"), \ + patch("hermes_cli.debug.upload_to_pastebin", + side_effect=_mock_upload): + run_debug_share(args) + + out = capsys.readouterr().out + # Only the report should be uploaded (no log files exist) + assert call_count[0] == 1 + assert
"Report" in out + + def test_share_continues_on_log_upload_failure(self, hermes_home, capsys): + """Log upload failure doesn't stop the report from being shared.""" + from hermes_cli.debug import run_debug_share + + args = MagicMock() + args.lines = 50 + args.expire = 7 + args.local = False + + call_count = [0] + def _mock_upload(content, expiry_days=7): + call_count[0] += 1 + if call_count[0] > 1: + raise RuntimeError("upload failed") + return "https://paste.rs/report" + + with patch("hermes_cli.dump.run_dump"), \ + patch("hermes_cli.debug.upload_to_pastebin", + side_effect=_mock_upload): + run_debug_share(args) + + out = capsys.readouterr().out + assert "Report" in out + assert "paste.rs/report" in out + assert "failed to upload" in out + + def test_share_exits_on_report_upload_failure(self, hermes_home, capsys): + """If the main report fails to upload, exit with code 1.""" + from hermes_cli.debug import run_debug_share + + args = MagicMock() + args.lines = 50 + args.expire = 7 + args.local = False + + with patch("hermes_cli.dump.run_dump"), \ + patch("hermes_cli.debug.upload_to_pastebin", + side_effect=RuntimeError("all failed")): + with pytest.raises(SystemExit) as exc_info: + run_debug_share(args) + + assert exc_info.value.code == 1 + out = capsys.readouterr() + assert "all failed" in out.err + + +# --------------------------------------------------------------------------- +# run_debug router +# --------------------------------------------------------------------------- + +class TestRunDebug: + def test_no_subcommand_shows_usage(self, capsys): + from hermes_cli.debug import run_debug + + args = MagicMock() + args.debug_command = None + + run_debug(args) + + out = capsys.readouterr().out + assert "hermes debug share" in out + + def test_share_subcommand_routes(self, hermes_home): + from hermes_cli.debug import run_debug + + args = MagicMock() + args.debug_command = "share" + args.lines = 200 + args.expire = 7 + args.local = True + + with patch("hermes_cli.dump.run_dump"): + run_debug(args) + + +# --------------------------------------------------------------------------- +# Argparse integration +# --------------------------------------------------------------------------- + +class TestArgparseIntegration: + def test_module_imports_clean(self): + from hermes_cli.debug import run_debug, run_debug_share + assert callable(run_debug) + assert callable(run_debug_share) + + def test_cmd_debug_dispatches(self): + from hermes_cli.main import cmd_debug + + args = MagicMock() + args.debug_command = None + cmd_debug(args) diff --git a/tests/hermes_cli/test_env_sanitize_on_load.py b/tests/hermes_cli/test_env_sanitize_on_load.py new file mode 100644 index 000000000..6ac7c2cef --- /dev/null +++ b/tests/hermes_cli/test_env_sanitize_on_load.py @@ -0,0 +1,91 @@ +"""Tests for .env sanitization during load to prevent token duplication (#8908).""" + +import tempfile +from pathlib import Path +from unittest.mock import patch + + +def test_load_env_sanitizes_concatenated_lines(): + """Verify load_env() splits concatenated KEY=VALUE pairs. + + Reproduces the scenario from #8908 where a corrupted .env file + contained multiple tokens on a single line, causing the bot token + to be duplicated 8 times. 
+ """ + from hermes_cli.config import load_env + + token = "8356550917:AAGGEkzg06Hrc3Hjb3Sa1jkGVDOdU_lYy2Q" + # Simulate concatenated line: TOKEN=xxx followed immediately by another key + corrupted = f"TELEGRAM_BOT_TOKEN={token}ANTHROPIC_API_KEY=sk-ant-test123\n" + + with tempfile.NamedTemporaryFile( + mode="w", suffix=".env", delete=False, encoding="utf-8" + ) as f: + f.write(corrupted) + env_path = Path(f.name) + + try: + with patch("hermes_cli.config.get_env_path", return_value=env_path): + result = load_env() + assert result.get("TELEGRAM_BOT_TOKEN") == token, ( + f"Token should be exactly '{token}', got '{result.get('TELEGRAM_BOT_TOKEN')}'" + ) + assert result.get("ANTHROPIC_API_KEY") == "sk-ant-test123" + finally: + env_path.unlink(missing_ok=True) + + +def test_load_env_normal_file_unchanged(): + """A well-formed .env file should be parsed identically.""" + from hermes_cli.config import load_env + + content = ( + "TELEGRAM_BOT_TOKEN=mytoken123\n" + "ANTHROPIC_API_KEY=sk-ant-key\n" + "# comment\n" + "\n" + "OPENAI_API_KEY=sk-openai\n" + ) + + with tempfile.NamedTemporaryFile( + mode="w", suffix=".env", delete=False, encoding="utf-8" + ) as f: + f.write(content) + env_path = Path(f.name) + + try: + with patch("hermes_cli.config.get_env_path", return_value=env_path): + result = load_env() + assert result["TELEGRAM_BOT_TOKEN"] == "mytoken123" + assert result["ANTHROPIC_API_KEY"] == "sk-ant-key" + assert result["OPENAI_API_KEY"] == "sk-openai" + finally: + env_path.unlink(missing_ok=True) + + +def test_env_loader_sanitizes_before_dotenv(): + """Verify env_loader._sanitize_env_file_if_needed fixes corrupted files.""" + from hermes_cli.env_loader import _sanitize_env_file_if_needed + + token = "8356550917:AAGGEkzg06Hrc3Hjb3Sa1jkGVDOdU_lYy2Q" + corrupted = f"TELEGRAM_BOT_TOKEN={token}ANTHROPIC_API_KEY=sk-ant-test\n" + + with tempfile.NamedTemporaryFile( + mode="w", suffix=".env", delete=False, encoding="utf-8" + ) as f: + f.write(corrupted) + env_path = Path(f.name) + + try: + _sanitize_env_file_if_needed(env_path) + with open(env_path, encoding="utf-8") as f: + lines = f.readlines() + # Should be split into two separate lines + assert len(lines) == 2, f"Expected 2 lines, got {len(lines)}: {lines}" + assert lines[0].startswith("TELEGRAM_BOT_TOKEN=") + assert lines[1].startswith("ANTHROPIC_API_KEY=") + # Token should not contain the second key + parsed_token = lines[0].strip().split("=", 1)[1] + assert parsed_token == token + finally: + env_path.unlink(missing_ok=True) diff --git a/tests/hermes_cli/test_gateway_service.py b/tests/hermes_cli/test_gateway_service.py index cba3a8192..ec35aa997 100644 --- a/tests/hermes_cli/test_gateway_service.py +++ b/tests/hermes_cli/test_gateway_service.py @@ -394,6 +394,21 @@ class TestLaunchdServiceRecovery: class TestGatewayServiceDetection: + def test_supports_systemd_services_requires_systemctl_binary(self, monkeypatch): + monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) + monkeypatch.setattr(gateway_cli.shutil, "which", lambda name: None) + + assert gateway_cli.supports_systemd_services() is False + + def test_supports_systemd_services_returns_true_when_systemctl_present(self, monkeypatch): + monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) + monkeypatch.setattr(gateway_cli, "is_wsl", lambda: False) + monkeypatch.setattr(gateway_cli.shutil, "which", lambda name: "/usr/bin/systemctl") + + assert 
gateway_cli.supports_systemd_services() is True + def test_is_service_running_checks_system_scope_when_user_scope_is_inactive(self, monkeypatch): user_unit = SimpleNamespace(exists=lambda: True) system_unit = SimpleNamespace(exists=lambda: True) @@ -418,6 +433,23 @@ class TestGatewayServiceDetection: assert gateway_cli._is_service_running() is True + def test_is_service_running_returns_false_when_systemctl_missing(self, monkeypatch): + unit = SimpleNamespace(exists=lambda: True) + + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr( + gateway_cli, + "get_systemd_unit_path", + lambda system=False: unit, + ) + + def fake_run(*args, **kwargs): + raise FileNotFoundError("systemctl") + + monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run) + + assert gateway_cli._is_service_running() is False + class TestGatewaySystemServiceRouting: def test_systemd_restart_self_requests_graceful_restart_without_reload_or_restart(self, monkeypatch, capsys): @@ -1001,3 +1033,91 @@ class TestSystemUnitPathRemapping: # Target user paths should be present assert "/home/alice" in unit assert "WorkingDirectory=/home/alice/.hermes/hermes-agent" in unit + + +class TestDockerAwareGateway: + """Tests for Docker container awareness in gateway commands.""" + + def test_run_systemctl_raises_runtimeerror_when_missing(self, monkeypatch): + """_run_systemctl raises RuntimeError with container guidance when systemctl is absent.""" + import pytest + + def fake_run(cmd, **kwargs): + raise FileNotFoundError("systemctl") + + monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run) + + with pytest.raises(RuntimeError, match="systemctl is not available"): + gateway_cli._run_systemctl(["start", "hermes-gateway"]) + + def test_run_systemctl_passes_through_on_success(self, monkeypatch): + """_run_systemctl delegates to subprocess.run when systemctl exists.""" + calls = [] + + def fake_run(cmd, **kwargs): + calls.append(cmd) + return SimpleNamespace(returncode=0, stdout="", stderr="") + + monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run) + + result = gateway_cli._run_systemctl(["status", "hermes-gateway"]) + assert result.returncode == 0 + assert len(calls) == 1 + assert "status" in calls[0] + + def test_install_in_container_prints_docker_guidance(self, monkeypatch, capsys): + """'hermes gateway install' inside Docker exits 0 with container guidance.""" + import pytest + + monkeypatch.setattr(gateway_cli, "is_managed", lambda: False) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_wsl", lambda: False) + monkeypatch.setattr(gateway_cli, "is_container", lambda: True) + + args = SimpleNamespace(gateway_command="install", force=False, system=False, run_as_user=None) + with pytest.raises(SystemExit) as exc_info: + gateway_cli.gateway_command(args) + + assert exc_info.value.code == 0 + out = capsys.readouterr().out + assert "Docker" in out or "docker" in out + assert "restart" in out.lower() + + def test_uninstall_in_container_prints_docker_guidance(self, monkeypatch, capsys): + """'hermes gateway uninstall' inside Docker exits 0 with container guidance.""" + import pytest + + monkeypatch.setattr(gateway_cli, "is_managed", lambda: False) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False) + 
monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_container", lambda: True) + + args = SimpleNamespace(gateway_command="uninstall", system=False) + with pytest.raises(SystemExit) as exc_info: + gateway_cli.gateway_command(args) + + assert exc_info.value.code == 0 + out = capsys.readouterr().out + assert "docker" in out.lower() + + def test_start_in_container_prints_docker_guidance(self, monkeypatch, capsys): + """'hermes gateway start' inside Docker exits 0 with container guidance.""" + import pytest + + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_wsl", lambda: False) + monkeypatch.setattr(gateway_cli, "is_container", lambda: True) + + args = SimpleNamespace(gateway_command="start", system=False) + with pytest.raises(SystemExit) as exc_info: + gateway_cli.gateway_command(args) + + assert exc_info.value.code == 0 + out = capsys.readouterr().out + assert "docker" in out.lower() + assert "hermes gateway run" in out diff --git a/tests/hermes_cli/test_model_normalize.py b/tests/hermes_cli/test_model_normalize.py index 0bca8d52e..14861c37a 100644 --- a/tests/hermes_cli/test_model_normalize.py +++ b/tests/hermes_cli/test_model_normalize.py @@ -54,14 +54,19 @@ class TestAnthropicDotToHyphen: # โ”€โ”€ OpenCode Zen regression โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ -class TestOpenCodeZenDotToHyphen: - """OpenCode Zen follows Anthropic convention (dotsโ†’hyphens).""" +class TestOpenCodeZenModelNormalization: + """OpenCode Zen preserves dots for most models, but Claude stays hyphenated.""" @pytest.mark.parametrize("model,expected", [ ("claude-sonnet-4.6", "claude-sonnet-4-6"), - ("glm-4.5", "glm-4-5"), + ("opencode-zen/claude-opus-4.5", "claude-opus-4-5"), + ("glm-4.5", "glm-4.5"), + ("glm-5.1", "glm-5.1"), + ("gpt-5.4", "gpt-5.4"), + ("minimax-m2.5-free", "minimax-m2.5-free"), + ("kimi-k2.5", "kimi-k2.5"), ]) - def test_zen_converts_dots(self, model, expected): + def test_zen_normalizes_models(self, model, expected): result = normalize_model_for_provider(model, "opencode-zen") assert result == expected @@ -69,6 +74,10 @@ class TestOpenCodeZenDotToHyphen: result = normalize_model_for_provider("opencode-zen/claude-sonnet-4.6", "opencode-zen") assert result == "claude-sonnet-4-6" + def test_zen_strips_vendor_prefix_for_non_claude(self): + result = normalize_model_for_provider("opencode-zen/glm-5.1", "opencode-zen") + assert result == "glm-5.1" + # โ”€โ”€ Copilot dot preservation (regression) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ diff --git a/tests/hermes_cli/test_nous_hermes_non_agentic.py b/tests/hermes_cli/test_nous_hermes_non_agentic.py new file mode 100644 index 000000000..179d26b7c --- /dev/null +++ b/tests/hermes_cli/test_nous_hermes_non_agentic.py @@ -0,0 +1,84 @@ +"""Tests for the Nous-Hermes-3/4 non-agentic warning detector. + +Prior to this check, the warning fired on any model whose name contained +``"hermes"`` anywhere (case-insensitive). That false-positived on unrelated +local Modelfiles such as ``hermes-brain:qwen3-14b-ctx16k`` โ€” a tool-capable +Qwen3 wrapper that happens to live under the "hermes" tag namespace. 
+ +``is_nous_hermes_non_agentic`` should only match the actual Nous Research +Hermes-3 / Hermes-4 chat family. +""" + +from __future__ import annotations + +import pytest + +from hermes_cli.model_switch import ( + _HERMES_MODEL_WARNING, + _check_hermes_model_warning, + is_nous_hermes_non_agentic, +) + + +@pytest.mark.parametrize( + "model_name", + [ + "NousResearch/Hermes-3-Llama-3.1-70B", + "NousResearch/Hermes-3-Llama-3.1-405B", + "hermes-3", + "Hermes-3", + "hermes-4", + "hermes-4-405b", + "hermes_4_70b", + "openrouter/hermes3:70b", + "openrouter/nousresearch/hermes-4-405b", + "NousResearch/Hermes3", + "hermes-3.1", + ], +) +def test_matches_real_nous_hermes_chat_models(model_name: str) -> None: + assert is_nous_hermes_non_agentic(model_name), ( + f"expected {model_name!r} to be flagged as Nous Hermes 3/4" + ) + assert _check_hermes_model_warning(model_name) == _HERMES_MODEL_WARNING + + +@pytest.mark.parametrize( + "model_name", + [ + # Kyle's local Modelfile — qwen3:14b under a custom tag + "hermes-brain:qwen3-14b-ctx16k", + "hermes-brain:qwen3-14b-ctx32k", + "hermes-honcho:qwen3-8b-ctx8k", + # Plain unrelated models + "qwen3:14b", + "qwen3-coder:30b", + "qwen2.5:14b", + "claude-opus-4-6", + "anthropic/claude-sonnet-4.5", + "gpt-5", + "openai/gpt-4o", + "google/gemini-2.5-flash", + "deepseek-chat", + # Non-chat Hermes models we don't warn about + "hermes-llm-2", + "hermes2-pro", + "nous-hermes-2-mistral", + # Edge cases + "", + "hermes", # bare "hermes" isn't the 3/4 family + "hermes-brain", + "brain-hermes-3-impostor", # "hermes-3" embedded mid-name, not at a start or "/" boundary + ], +) +def test_does_not_match_unrelated_models(model_name: str) -> None: + assert not is_nous_hermes_non_agentic(model_name), ( + f"expected {model_name!r} NOT to be flagged as Nous Hermes 3/4" + ) + assert _check_hermes_model_warning(model_name) == "" + + +def test_none_like_inputs_are_safe() -> None: + assert is_nous_hermes_non_agentic("") is False + # Defensive: the helper shouldn't crash on None-ish falsy input either.
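+ # (the empty string stands in for falsy input; None itself is not passed here)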
+ assert _check_hermes_model_warning("") == "" diff --git a/tests/hermes_cli/test_profiles.py b/tests/hermes_cli/test_profiles.py index c970cb6c5..e6de2f67f 100644 --- a/tests/hermes_cli/test_profiles.py +++ b/tests/hermes_cli/test_profiles.py @@ -177,7 +177,8 @@ class TestCreateProfile: # No error; optional files just not copied assert not (profile_dir / "config.yaml").exists() assert not (profile_dir / ".env").exists() - assert not (profile_dir / "SOUL.md").exists() + # SOUL.md is always seeded with the default even when clone source lacks it + assert (profile_dir / "SOUL.md").exists() # =================================================================== diff --git a/tests/hermes_cli/test_runtime_provider_resolution.py b/tests/hermes_cli/test_runtime_provider_resolution.py index 20486a805..c7510a55b 100644 --- a/tests/hermes_cli/test_runtime_provider_resolution.py +++ b/tests/hermes_cli/test_runtime_provider_resolution.py @@ -119,6 +119,11 @@ def test_resolve_runtime_provider_falls_back_when_pool_empty(monkeypatch): def test_resolve_runtime_provider_codex(monkeypatch): + monkeypatch.setattr( + rp, + "load_pool", + lambda provider: type("P", (), {"has_credentials": lambda self: False})(), + ) monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openai-codex") monkeypatch.setattr( rp, @@ -567,6 +572,87 @@ def test_named_custom_provider_uses_saved_credentials(monkeypatch): assert resolved["source"] == "custom_provider:Local" +def test_named_custom_provider_uses_providers_dict_when_list_missing(monkeypatch): + """After v11→v12 migration deletes custom_providers, resolution should + still find entries in the providers dict via get_compatible_custom_providers.""" + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + monkeypatch.setattr( + rp, + "load_config", + lambda: { + "providers": { + "openai-direct-primary": { + "api": "https://api.openai.com/v1", + "api_key": "dir-key", + "default_model": "gpt-5-mini", + "name": "OpenAI Direct (Primary)", + "transport": "codex_responses", + } + } + }, + ) + monkeypatch.setattr( + rp, + "resolve_provider", + lambda *a, **k: (_ for _ in ()).throw( + AssertionError( + "resolve_provider should not be called for named custom providers" + ) + ), + ) + + resolved = rp.resolve_runtime_provider(requested="openai-direct-primary") + + assert resolved["provider"] == "custom" + assert resolved["api_mode"] == "codex_responses" + assert resolved["base_url"] == "https://api.openai.com/v1" + assert resolved["api_key"] == "dir-key" + assert resolved["requested_provider"] == "openai-direct-primary" + assert resolved["source"] == "custom_provider:OpenAI Direct (Primary)" + assert resolved["model"] == "gpt-5-mini" + + +def test_named_custom_provider_uses_key_env_from_providers_dict(monkeypatch): + """providers dict entries with key_env should resolve API key from env var.""" + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + monkeypatch.setenv("MYCORP_API_KEY", "env-secret") + monkeypatch.setattr( + rp, + "load_config", + lambda: { + "providers": { + "mycorp-proxy": { + "base_url": "https://proxy.example.com/v1", + "default_model": "acme-large", + "key_env": "MYCORP_API_KEY", + "name": "MyCorp Proxy", + } + } + }, + ) + monkeypatch.setattr( + rp, + "resolve_provider", + lambda *a, **k: (_ for _ in ()).throw( + AssertionError( + "resolve_provider should not be called for named custom providers" + ) + ), + ) + + resolved =
rp.resolve_runtime_provider(requested="mycorp-proxy") + + assert resolved["provider"] == "custom" + assert resolved["api_mode"] == "chat_completions" + assert resolved["base_url"] == "https://proxy.example.com/v1" + assert resolved["api_key"] == "env-secret" + assert resolved["requested_provider"] == "mycorp-proxy" + assert resolved["source"] == "custom_provider:MyCorp Proxy" + assert resolved["model"] == "acme-large" + + +def test_named_custom_provider_falls_back_to_openai_api_key(monkeypatch): monkeypatch.setenv("OPENAI_API_KEY", "env-openai-key") monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) diff --git a/tests/hermes_cli/test_setup.py b/tests/hermes_cli/test_setup.py index 4a3f5151f..2c07d3d66 100644 --- a/tests/hermes_cli/test_setup.py +++ b/tests/hermes_cli/test_setup.py @@ -1,5 +1,4 @@ -"""Tests for setup_model_provider — verifies the delegation to -select_provider_and_model() and config dict sync.""" +"""Tests for setup.py configuration flows.""" import json import sys import types @@ -8,6 +7,7 @@ import pytest from hermes_cli.auth import get_active_provider from hermes_cli.config import load_config, save_config +from hermes_cli import setup as setup_mod from hermes_cli.setup import setup_model_provider @@ -144,6 +144,85 @@ def test_setup_custom_providers_synced(tmp_path, monkeypatch): assert reloaded.get("custom_providers") == [{"name": "Local", "base_url": "http://localhost:8080/v1"}] +def test_setup_gateway_skips_service_install_when_systemctl_missing(monkeypatch, capsys): + env = { + "TELEGRAM_BOT_TOKEN": "", + "TELEGRAM_HOME_CHANNEL": "", + "DISCORD_BOT_TOKEN": "", + "DISCORD_HOME_CHANNEL": "", + "SLACK_BOT_TOKEN": "", + "SLACK_HOME_CHANNEL": "", + "MATRIX_HOMESERVER": "https://matrix.example.com", + "MATRIX_USER_ID": "@alice:example.com", + "MATRIX_PASSWORD": "", + "MATRIX_ACCESS_TOKEN": "token", + "BLUEBUBBLES_SERVER_URL": "", + "BLUEBUBBLES_HOME_CHANNEL": "", + "WHATSAPP_ENABLED": "", + "WEBHOOK_ENABLED": "", + } + + monkeypatch.setattr(setup_mod, "get_env_value", lambda key: env.get(key, "")) + monkeypatch.setattr(setup_mod, "prompt_yes_no", lambda *args, **kwargs: False) + monkeypatch.setattr("platform.system", lambda: "Linux") + + import hermes_cli.gateway as gateway_mod + + monkeypatch.setattr(gateway_mod, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_mod, "is_macos", lambda: False) + monkeypatch.setattr(gateway_mod, "_is_service_installed", lambda: False) + monkeypatch.setattr(gateway_mod, "_is_service_running", lambda: False) + + setup_mod.setup_gateway({}) + + out = capsys.readouterr().out + assert "Messaging platforms configured!"
in out + assert "Start the gateway to bring your bots online:" in out + assert "hermes gateway" in out + + +def test_setup_gateway_in_container_shows_docker_guidance(monkeypatch, capsys): + """setup_gateway() in a Docker container shows Docker-specific restart instructions.""" + env = { + "TELEGRAM_BOT_TOKEN": "", + "TELEGRAM_HOME_CHANNEL": "", + "DISCORD_BOT_TOKEN": "", + "DISCORD_HOME_CHANNEL": "", + "SLACK_BOT_TOKEN": "", + "SLACK_HOME_CHANNEL": "", + "MATRIX_HOMESERVER": "https://matrix.example.com", + "MATRIX_USER_ID": "@alice:example.com", + "MATRIX_PASSWORD": "", + "MATRIX_ACCESS_TOKEN": "token", + "BLUEBUBBLES_SERVER_URL": "", + "BLUEBUBBLES_HOME_CHANNEL": "", + "WHATSAPP_ENABLED": "", + "WEBHOOK_ENABLED": "", + } + + monkeypatch.setattr(setup_mod, "get_env_value", lambda key: env.get(key, "")) + monkeypatch.setattr(setup_mod, "prompt_yes_no", lambda *args, **kwargs: False) + monkeypatch.setattr("platform.system", lambda: "Linux") + + import hermes_cli.gateway as gateway_mod + + monkeypatch.setattr(gateway_mod, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_mod, "is_macos", lambda: False) + monkeypatch.setattr(gateway_mod, "_is_service_installed", lambda: False) + monkeypatch.setattr(gateway_mod, "_is_service_running", lambda: False) + + # Patch is_container at the import location in setup.py + import hermes_constants + monkeypatch.setattr(hermes_constants, "is_container", lambda: True) + + setup_mod.setup_gateway({}) + + out = capsys.readouterr().out + assert "Messaging platforms configured!" in out + assert "docker" in out.lower() or "Docker" in out + assert "restart" in out.lower() + + def test_setup_syncs_custom_provider_removal_from_disk(tmp_path, monkeypatch): """Removing the last custom provider in model setup should persist.""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) diff --git a/tests/hermes_cli/test_tools_config.py b/tests/hermes_cli/test_tools_config.py index 2c2bb3919..ed79559d2 100644 --- a/tests/hermes_cli/test_tools_config.py +++ b/tests/hermes_cli/test_tools_config.py @@ -119,8 +119,7 @@ def test_toolset_has_keys_for_vision_accepts_codex_auth(tmp_path, monkeypatch): monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENAI_API_KEY", raising=False) - monkeypatch.delenv("AUXILIARY_VISION_PROVIDER", raising=False) - monkeypatch.delenv("CONTEXT_VISION_PROVIDER", raising=False) + monkeypatch.setattr( "agent.auxiliary_client.resolve_vision_provider_client", lambda: ("openai-codex", object(), "gpt-4.1"), diff --git a/tests/hermes_cli/test_user_providers_model_switch.py b/tests/hermes_cli/test_user_providers_model_switch.py new file mode 100644 index 000000000..222b53904 --- /dev/null +++ b/tests/hermes_cli/test_user_providers_model_switch.py @@ -0,0 +1,280 @@ +"""Tests for user-defined providers (providers: dict) in /model. + +These tests ensure that providers defined in the config.yaml ``providers:`` section +are properly resolved for model switching and that their full ``models:`` lists +are exposed in the model picker. 
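+
+The config shape exercised throughout (values taken from the tests below):
+
+    providers:
+      local-ollama:
+        name: Local Ollama
+        api: http://localhost:11434/v1
+        default_model: minimax-m2.7:cloud
+        models:
+          - minimax-m2.7:cloud
+          - kimi-k2.5:cloud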
+""" + +import pytest +from hermes_cli.model_switch import list_authenticated_providers, switch_model +from hermes_cli import runtime_provider as rp + + +# ============================================================================= +# Tests for list_authenticated_providers including full models list +# ============================================================================= + +def test_list_authenticated_providers_includes_full_models_list_from_user_providers(monkeypatch): + """User-defined providers should expose both default_model and full models list. + + Regression test: previously only default_model was shown in /model picker. + """ + monkeypatch.setattr("agent.models_dev.fetch_models_dev", lambda: {}) + monkeypatch.setattr("hermes_cli.providers.HERMES_OVERLAYS", {}) + + user_providers = { + "local-ollama": { + "name": "Local Ollama", + "api": "http://localhost:11434/v1", + "default_model": "minimax-m2.7:cloud", + "models": [ + "minimax-m2.7:cloud", + "kimi-k2.5:cloud", + "glm-5.1:cloud", + "qwen3.5:cloud", + ], + } + } + + providers = list_authenticated_providers( + current_provider="local-ollama", + user_providers=user_providers, + custom_providers=[], + max_models=50, + ) + + # Find our user provider + user_prov = next( + (p for p in providers if p.get("is_user_defined") and p["slug"] == "local-ollama"), + None + ) + + assert user_prov is not None, "User provider 'local-ollama' should be in results" + assert user_prov["total_models"] == 4, f"Expected 4 models, got {user_prov['total_models']}" + assert "minimax-m2.7:cloud" in user_prov["models"] + assert "kimi-k2.5:cloud" in user_prov["models"] + assert "glm-5.1:cloud" in user_prov["models"] + assert "qwen3.5:cloud" in user_prov["models"] + + +def test_list_authenticated_providers_dedupes_models_when_default_in_list(monkeypatch): + """When default_model is also in models list, don't duplicate.""" + monkeypatch.setattr("agent.models_dev.fetch_models_dev", lambda: {}) + monkeypatch.setattr("hermes_cli.providers.HERMES_OVERLAYS", {}) + + user_providers = { + "my-provider": { + "api": "http://example.com/v1", + "default_model": "model-a", # Included in models list below + "models": ["model-a", "model-b", "model-c"], + } + } + + providers = list_authenticated_providers( + current_provider="my-provider", + user_providers=user_providers, + custom_providers=[], + ) + + user_prov = next( + (p for p in providers if p.get("is_user_defined")), + None + ) + + assert user_prov is not None + assert user_prov["total_models"] == 3, "Should have 3 unique models, not 4" + assert user_prov["models"].count("model-a") == 1, "model-a should not be duplicated" + + +def test_list_authenticated_providers_fallback_to_default_only(monkeypatch): + """When no models array is provided, should fall back to default_model.""" + monkeypatch.setattr("agent.models_dev.fetch_models_dev", lambda: {}) + monkeypatch.setattr("hermes_cli.providers.HERMES_OVERLAYS", {}) + + user_providers = { + "simple-provider": { + "name": "Simple Provider", + "api": "http://example.com/v1", + "default_model": "single-model", + # No 'models' key + } + } + + providers = list_authenticated_providers( + current_provider="", + user_providers=user_providers, + custom_providers=[], + ) + + user_prov = next( + (p for p in providers if p.get("is_user_defined")), + None + ) + + assert user_prov is not None + assert user_prov["total_models"] == 1 + assert user_prov["models"] == ["single-model"] + + +# ============================================================================= +# Tests for 
_get_named_custom_provider with providers: dict +# ============================================================================= + +def test_get_named_custom_provider_finds_user_providers_by_key(monkeypatch, tmp_path): + """Should resolve providers from providers: dict (new-style), not just custom_providers.""" + config = { + "providers": { + "local-localhost:11434": { + "api": "http://localhost:11434/v1", + "name": "Local (localhost:11434)", + "default_model": "minimax-m2.7:cloud", + } + } + } + + import yaml + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + result = rp._get_named_custom_provider("local-localhost:11434") + + assert result is not None + assert result["base_url"] == "http://localhost:11434/v1" + assert result["name"] == "Local (localhost:11434)" + + +def test_get_named_custom_provider_finds_by_display_name(monkeypatch, tmp_path): + """Should match providers by their 'name' field as well as key.""" + config = { + "providers": { + "my-ollama-xyz": { + "api": "http://ollama.example.com/v1", + "name": "My Production Ollama", + "default_model": "llama3", + } + } + } + + import yaml + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + # Should find by display name (normalized) + result = rp._get_named_custom_provider("my-production-ollama") + + assert result is not None + assert result["base_url"] == "http://ollama.example.com/v1" + + +def test_get_named_custom_provider_falls_back_to_legacy_format(monkeypatch, tmp_path): + """Should still work with custom_providers: list format.""" + config = { + "providers": {}, + "custom_providers": [ + { + "name": "Custom Endpoint", + "base_url": "http://custom.example.com/v1", + } + ] + } + + import yaml + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + result = rp._get_named_custom_provider("custom-endpoint") + + assert result is not None + + +def test_get_named_custom_provider_returns_none_for_unknown(monkeypatch, tmp_path): + """Should return None for providers that don't exist.""" + config = { + "providers": { + "known-provider": { + "api": "http://known.example.com/v1", + } + } + } + + import yaml + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + # Matching is intentionally loose (normalized/partial), so a near-miss name + # could still resolve; use a name with no overlap with "known-provider" to + # verify the miss path. + result = rp._get_named_custom_provider("completely-different-name") + assert result is None + + +def test_get_named_custom_provider_skips_empty_base_url(monkeypatch, tmp_path): + """Should skip providers without a base_url.""" + config = { + "providers": { + "incomplete-provider": { + "name": "Incomplete", + # No api/base_url field + } + } + } + + import yaml + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + result = rp._get_named_custom_provider("incomplete-provider") + + assert result is None + + +# ============================================================================= +# Integration test for switch_model with user providers +# ============================================================================= + +def test_switch_model_resolves_user_provider_credentials(monkeypatch, tmp_path): + """/model switch should resolve credentials for providers: dict providers.""" + import yaml + + config = { + "providers": { + "local-ollama": { + "api": "http://localhost:11434/v1", + "name": "Local Ollama", + "default_model": "minimax-m2.7:cloud", + } + } + } + + config_file = tmp_path / "config.yaml" + config_file.write_text(yaml.dump(config)) + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + + # Mock validation to pass + monkeypatch.setattr( + "hermes_cli.models.validate_requested_model", + lambda *a, **k: {"accepted": True, "persist": True, "recognized": True, "message": None} + ) + + result = switch_model( + raw_input="kimi-k2.5:cloud", + current_provider="local-ollama", + current_model="minimax-m2.7:cloud", + current_base_url="http://localhost:11434/v1", + is_global=False, + user_providers=config["providers"], + ) + + assert result.success is True + assert result.error_message == "" diff --git a/tests/hermes_cli/test_web_server.py b/tests/hermes_cli/test_web_server.py new file mode 100644 index 000000000..ffa614cd9 --- /dev/null +++ b/tests/hermes_cli/test_web_server.py @@ -0,0 +1,675 @@ +"""Tests for hermes_cli.web_server and related config utilities.""" + +import os +import json +import tempfile +from pathlib import Path +from unittest.mock import patch, MagicMock + +import pytest + +from hermes_cli.config import ( + DEFAULT_CONFIG, + reload_env, + redact_key, + _EXTRA_ENV_KEYS, + OPTIONAL_ENV_VARS, +) + + +# --------------------------------------------------------------------------- +# reload_env tests +# --------------------------------------------------------------------------- + + +class TestReloadEnv: + """Tests for reload_env() — re-reads .env into os.environ.""" + + def test_adds_new_vars(self, tmp_path): + """reload_env() adds vars from .env that are not in os.environ.""" + env_file = tmp_path / ".env" + env_file.write_text("TEST_RELOAD_VAR=hello123\n") + with patch("hermes_cli.config.get_env_path", return_value=env_file): + os.environ.pop("TEST_RELOAD_VAR", None) + count = reload_env() + assert count >= 1 + assert os.environ.get("TEST_RELOAD_VAR") == "hello123" + os.environ.pop("TEST_RELOAD_VAR", None) + + def test_updates_changed_vars(self, tmp_path): + """reload_env() updates vars whose value changed on disk.""" + env_file = tmp_path / ".env" + env_file.write_text("TEST_RELOAD_VAR=old_value\n") + with patch("hermes_cli.config.get_env_path", return_value=env_file): + os.environ["TEST_RELOAD_VAR"] = "old_value" + # Now change the file + env_file.write_text("TEST_RELOAD_VAR=new_value\n") + count = reload_env() + assert count >= 1 + assert
os.environ.get("TEST_RELOAD_VAR") == "new_value" + os.environ.pop("TEST_RELOAD_VAR", None) + + def test_removes_deleted_known_vars(self, tmp_path): + """reload_env() removes known Hermes vars not present in .env.""" + env_file = tmp_path / ".env" + env_file.write_text("") # empty .env + # Pick a known key from OPTIONAL_ENV_VARS + known_key = next(iter(OPTIONAL_ENV_VARS.keys())) + with patch("hermes_cli.config.get_env_path", return_value=env_file): + os.environ[known_key] = "stale_value" + count = reload_env() + assert known_key not in os.environ + assert count >= 1 + + def test_does_not_remove_unknown_vars(self, tmp_path): + """reload_env() preserves non-Hermes env vars even when absent from .env.""" + env_file = tmp_path / ".env" + env_file.write_text("") + with patch("hermes_cli.config.get_env_path", return_value=env_file): + os.environ["MY_CUSTOM_UNRELATED_VAR"] = "keep_me" + reload_env() + assert os.environ.get("MY_CUSTOM_UNRELATED_VAR") == "keep_me" + os.environ.pop("MY_CUSTOM_UNRELATED_VAR", None) + + +# --------------------------------------------------------------------------- +# redact_key tests +# --------------------------------------------------------------------------- + + +class TestRedactKey: + def test_long_key_shows_prefix_suffix(self): + result = redact_key("sk-1234567890abcdef") + assert result.startswith("sk-1") + assert result.endswith("cdef") + assert "..." in result + + def test_short_key_fully_masked(self): + assert redact_key("short") == "***" + + def test_empty_key(self): + result = redact_key("") + assert "not set" in result.lower() or result == "***" or "\x1b" in result + + +# --------------------------------------------------------------------------- +# web_server tests (FastAPI endpoints) +# --------------------------------------------------------------------------- + + +class TestWebServerEndpoints: + """Test the FastAPI REST endpoints using Starlette TestClient.""" + + @pytest.fixture(autouse=True) + def _setup_test_client(self): + """Create a TestClient โ€” import is deferred to avoid requiring fastapi.""" + try: + from starlette.testclient import TestClient + except ImportError: + pytest.skip("fastapi/starlette not installed") + + from hermes_cli.web_server import app + self.client = TestClient(app) + + def test_get_status(self): + resp = self.client.get("/api/status") + assert resp.status_code == 200 + data = resp.json() + assert "version" in data + assert "hermes_home" in data + assert "active_sessions" in data + + def test_get_status_filters_unconfigured_gateway_platforms(self, monkeypatch): + import gateway.config as gateway_config + import hermes_cli.web_server as web_server + + class _Platform: + def __init__(self, value): + self.value = value + + class _GatewayConfig: + def get_connected_platforms(self): + return [_Platform("telegram")] + + monkeypatch.setattr(web_server, "get_running_pid", lambda: 1234) + monkeypatch.setattr( + web_server, + "read_runtime_status", + lambda: { + "gateway_state": "running", + "updated_at": "2026-04-12T00:00:00+00:00", + "platforms": { + "telegram": {"state": "connected", "updated_at": "2026-04-12T00:00:00+00:00"}, + "whatsapp": {"state": "retrying", "updated_at": "2026-04-12T00:00:00+00:00"}, + "feishu": {"state": "connected", "updated_at": "2026-04-12T00:00:00+00:00"}, + }, + }, + ) + monkeypatch.setattr(web_server, "check_config_version", lambda: (1, 1)) + monkeypatch.setattr(gateway_config, "load_gateway_config", lambda: _GatewayConfig()) + + resp = self.client.get("/api/status") + + assert resp.status_code == 200 
+ assert resp.json()["gateway_platforms"] == { + "telegram": {"state": "connected", "updated_at": "2026-04-12T00:00:00+00:00"}, + } + + def test_get_status_hides_stale_platforms_when_gateway_not_running(self, monkeypatch): + import gateway.config as gateway_config + import hermes_cli.web_server as web_server + + class _GatewayConfig: + def get_connected_platforms(self): + return [] + + monkeypatch.setattr(web_server, "get_running_pid", lambda: None) + monkeypatch.setattr( + web_server, + "read_runtime_status", + lambda: { + "gateway_state": "startup_failed", + "updated_at": "2026-04-12T00:00:00+00:00", + "platforms": { + "whatsapp": {"state": "retrying", "updated_at": "2026-04-12T00:00:00+00:00"}, + "feishu": {"state": "connected", "updated_at": "2026-04-12T00:00:00+00:00"}, + }, + }, + ) + monkeypatch.setattr(web_server, "check_config_version", lambda: (1, 1)) + monkeypatch.setattr(gateway_config, "load_gateway_config", lambda: _GatewayConfig()) + + resp = self.client.get("/api/status") + + assert resp.status_code == 200 + assert resp.json()["gateway_state"] == "startup_failed" + assert resp.json()["gateway_platforms"] == {} + + def test_get_config_schema(self): + resp = self.client.get("/api/config/schema") + assert resp.status_code == 200 + data = resp.json() + assert "fields" in data + assert "category_order" in data + schema = data["fields"] + assert len(schema) > 100 # Should have 150+ fields + assert "model" in schema + # Verify category_order is a non-empty list + assert isinstance(data["category_order"], list) + assert len(data["category_order"]) > 0 + assert "general" in data["category_order"] + + def test_get_config_defaults(self): + resp = self.client.get("/api/config/defaults") + assert resp.status_code == 200 + defaults = resp.json() + assert "model" in defaults + + def test_get_env_vars(self): + resp = self.client.get("/api/env") + assert resp.status_code == 200 + data = resp.json() + # Should contain known env var names + assert any(k.endswith("_API_KEY") or k.endswith("_TOKEN") for k in data.keys()) + + def test_reveal_env_var(self, tmp_path): + """POST /api/env/reveal should return the real unredacted value.""" + from hermes_cli.config import save_env_value + from hermes_cli.web_server import _SESSION_TOKEN + save_env_value("TEST_REVEAL_KEY", "super-secret-value-12345") + resp = self.client.post( + "/api/env/reveal", + json={"key": "TEST_REVEAL_KEY"}, + headers={"Authorization": f"Bearer {_SESSION_TOKEN}"}, + ) + assert resp.status_code == 200 + data = resp.json() + assert data["key"] == "TEST_REVEAL_KEY" + assert data["value"] == "super-secret-value-12345" + + def test_reveal_env_var_not_found(self): + """POST /api/env/reveal should 404 for unknown keys.""" + from hermes_cli.web_server import _SESSION_TOKEN + resp = self.client.post( + "/api/env/reveal", + json={"key": "NONEXISTENT_KEY_XYZ"}, + headers={"Authorization": f"Bearer {_SESSION_TOKEN}"}, + ) + assert resp.status_code == 404 + + def test_reveal_env_var_no_token(self, tmp_path): + """POST /api/env/reveal without token should return 401.""" + from hermes_cli.config import save_env_value + save_env_value("TEST_REVEAL_NOAUTH", "secret-value") + resp = self.client.post( + "/api/env/reveal", + json={"key": "TEST_REVEAL_NOAUTH"}, + ) + assert resp.status_code == 401 + + def test_reveal_env_var_bad_token(self, tmp_path): + """POST /api/env/reveal with wrong token should return 401.""" + from hermes_cli.config import save_env_value + save_env_value("TEST_REVEAL_BADAUTH", "secret-value") + resp = self.client.post( + 
"/api/env/reveal", + json={"key": "TEST_REVEAL_BADAUTH"}, + headers={"Authorization": "Bearer wrong-token-here"}, + ) + assert resp.status_code == 401 + + def test_session_token_endpoint(self): + """GET /api/auth/session-token should return a token.""" + from hermes_cli.web_server import _SESSION_TOKEN + resp = self.client.get("/api/auth/session-token") + assert resp.status_code == 200 + assert resp.json()["token"] == _SESSION_TOKEN + + def test_path_traversal_blocked(self): + """Verify URL-encoded path traversal is blocked.""" + # %2e%2e = .. + resp = self.client.get("/%2e%2e/%2e%2e/etc/passwd") + # Should return 200 with index.html (SPA fallback), not the actual file + assert resp.status_code in (200, 404) + if resp.status_code == 200: + # Should be the SPA fallback, not the system file + assert "root:" not in resp.text + + def test_path_traversal_dotdot_blocked(self): + """Direct .. path traversal via encoded sequences.""" + resp = self.client.get("/%2e%2e/hermes_cli/web_server.py") + assert resp.status_code in (200, 404) + if resp.status_code == 200: + assert "FastAPI" not in resp.text # Should not serve the actual source + + +# --------------------------------------------------------------------------- +# _build_schema_from_config tests +# --------------------------------------------------------------------------- + + +class TestBuildSchemaFromConfig: + def test_produces_expected_field_count(self): + from hermes_cli.web_server import CONFIG_SCHEMA + # DEFAULT_CONFIG has ~150+ leaf fields + assert len(CONFIG_SCHEMA) > 100 + + def test_schema_entries_have_required_fields(self): + from hermes_cli.web_server import CONFIG_SCHEMA + for key, entry in list(CONFIG_SCHEMA.items())[:10]: + assert "type" in entry, f"Missing type for {key}" + assert "category" in entry, f"Missing category for {key}" + + def test_overrides_applied(self): + from hermes_cli.web_server import CONFIG_SCHEMA + # terminal.backend should be a select with options + if "terminal.backend" in CONFIG_SCHEMA: + entry = CONFIG_SCHEMA["terminal.backend"] + assert entry["type"] == "select" + assert "options" in entry + assert "local" in entry["options"] + + def test_empty_prefix_produces_correct_keys(self): + from hermes_cli.web_server import _build_schema_from_config + test_config = {"model": "test", "nested": {"key": "val"}} + schema = _build_schema_from_config(test_config) + assert "model" in schema + assert "nested.key" in schema + + def test_top_level_scalars_get_general_category(self): + """Top-level scalar fields should be in 'general' category.""" + from hermes_cli.web_server import CONFIG_SCHEMA + assert CONFIG_SCHEMA["model"]["category"] == "general" + + def test_nested_keys_get_parent_category(self): + """Nested fields should use the top-level parent as their category.""" + from hermes_cli.web_server import CONFIG_SCHEMA + if "agent.max_turns" in CONFIG_SCHEMA: + assert CONFIG_SCHEMA["agent.max_turns"]["category"] == "agent" + + def test_category_merge_applied(self): + """Small categories should be merged into larger ones.""" + from hermes_cli.web_server import CONFIG_SCHEMA + categories = {e["category"] for e in CONFIG_SCHEMA.values()} + # These should be merged away + assert "privacy" not in categories # merged into security + assert "context" not in categories # merged into agent + + def test_no_single_field_categories(self): + """After merging, no category should have just 1 field.""" + from hermes_cli.web_server import CONFIG_SCHEMA + from collections import Counter + cats = Counter(e["category"] for e in 
CONFIG_SCHEMA.values())
+        for cat, count in cats.items():
+            assert count >= 2, f"Category '{cat}' has only {count} field(s) -- should be merged"
+
+
+# ---------------------------------------------------------------------------
+# Config round-trip tests
+# ---------------------------------------------------------------------------
+
+
+class TestConfigRoundTrip:
+    """Verify config survives GET -> edit -> PUT without data loss."""
+
+    @pytest.fixture(autouse=True)
+    def _setup(self):
+        try:
+            from starlette.testclient import TestClient
+        except ImportError:
+            pytest.skip("fastapi/starlette not installed")
+        from hermes_cli.web_server import app
+        self.client = TestClient(app)
+
+    def test_get_config_no_internal_keys(self):
+        """GET /api/config should not expose _config_version or _model_meta."""
+        config = self.client.get("/api/config").json()
+        internal = [k for k in config if k.startswith("_")]
+        assert not internal, f"Internal keys leaked to frontend: {internal}"
+
+    def test_get_config_model_is_string(self):
+        """GET /api/config should normalize model dict to a string."""
+        config = self.client.get("/api/config").json()
+        assert isinstance(config.get("model"), str), \
+            f"model should be string, got {type(config.get('model'))}"
+
+    def test_round_trip_preserves_model_subkeys(self):
+        """Save and reload should not lose model.provider, model.base_url, etc."""
+        from hermes_cli.config import load_config, save_config
+
+        # Set up a config with model as a dict (the common user config form)
+        save_config({
+            "model": {
+                "default": "anthropic/claude-sonnet-4",
+                "provider": "openrouter",
+                "base_url": "https://openrouter.ai/api/v1",
+                "api_mode": "openai",
+            }
+        })
+
+        before = load_config()
+        assert isinstance(before.get("model"), dict)
+        original_keys = set(before["model"].keys())
+
+        # GET -> PUT unchanged
+        web_config = self.client.get("/api/config").json()
+        assert isinstance(web_config.get("model"), str), "GET should normalize model to string"
+
+        self.client.put("/api/config", json={"config": web_config})
+
+        after = load_config()
+        assert isinstance(after.get("model"), dict), "model should still be a dict after save"
+        assert set(after["model"].keys()) >= original_keys, \
+            f"Lost model subkeys: {original_keys - set(after['model'].keys())}"
+
+    def test_edit_model_name_preserved(self):
+        """Changing the model string should update model.default on disk."""
+        from hermes_cli.config import load_config
+
+        web_config = self.client.get("/api/config").json()
+        original_model = web_config["model"]
+
+        # Change model
+        web_config["model"] = "test/editing-model"
+        self.client.put("/api/config", json={"config": web_config})
+
+        after = load_config()
+        if isinstance(after.get("model"), dict):
+            assert after["model"]["default"] == "test/editing-model"
+        else:
+            assert after["model"] == "test/editing-model"
+
+        # Restore
+        web_config["model"] = original_model
+        self.client.put("/api/config", json={"config": web_config})
+
+    def test_edit_nested_value(self):
+        """Editing a nested config value should persist correctly."""
+        from hermes_cli.config import load_config
+
+        web_config = self.client.get("/api/config").json()
+        original_turns = web_config.get("agent", {}).get("max_turns")
+
+        # Change max_turns
+        if "agent" not in web_config:
+            web_config["agent"] = {}
+        web_config["agent"]["max_turns"] = 42
+
+        self.client.put("/api/config", json={"config": web_config})
+
+        after = load_config()
+        assert after.get("agent", {}).get("max_turns") == 42
+
+        # Restore
+        web_config["agent"]["max_turns"] = 
original_turns
+        self.client.put("/api/config", json={"config": web_config})
+
+    def test_schema_types_match_config_values(self):
+        """Every schema field should have a matching-type value in the config."""
+        config = self.client.get("/api/config").json()
+        schema_resp = self.client.get("/api/config/schema").json()
+        schema = schema_resp["fields"]
+
+        def get_nested(obj, path):
+            parts = path.split(".")
+            cur = obj
+            for p in parts:
+                if cur is None or not isinstance(cur, dict):
+                    return None
+                cur = cur.get(p)
+            return cur
+
+        mismatches = []
+        for key, entry in schema.items():
+            val = get_nested(config, key)
+            if val is None:
+                continue  # not set in user config -- fine
+            expected = entry["type"]
+            if expected in ("string", "select") and not isinstance(val, str):
+                mismatches.append(f"{key}: expected str, got {type(val).__name__}")
+            elif expected == "number" and not isinstance(val, (int, float)):
+                mismatches.append(f"{key}: expected number, got {type(val).__name__}")
+            elif expected == "boolean" and not isinstance(val, bool):
+                mismatches.append(f"{key}: expected bool, got {type(val).__name__}")
+            elif expected == "list" and not isinstance(val, list):
+                mismatches.append(f"{key}: expected list, got {type(val).__name__}")
+        assert not mismatches, f"Type mismatches:\n" + "\n".join(mismatches)
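A plausible sketch of the model-dict normalization these round-trip tests pin
down (helper names are assumed for illustration; the real logic lives in
hermes_cli/web_server.py):

    def _model_to_string(cfg):
        # GET /api/config: collapse a model dict to its "default" string
        m = cfg.get("model")
        return m.get("default", "") if isinstance(m, dict) else m

    def _merge_model_string(on_disk, incoming):
        # PUT /api/config: write the string back without clobbering the
        # provider/base_url/api_mode subkeys stored on disk
        if isinstance(on_disk.get("model"), dict):
            on_disk["model"]["default"] = incoming
        else:
            on_disk["model"] = incoming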
self.client.get("/api/skills") + + assert resp.status_code == 200 + assert resp.json() == [ + { + "name": "active-skill", + "description": "active", + "category": "demo", + "enabled": True, + }, + { + "name": "disabled-skill", + "description": "disabled", + "category": "demo", + "enabled": False, + }, + ] + + def test_toolsets_list(self): + resp = self.client.get("/api/tools/toolsets") + assert resp.status_code == 200 + toolsets = resp.json() + assert isinstance(toolsets, list) + if toolsets: + assert "name" in toolsets[0] + assert "label" in toolsets[0] + assert "enabled" in toolsets[0] + + def test_toolsets_list_matches_cli_enabled_state(self, monkeypatch): + import hermes_cli.tools_config as tools_config + import toolsets as toolsets_module + import hermes_cli.web_server as web_server + + monkeypatch.setattr( + tools_config, + "_get_effective_configurable_toolsets", + lambda: [ + ("web", "๐Ÿ” Web Search & Scraping", "web_search, web_extract"), + ("skills", "๐Ÿ“š Skills", "list, view, manage"), + ("memory", "๐Ÿ’พ Memory", "persistent memory across sessions"), + ], + ) + monkeypatch.setattr( + tools_config, + "_get_platform_tools", + lambda config, platform, include_default_mcp_servers=False: {"web", "skills"}, + ) + monkeypatch.setattr( + tools_config, + "_toolset_has_keys", + lambda ts_key, config=None: ts_key != "web", + ) + monkeypatch.setattr( + toolsets_module, + "resolve_toolset", + lambda name: { + "web": ["web_search", "web_extract"], + "skills": ["skills_list", "skill_view"], + "memory": ["memory_read"], + }[name], + ) + monkeypatch.setattr(web_server, "load_config", lambda: {"platform_toolsets": {"cli": ["web", "skills"]}}) + + resp = self.client.get("/api/tools/toolsets") + + assert resp.status_code == 200 + assert resp.json() == [ + { + "name": "web", + "label": "๐Ÿ” Web Search & Scraping", + "description": "web_search, web_extract", + "enabled": True, + "available": True, + "configured": False, + "tools": ["web_extract", "web_search"], + }, + { + "name": "skills", + "label": "๐Ÿ“š Skills", + "description": "list, view, manage", + "enabled": True, + "available": True, + "configured": True, + "tools": ["skill_view", "skills_list"], + }, + { + "name": "memory", + "label": "๐Ÿ’พ Memory", + "description": "persistent memory across sessions", + "enabled": False, + "available": False, + "configured": True, + "tools": ["memory_read"], + }, + ] + + def test_config_raw_get(self): + resp = self.client.get("/api/config/raw") + assert resp.status_code == 200 + assert "yaml" in resp.json() + + def test_config_raw_put_valid(self): + resp = self.client.put( + "/api/config/raw", + json={"yaml_text": "model: test\ntoolsets:\n - all\n"}, + ) + assert resp.status_code == 200 + assert resp.json()["ok"] is True + + def test_config_raw_put_invalid(self): + resp = self.client.put( + "/api/config/raw", + json={"yaml_text": "- this is a list not a dict"}, + ) + assert resp.status_code == 400 + + def test_analytics_usage(self): + resp = self.client.get("/api/analytics/usage?days=7") + assert resp.status_code == 200 + data = resp.json() + assert "daily" in data + assert "by_model" in data + assert "totals" in data + assert isinstance(data["daily"], list) + assert "total_sessions" in data["totals"] + + def test_session_token_endpoint(self): + from hermes_cli.web_server import _SESSION_TOKEN + resp = self.client.get("/api/auth/session-token") + assert resp.status_code == 200 + assert resp.json()["token"] == _SESSION_TOKEN diff --git a/tests/run_agent/test_anthropic_error_handling.py 
b/tests/run_agent/test_anthropic_error_handling.py index 3d7660aa8..00055928e 100644 --- a/tests/run_agent/test_anthropic_error_handling.py +++ b/tests/run_agent/test_anthropic_error_handling.py @@ -102,7 +102,19 @@ class _PromptTooLongError(Exception): self.status_code = 400 +class _FakeMessages: + """Stub for client.messages.create() / client.messages.stream().""" + def create(self, **kwargs): + raise NotImplementedError("_FakeAnthropicClient.messages.create should not be called directly in tests") + + def stream(self, **kwargs): + raise NotImplementedError("_FakeAnthropicClient.messages.stream should not be called directly in tests") + + class _FakeAnthropicClient: + def __init__(self): + self.messages = _FakeMessages() + def close(self): pass @@ -131,13 +143,14 @@ def _make_agent_cls(error_cls, recover_after=None): def run_conversation(self, user_message, conversation_history=None, task_id=None): calls = {"n": 0} - def _fake_api_call(api_kwargs): + def _fake_api_call(api_kwargs, **kw): calls["n"] += 1 if recover_after is not None and calls["n"] > recover_after: return _anthropic_response("Recovered") raise error_cls() self._interruptible_api_call = _fake_api_call + self._interruptible_streaming_api_call = _fake_api_call return super().run_conversation( user_message, conversation_history=conversation_history, task_id=task_id ) @@ -352,10 +365,11 @@ def test_401_refresh_fails_is_non_retryable(monkeypatch): return False # Simulate failed credential refresh def run_conversation(self, user_message, conversation_history=None, task_id=None): - def _fake_api_call(api_kwargs): + def _fake_api_call(api_kwargs, **kw): raise _UnauthorizedError() self._interruptible_api_call = _fake_api_call + self._interruptible_streaming_api_call = _fake_api_call return super().run_conversation( user_message, conversation_history=conversation_history, task_id=task_id ) @@ -436,13 +450,14 @@ def test_prompt_too_long_triggers_compression(monkeypatch): def run_conversation(self, user_message, conversation_history=None, task_id=None): calls = {"n": 0} - def _fake_api_call(api_kwargs): + def _fake_api_call(api_kwargs, **kw): calls["n"] += 1 if calls["n"] == 1: raise _PromptTooLongError() return _anthropic_response("Compressed and recovered") self._interruptible_api_call = _fake_api_call + self._interruptible_streaming_api_call = _fake_api_call return super().run_conversation( user_message, conversation_history=conversation_history, task_id=task_id ) diff --git a/tests/run_agent/test_compression_feasibility.py b/tests/run_agent/test_compression_feasibility.py index 0738b1d43..0756fcda6 100644 --- a/tests/run_agent/test_compression_feasibility.py +++ b/tests/run_agent/test_compression_feasibility.py @@ -38,6 +38,7 @@ def _make_agent( agent.status_callback = None agent.tool_progress_callback = None agent._compression_warning = None + agent.config = None compressor = MagicMock(spec=ContextCompressor) compressor.context_length = main_context @@ -130,6 +131,64 @@ def test_feasibility_check_passes_live_main_runtime(): ) +@patch("agent.model_metadata.get_model_context_length", return_value=1_000_000) +@patch("agent.auxiliary_client.get_text_auxiliary_client") +def test_feasibility_check_passes_config_context_length(mock_get_client, mock_ctx_len): + """auxiliary.compression.context_length from config is forwarded to + get_model_context_length so custom endpoints that lack /models still + report the correct context window (fixes #8499).""" + agent = _make_agent(main_context=200_000, threshold_percent=0.85) + agent.config = { + 
"auxiliary": { + "compression": { + "context_length": 1_000_000, + }, + }, + } + mock_client = MagicMock() + mock_client.base_url = "http://custom-endpoint:8080/v1" + mock_client.api_key = "sk-custom" + mock_get_client.return_value = (mock_client, "custom/big-model") + + agent._emit_status = lambda msg: None + agent._check_compression_model_feasibility() + + mock_ctx_len.assert_called_once_with( + "custom/big-model", + base_url="http://custom-endpoint:8080/v1", + api_key="sk-custom", + config_context_length=1_000_000, + ) + + +@patch("agent.model_metadata.get_model_context_length", return_value=128_000) +@patch("agent.auxiliary_client.get_text_auxiliary_client") +def test_feasibility_check_ignores_invalid_context_length(mock_get_client, mock_ctx_len): + """Non-integer context_length in config is silently ignored.""" + agent = _make_agent(main_context=200_000, threshold_percent=0.50) + agent.config = { + "auxiliary": { + "compression": { + "context_length": "not-a-number", + }, + }, + } + mock_client = MagicMock() + mock_client.base_url = "http://custom:8080/v1" + mock_client.api_key = "sk-test" + mock_get_client.return_value = (mock_client, "custom/model") + + agent._emit_status = lambda msg: None + agent._check_compression_model_feasibility() + + mock_ctx_len.assert_called_once_with( + "custom/model", + base_url="http://custom:8080/v1", + api_key="sk-test", + config_context_length=None, + ) + + @patch("agent.auxiliary_client.get_text_auxiliary_client") def test_warns_when_no_auxiliary_provider(mock_get_client): """Warning emitted when no auxiliary provider is configured.""" diff --git a/tests/run_agent/test_context_token_tracking.py b/tests/run_agent/test_context_token_tracking.py index 377a04a5d..b924448b6 100644 --- a/tests/run_agent/test_context_token_tracking.py +++ b/tests/run_agent/test_context_token_tracking.py @@ -56,6 +56,7 @@ def _make_agent(monkeypatch, api_mode, provider, response_fn): def run_conversation(self, msg, conversation_history=None, task_id=None): self._interruptible_api_call = lambda kw: response_fn() + self._disable_streaming = True return super().run_conversation(msg, conversation_history=conversation_history, task_id=task_id) return _A(model="test-model", api_key="test-key", provider=provider, api_mode=api_mode) diff --git a/tests/run_agent/test_dict_tool_call_args.py b/tests/run_agent/test_dict_tool_call_args.py index e8b4d70fa..61ee6fc5c 100644 --- a/tests/run_agent/test_dict_tool_call_args.py +++ b/tests/run_agent/test_dict_tool_call_args.py @@ -66,6 +66,7 @@ def test_tool_call_validation_accepts_dict_arguments(monkeypatch): quiet_mode=True, skip_memory=True, ) + agent._disable_streaming = True result = agent.run_conversation("read the file") diff --git a/tests/run_agent/test_plugin_context_engine_init.py b/tests/run_agent/test_plugin_context_engine_init.py new file mode 100644 index 000000000..7583d9e75 --- /dev/null +++ b/tests/run_agent/test_plugin_context_engine_init.py @@ -0,0 +1,89 @@ +"""Tests that plugin context engines get update_model() called during init. + +Regression test for #9071 โ€” plugin engines were never initialized with +context_length, causing the CLI status bar to show 'ctx --'. 
+""" + +from unittest.mock import MagicMock, patch + +from agent.context_engine import ContextEngine + + +class _StubEngine(ContextEngine): + """Minimal concrete context engine for testing.""" + + @property + def name(self) -> str: + return "stub" + + def update_from_response(self, usage): + pass + + def should_compress(self, prompt_tokens=None): + return False + + def compress(self, messages, current_tokens=None): + return messages + + +def test_plugin_engine_gets_context_length_on_init(): + """Plugin context engine should have context_length set during AIAgent init.""" + engine = _StubEngine() + assert engine.context_length == 0 # ABC default before fix + + cfg = {"context": {"engine": "stub"}, "agent": {}} + + with ( + patch("hermes_cli.config.load_config", return_value=cfg), + patch("plugins.context_engine.load_context_engine", return_value=engine), + patch("agent.model_metadata.get_model_context_length", return_value=204_800), + patch("run_agent.get_tool_definitions", return_value=[]), + patch("run_agent.check_toolset_requirements", return_value={}), + patch("run_agent.OpenAI"), + ): + from run_agent import AIAgent + + agent = AIAgent( + api_key="test-key-1234567890", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + + assert agent.context_compressor is engine + assert engine.context_length == 204_800 + assert engine.threshold_tokens == int(204_800 * engine.threshold_percent) + + +def test_plugin_engine_update_model_args(): + """Verify update_model() receives model, context_length, base_url, api_key, provider.""" + engine = _StubEngine() + engine.update_model = MagicMock() + + cfg = {"context": {"engine": "stub"}, "agent": {}} + + with ( + patch("hermes_cli.config.load_config", return_value=cfg), + patch("plugins.context_engine.load_context_engine", return_value=engine), + patch("agent.model_metadata.get_model_context_length", return_value=131_072), + patch("run_agent.get_tool_definitions", return_value=[]), + patch("run_agent.check_toolset_requirements", return_value={}), + patch("run_agent.OpenAI"), + ): + from run_agent import AIAgent + + agent = AIAgent( + model="openrouter/auto", + api_key="test-key-1234567890", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + + engine.update_model.assert_called_once() + kw = engine.update_model.call_args.kwargs + assert kw["context_length"] == 131_072 + assert "model" in kw + assert "provider" in kw + # Should NOT pass api_mode โ€” the ABC doesn't accept it + assert "api_mode" not in kw diff --git a/tests/run_agent/test_provider_parity.py b/tests/run_agent/test_provider_parity.py index 067ecf672..c0c62b01b 100644 --- a/tests/run_agent/test_provider_parity.py +++ b/tests/run_agent/test_provider_parity.py @@ -44,11 +44,11 @@ class _FakeOpenAI: pass -def _make_agent(monkeypatch, provider, api_mode="chat_completions", base_url="https://openrouter.ai/api/v1"): +def _make_agent(monkeypatch, provider, api_mode="chat_completions", base_url="https://openrouter.ai/api/v1", model=None): monkeypatch.setattr("run_agent.get_tool_definitions", lambda **kw: _tool_defs("web_search", "terminal")) monkeypatch.setattr("run_agent.check_toolset_requirements", lambda: {}) monkeypatch.setattr("run_agent.OpenAI", _FakeOpenAI) - return AIAgent( + kwargs = dict( api_key="test-key", base_url=base_url, provider=provider, @@ -58,6 +58,9 @@ def _make_agent(monkeypatch, provider, api_mode="chat_completions", base_url="ht skip_context_files=True, skip_memory=True, ) + if model: + kwargs["model"] = model + return AIAgent(**kwargs) # 
 # ── _build_api_kwargs tests ─────────────────────────────────────────────────
@@ -247,7 +250,7 @@ class TestBuildApiKwargsChatCompletionsServiceTier:
 
 class TestBuildApiKwargsAIGateway:
     def test_uses_chat_completions_format(self, monkeypatch):
-        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1")
+        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1", model="gpt-4o")
         messages = [{"role": "user", "content": "hi"}]
         kwargs = agent._build_api_kwargs(messages)
         assert "messages" in kwargs
@@ -255,7 +258,7 @@ class TestBuildApiKwargsAIGateway:
         assert kwargs["messages"][-1]["content"] == "hi"
 
     def test_no_responses_api_fields(self, monkeypatch):
-        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1")
+        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1", model="gpt-4o")
         messages = [{"role": "user", "content": "hi"}]
         kwargs = agent._build_api_kwargs(messages)
         assert "input" not in kwargs
@@ -263,7 +266,7 @@ class TestBuildApiKwargsAIGateway:
         assert "store" not in kwargs
 
     def test_includes_reasoning_in_extra_body(self, monkeypatch):
-        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1")
+        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1", model="gpt-4o")
         messages = [{"role": "user", "content": "hi"}]
         kwargs = agent._build_api_kwargs(messages)
         extra = kwargs.get("extra_body", {})
@@ -271,7 +274,7 @@ class TestBuildApiKwargsAIGateway:
         assert extra["reasoning"]["enabled"] is True
 
     def test_includes_tools(self, monkeypatch):
-        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1")
+        agent = _make_agent(monkeypatch, "ai-gateway", base_url="https://ai-gateway.vercel.sh/v1", model="gpt-4o")
         messages = [{"role": "user", "content": "hi"}]
         kwargs = agent._build_api_kwargs(messages)
         assert "tools" in kwargs
diff --git a/tests/run_agent/test_real_interrupt_subagent.py b/tests/run_agent/test_real_interrupt_subagent.py
index e0e681cdf..39b4c58e2 100644
--- a/tests/run_agent/test_real_interrupt_subagent.py
+++ b/tests/run_agent/test_real_interrupt_subagent.py
@@ -76,7 +76,8 @@ class TestRealSubagentInterrupt(unittest.TestCase):
         parent._delegate_spinner = None
         parent.tool_progress_callback = None
         parent.iteration_budget = IterationBudget(max_total=100)
-        parent._client_kwargs = {"api_key": "test", "base_url": "http://localhost:1"}
+        parent._client_kwargs = {"api_key": "***", "base_url": "http://localhost:1"}
+        parent._execution_thread_id = None
 
         from tools.delegate_tool import _run_single_child
diff --git a/tests/run_agent/test_run_agent.py b/tests/run_agent/test_run_agent.py
index d716b59b2..568077fd7 100644
--- a/tests/run_agent/test_run_agent.py
+++ b/tests/run_agent/test_run_agent.py
@@ -302,6 +302,17 @@ class TestStripThinkBlocks:
         assert "</think>" not in result
         assert "visible" in result
 
+    def test_thought_block_removed(self, agent):
+        """Gemma 4 uses <thought> tags for inline reasoning."""
+        result = agent._strip_think_blocks("<thought>internal reasoning</thought> answer")
+        assert "internal reasoning" not in result
+        assert "</thought>" not in result
+        assert "answer" in result
+
+    def test_orphaned_thought_tag(self, agent):
+        result = agent._strip_think_blocks("<thought>orphaned reasoning without close")
+        assert "<thought>" not in result
+
 
 class TestExtractReasoning:
     def test_reasoning_field(self, agent):
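A sketch of the stripping behavior these tests pin down (illustrative only;
the real _strip_think_blocks may recognize more tag variants):

    import re

    _THINK_BLOCK_RE = re.compile(
        r"<(think|thought)>.*?(?:</\1>|$)",  # closed pair, or an orphaned open tag
        re.DOTALL,
    )

    def _strip_think_blocks(text: str) -> str:
        return _THINK_BLOCK_RE.sub("", text).strip()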
@@ -869,6 +880,7 @@ class TestBuildApiKwargs:
         assert kwargs["extra_body"]["reasoning"] == {"enabled": False}
 
     def test_reasoning_not_sent_for_unsupported_openrouter_model(self, agent):
+        agent.base_url = "https://openrouter.ai/api/v1"
         agent.model = "minimax/minimax-m2.5"
         messages = [{"role": "user", "content": "hi"}]
         kwargs = agent._build_api_kwargs(messages)
@@ -1564,6 +1576,7 @@ class TestHandleMaxIterations:
         assert "API down" in result
 
     def test_summary_skips_reasoning_for_unsupported_openrouter_model(self, agent):
+        agent.base_url = "https://openrouter.ai/api/v1"
         agent.model = "minimax/minimax-m2.5"
         resp = _mock_response(content="Summary")
         agent.client.chat.completions.create.return_value = resp
@@ -1694,27 +1707,6 @@ class TestRunConversation:
         assert result["completed"] is True
         assert result["api_calls"] == 2
 
-    def test_inline_think_blocks_reasoning_only_accepted(self, agent):
-        """Inline reasoning-only responses accepted with (empty) content, no retries."""
-        self._setup_agent(agent)
-        empty_resp = _mock_response(
-            content="<think>internal reasoning</think>",
-            finish_reason="stop",
-        )
-        agent.client.chat.completions.create.side_effect = [empty_resp]
-        with (
-            patch.object(agent, "_persist_session"),
-            patch.object(agent, "_save_trajectory"),
-            patch.object(agent, "_cleanup_task_resources"),
-        ):
-            result = agent.run_conversation("answer me")
-        assert result["completed"] is True
-        assert result["final_response"] == "(empty)"
-        assert result["api_calls"] == 1  # no retries
-        # Reasoning should be preserved in the assistant message
-        assistant_msgs = [m for m in result["messages"] if m.get("role") == "assistant"]
-        assert any(m.get("reasoning") for m in assistant_msgs)
-
     def test_reasoning_only_local_resumed_no_compression_triggered(self, agent):
         """Reasoning-only responses no longer trigger compression -- prefill then accepted."""
         self._setup_agent(agent)
@@ -1730,9 +1722,9 @@ class TestRunConversation:
             {"role": "assistant", "content": "old answer"},
         ]
 
-        # 3 responses: original + 2 prefill continuations (structured reasoning triggers prefill)
+        # 6 responses: original + 2 prefill + 3 retries after prefill exhaustion
         with (
-            patch.object(agent, "_interruptible_api_call", side_effect=[empty_resp, empty_resp, empty_resp]),
+            patch.object(agent, "_interruptible_api_call", side_effect=[empty_resp] * 6),
             patch.object(agent, "_compress_context") as mock_compress,
             patch.object(agent, "_persist_session"),
             patch.object(agent, "_save_trajectory"),
@@ -1743,18 +1735,18 @@ class TestRunConversation:
         mock_compress.assert_not_called()  # no compression triggered
         assert result["completed"] is True
         assert result["final_response"] == "(empty)"
-        assert result["api_calls"] == 3  # 1 original + 2 prefill continuations
+        assert result["api_calls"] == 6  # 1 original + 2 prefill + 3 retries
 
     def test_reasoning_only_response_prefill_then_empty(self, agent):
-        """Structured reasoning-only triggers prefill continuation (up to 2), then falls through to (empty)."""
+        """Structured reasoning-only triggers prefill (2), then retries (3), then (empty)."""
         self._setup_agent(agent)
         empty_resp = _mock_response(
             content=None,
             finish_reason="stop",
             reasoning_content="structured reasoning answer",
         )
-        # 3 responses: original + 2 prefill continuations, all reasoning-only
-        agent.client.chat.completions.create.side_effect = [empty_resp, empty_resp, empty_resp]
+        # 6 responses: 1 original + 2 prefill + 3 retries after prefill exhaustion
+        agent.client.chat.completions.create.side_effect = [empty_resp] * 6
         with (
             patch.object(agent, 
"_persist_session"), patch.object(agent, "_save_trajectory"), @@ -1763,7 +1755,7 @@ class TestRunConversation: result = agent.run_conversation("answer me") assert result["completed"] is True assert result["final_response"] == "(empty)" - assert result["api_calls"] == 3 # 1 original + 2 prefill continuations + assert result["api_calls"] == 6 # 1 original + 2 prefill + 3 retries def test_reasoning_only_prefill_succeeds_on_continuation(self, agent): """When prefill continuation produces content, it becomes the final response.""" @@ -1938,6 +1930,88 @@ class TestRunConversation: failure_msgs = [m for m in status_messages if "no content" in m.lower() or "no fallback" in m.lower()] assert len(failure_msgs) >= 1, f"Expected at least 1 failure status, got: {status_messages}" + def test_partial_stream_recovery_uses_streamed_content(self, agent): + """When streaming fails after partial delivery, recovered partial content becomes final response.""" + self._setup_agent(agent) + # Simulate a partial-stream-stub response: content recovered from streaming + partial_resp = _mock_response( + content="Here is the partial answer that was stream", + finish_reason="stop", + ) + agent.client.chat.completions.create.return_value = partial_resp + # Simulate that streaming had already delivered this text + agent._current_streamed_assistant_text = "Here is the partial answer that was stream" + with ( + patch.object(agent, "_persist_session"), + patch.object(agent, "_save_trajectory"), + patch.object(agent, "_cleanup_task_resources"), + ): + result = agent.run_conversation("explain something") + # The partial content should be used as-is (not empty, not retried) + assert result["completed"] is True + assert result["final_response"] == "Here is the partial answer that was stream" + assert result["api_calls"] == 1 # No retries + + def test_partial_stream_recovery_on_empty_stub(self, agent): + """When stub response has no content but text was streamed, use streamed text.""" + self._setup_agent(agent) + # Stub response with no content (old behavior before fix) + empty_stub = _mock_response(content=None, finish_reason="stop") + + def _fake_api_call(api_kwargs): + # Simulate what streaming does: accumulate text before returning + # a stub with no content (connection died mid-stream) + agent._current_streamed_assistant_text = "The answer to your question is that" + return empty_stub + + status_messages = [] + + def _capture_status(msg): + status_messages.append(msg) + + with ( + patch.object(agent, "_interruptible_api_call", side_effect=_fake_api_call), + patch.object(agent, "_persist_session"), + patch.object(agent, "_save_trajectory"), + patch.object(agent, "_cleanup_task_resources"), + patch.object(agent, "_emit_status", side_effect=_capture_status), + ): + result = agent.run_conversation("ask me") + # Should recover partial streamed content, not fall through to (empty) + assert result["completed"] is True + assert result["final_response"] == "The answer to your question is that" + assert result["api_calls"] == 1 # No wasted retries + # Should emit the stream-interrupted status, NOT the empty-retry status + recovery_msgs = [m for m in status_messages if "stream interrupted" in m.lower()] + assert len(recovery_msgs) >= 1, f"Expected stream recovery status, got: {status_messages}" + # Should NOT have retry statuses + retry_msgs = [m for m in status_messages if "retrying" in m.lower()] + assert len(retry_msgs) == 0, f"Should not retry when stream content exists: {status_messages}" + + def 
test_partial_stream_recovery_preempts_prior_turn_fallback(self, agent): + """Partial streamed content takes priority over _last_content_with_tools fallback.""" + self._setup_agent(agent) + # Set up the prior-turn fallback content (from a previous turn with tool calls) + agent._last_content_with_tools = "Old content from prior turn with tools" + # Stub response with no content + empty_stub = _mock_response(content=None, finish_reason="stop") + + def _fake_api_call(api_kwargs): + # Simulate partial streaming before connection death + agent._current_streamed_assistant_text = "Fresh partial content from this turn" + return empty_stub + + with ( + patch.object(agent, "_interruptible_api_call", side_effect=_fake_api_call), + patch.object(agent, "_persist_session"), + patch.object(agent, "_save_trajectory"), + patch.object(agent, "_cleanup_task_resources"), + ): + result = agent.run_conversation("question") + # Should use the streamed content, not the old prior-turn fallback + assert result["final_response"] == "Fresh partial content from this turn" + assert result["api_calls"] == 1 + def test_nous_401_refreshes_after_remint_and_retries(self, agent): self._setup_agent(agent) agent.provider = "nous" @@ -3426,8 +3500,8 @@ class TestStreamingApiCall: call_kwargs = agent.client.chat.completions.create.call_args assert call_kwargs[1].get("stream") is True or call_kwargs.kwargs.get("stream") is True - def test_api_exception_falls_back_to_non_streaming(self, agent): - """When streaming fails before any deltas, fallback to non-streaming is attempted.""" + def test_api_exception_propagates_no_non_streaming_fallback(self, agent): + """When streaming fails before any deltas, error propagates to the main retry loop.""" agent.client.chat.completions.create.side_effect = ConnectionError("fail") # Prevent stream retry logic from replacing the mock client with patch.object(agent, "_replace_primary_openai_client", return_value=False): diff --git a/tests/run_agent/test_run_agent_codex_responses.py b/tests/run_agent/test_run_agent_codex_responses.py index 533a85ac8..0fca9e4df 100644 --- a/tests/run_agent/test_run_agent_codex_responses.py +++ b/tests/run_agent/test_run_agent_codex_responses.py @@ -243,6 +243,22 @@ def test_api_mode_respects_explicit_openrouter_provider_over_codex_url(monkeypat assert agent.provider == "openrouter" +def test_copilot_acp_stays_on_chat_completions_for_gpt_5_models(monkeypatch): + _patch_agent_bootstrap(monkeypatch) + agent = run_agent.AIAgent( + model="gpt-5.4-mini", + base_url="acp://copilot", + provider="copilot-acp", + api_key="copilot-acp", + quiet_mode=True, + max_iterations=1, + skip_context_files=True, + skip_memory=True, + ) + assert agent.provider == "copilot-acp" + assert agent.api_mode == "chat_completions" + + def test_build_api_kwargs_codex(monkeypatch): agent = _build_agent(monkeypatch) kwargs = agent._build_api_kwargs( diff --git a/tests/run_agent/test_streaming.py b/tests/run_agent/test_streaming.py index 37a61ac37..97dcffc67 100644 --- a/tests/run_agent/test_streaming.py +++ b/tests/run_agent/test_streaming.py @@ -291,6 +291,38 @@ class TestStreamingCallbacks: assert len(first_delta_calls) == 1 + @patch("run_agent.AIAgent._create_request_openai_client") + @patch("run_agent.AIAgent._close_request_openai_client") + def test_chat_stream_refreshes_activity_on_every_chunk(self, mock_close, mock_create): + """Each streamed chat chunk should refresh the activity timestamp.""" + from run_agent import AIAgent + + chunks = [ + _make_stream_chunk(content="a"), + 
_make_stream_chunk(content="b"), + _make_stream_chunk(finish_reason="stop"), + ] + + mock_client = MagicMock() + mock_client.chat.completions.create.return_value = iter(chunks) + mock_create.return_value = mock_client + + agent = AIAgent( + model="test/model", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + agent.api_mode = "chat_completions" + agent._interrupt_requested = False + + touch_calls = [] + agent._touch_activity = lambda desc: touch_calls.append(desc) + + agent._interruptible_streaming_api_call({}) + + assert touch_calls.count("receiving stream response") == len(chunks) + @patch("run_agent.AIAgent._create_request_openai_client") @patch("run_agent.AIAgent._close_request_openai_client") def test_tool_only_does_not_fire_callback(self, mock_close, mock_create): @@ -374,13 +406,19 @@ class TestStreamingCallbacks: class TestStreamingFallback: - """Verify fallback to non-streaming on ANY streaming error.""" + """Verify streaming errors propagate to the main retry loop. + + Previously, streaming errors triggered an inline fallback to + non-streaming. Now they propagate so the main retry loop can apply + richer recovery (credential rotation, provider fallback, backoff). + The only special case: 'stream not supported' sets _disable_streaming + so the *next* main-loop retry uses non-streaming automatically. + """ - @patch("run_agent.AIAgent._interruptible_api_call") @patch("run_agent.AIAgent._create_request_openai_client") @patch("run_agent.AIAgent._close_request_openai_client") - def test_stream_error_falls_back(self, mock_close, mock_create, mock_non_stream): - """'not supported' error triggers fallback to non-streaming.""" + def test_stream_not_supported_sets_flag_and_raises(self, mock_close, mock_create): + """'not supported' error sets _disable_streaming and propagates.""" from run_agent import AIAgent mock_client = MagicMock() @@ -389,23 +427,6 @@ class TestStreamingFallback: ) mock_create.return_value = mock_client - fallback_response = SimpleNamespace( - id="fallback", - model="test", - choices=[SimpleNamespace( - index=0, - message=SimpleNamespace( - role="assistant", - content="fallback response", - tool_calls=None, - reasoning_content=None, - ), - finish_reason="stop", - )], - usage=None, - ) - mock_non_stream.return_value = fallback_response - agent = AIAgent( model="test/model", quiet_mode=True, @@ -415,16 +436,16 @@ class TestStreamingFallback: agent.api_mode = "chat_completions" agent._interrupt_requested = False - response = agent._interruptible_streaming_api_call({}) + with pytest.raises(Exception, match="Streaming is not supported"): + agent._interruptible_streaming_api_call({}) - assert response.choices[0].message.content == "fallback response" - mock_non_stream.assert_called_once() + # The flag should be set so the main retry loop switches to non-streaming + assert agent._disable_streaming is True - @patch("run_agent.AIAgent._interruptible_api_call") @patch("run_agent.AIAgent._create_request_openai_client") @patch("run_agent.AIAgent._close_request_openai_client") - def test_any_stream_error_falls_back(self, mock_close, mock_create, mock_non_stream): - """ANY streaming error triggers fallback โ€” not just specific messages.""" + def test_non_transport_error_propagates(self, mock_close, mock_create): + """Non-transport streaming errors propagate to the main retry loop.""" from run_agent import AIAgent mock_client = MagicMock() @@ -433,23 +454,6 @@ class TestStreamingFallback: ) mock_create.return_value = mock_client - fallback_response = 
 
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_stream_error_falls_back(self, mock_close, mock_create, mock_non_stream):
-        """'not supported' error triggers fallback to non-streaming."""
+    def test_stream_not_supported_sets_flag_and_raises(self, mock_close, mock_create):
+        """'not supported' error sets _disable_streaming and propagates."""
         from run_agent import AIAgent
 
         mock_client = MagicMock()
@@ -389,23 +427,6 @@
         )
         mock_create.return_value = mock_client
 
-        fallback_response = SimpleNamespace(
-            id="fallback",
-            model="test",
-            choices=[SimpleNamespace(
-                index=0,
-                message=SimpleNamespace(
-                    role="assistant",
-                    content="fallback response",
-                    tool_calls=None,
-                    reasoning_content=None,
-                ),
-                finish_reason="stop",
-            )],
-            usage=None,
-        )
-        mock_non_stream.return_value = fallback_response
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -415,16 +436,16 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        response = agent._interruptible_streaming_api_call({})
+        with pytest.raises(Exception, match="Streaming is not supported"):
+            agent._interruptible_streaming_api_call({})
 
-        assert response.choices[0].message.content == "fallback response"
-        mock_non_stream.assert_called_once()
+        # The flag should be set so the main retry loop switches to non-streaming
+        assert agent._disable_streaming is True
 
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_any_stream_error_falls_back(self, mock_close, mock_create, mock_non_stream):
-        """ANY streaming error triggers fallback -- not just specific messages."""
+    def test_non_transport_error_propagates(self, mock_close, mock_create):
+        """Non-transport streaming errors propagate to the main retry loop."""
         from run_agent import AIAgent
 
         mock_client = MagicMock()
@@ -433,23 +454,6 @@
         )
         mock_create.return_value = mock_client
 
-        fallback_response = SimpleNamespace(
-            id="fallback",
-            model="test",
-            choices=[SimpleNamespace(
-                index=0,
-                message=SimpleNamespace(
-                    role="assistant",
-                    content="fallback after connection error",
-                    tool_calls=None,
-                    reasoning_content=None,
-                ),
-                finish_reason="stop",
-            )],
-            usage=None,
-        )
-        mock_non_stream.return_value = fallback_response
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -459,24 +463,19 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        response = agent._interruptible_streaming_api_call({})
+        with pytest.raises(Exception, match="Connection reset by peer"):
+            agent._interruptible_streaming_api_call({})
 
-        assert response.choices[0].message.content == "fallback after connection error"
-        mock_non_stream.assert_called_once()
-
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_fallback_error_propagates(self, mock_close, mock_create, mock_non_stream):
-        """When both streaming AND fallback fail, the fallback error propagates."""
+    def test_stream_error_propagates_original(self, mock_close, mock_create):
+        """The original streaming error propagates (not a fallback error)."""
         from run_agent import AIAgent
 
         mock_client = MagicMock()
         mock_client.chat.completions.create.side_effect = Exception("stream broke")
         mock_create.return_value = mock_client
 
-        mock_non_stream.side_effect = Exception("Rate limit exceeded")
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -486,14 +485,13 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        with pytest.raises(Exception, match="Rate limit exceeded"):
+        with pytest.raises(Exception, match="stream broke"):
             agent._interruptible_streaming_api_call({})
 
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_exhausted_transient_stream_error_falls_back(self, mock_close, mock_create, mock_non_stream):
-        """Transient stream errors retry first, then fall back after retries are exhausted."""
+    def test_exhausted_transient_stream_error_propagates(self, mock_close, mock_create):
+        """Transient stream errors retry first, then propagate after retries exhausted."""
         from run_agent import AIAgent
         import httpx
 
         mock_client = MagicMock()
         mock_client.chat.completions.create.side_effect = httpx.ConnectError("socket closed")
         mock_create.return_value = mock_client
 
-        fallback_response = SimpleNamespace(
-            id="fallback",
-            model="test",
-            choices=[SimpleNamespace(
-                index=0,
-                message=SimpleNamespace(
-                    role="assistant",
-                    content="fallback after retries exhausted",
-                    tool_calls=None,
-                    reasoning_content=None,
-                ),
-                finish_reason="stop",
-            )],
-            usage=None,
-        )
-        mock_non_stream.return_value = fallback_response
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -527,23 +508,22 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        response = agent._interruptible_streaming_api_call({})
+        with pytest.raises(httpx.ConnectError, match="socket closed"):
+            agent._interruptible_streaming_api_call({})
 
-        assert response.choices[0].message.content == "fallback after retries exhausted"
+        # Should have retried 3 times (default HERMES_STREAM_RETRIES=2 -> 3 attempts)
         assert mock_client.chat.completions.create.call_count == 3
-        mock_non_stream.assert_called_once()
         assert mock_close.call_count >= 1
 
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_sse_connection_lost_retried_as_transient(self, mock_close, mock_create, mock_non_stream):
+    def test_sse_connection_lost_retried_as_transient(self, mock_close, mock_create):
         """SSE 'Network connection lost' (APIError w/ no status_code) retries like httpx errors.
 
         OpenRouter sends {"error":{"message":"Network connection lost."}} as an SSE
         event when the upstream stream drops. The OpenAI SDK raises APIError from
         this. It should be retried at the streaming level, same as httpx connection
-        errors, before falling back to non-streaming.
+        errors, then propagate to the main retry loop after exhaustion.
         """
         from run_agent import AIAgent
         import httpx
@@ -561,23 +541,6 @@
         mock_client.chat.completions.create.side_effect = sse_error
         mock_create.return_value = mock_client
 
-        fallback_response = SimpleNamespace(
-            id="fallback",
-            model="test",
-            choices=[SimpleNamespace(
-                index=0,
-                message=SimpleNamespace(
-                    role="assistant",
-                    content="fallback after SSE retries",
-                    tool_calls=None,
-                    reasoning_content=None,
-                ),
-                finish_reason="stop",
-            )],
-            usage=None,
-        )
-        mock_non_stream.return_value = fallback_response
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -587,21 +550,18 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        response = agent._interruptible_streaming_api_call({})
+        with pytest.raises(OAIAPIError):
+            agent._interruptible_streaming_api_call({})
 
-        assert response.choices[0].message.content == "fallback after SSE retries"
         # Should retry 3 times (default HERMES_STREAM_RETRIES=2 -> 3 attempts)
-        # before falling back to non-streaming
         assert mock_client.chat.completions.create.call_count == 3
-        mock_non_stream.assert_called_once()
         # Connection cleanup should happen for each failed retry
         assert mock_close.call_count >= 2
 
-    @patch("run_agent.AIAgent._interruptible_api_call")
     @patch("run_agent.AIAgent._create_request_openai_client")
     @patch("run_agent.AIAgent._close_request_openai_client")
-    def test_sse_non_connection_error_falls_back_immediately(self, mock_close, mock_create, mock_non_stream):
-        """SSE errors that aren't connection-related still fall back immediately (no stream retry)."""
+    def test_sse_non_connection_error_propagates_immediately(self, mock_close, mock_create):
+        """SSE errors that aren't connection-related propagate immediately (no stream retry)."""
         from run_agent import AIAgent
         import httpx
 
@@ -616,23 +576,6 @@
         mock_client.chat.completions.create.side_effect = sse_error
         mock_create.return_value = mock_client
 
-        fallback_response = SimpleNamespace(
-            id="fallback",
-            model="test",
-            choices=[SimpleNamespace(
-                index=0,
-                message=SimpleNamespace(
-                    role="assistant",
-                    content="fallback no retry",
-                    tool_calls=None,
-                    reasoning_content=None,
-                ),
-                finish_reason="stop",
-            )],
-            usage=None,
-        )
-        mock_non_stream.return_value = fallback_response
-
         agent = AIAgent(
             model="test/model",
             quiet_mode=True,
@@ -642,12 +585,11 @@
         agent.api_mode = "chat_completions"
         agent._interrupt_requested = False
 
-        response = agent._interruptible_streaming_api_call({})
+        with pytest.raises(OAIAPIError):
+            agent._interruptible_streaming_api_call({})
 
-        assert response.choices[0].message.content == "fallback no retry"
retry" - # Should NOT retry โ€” goes straight to non-streaming fallback + # Should NOT retry โ€” propagates immediately assert mock_client.chat.completions.create.call_count == 1 - mock_non_stream.assert_called_once() # โ”€โ”€ Test: Reasoning Streaming โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ @@ -783,6 +725,55 @@ class TestCodexStreamCallbacks: response = agent._run_codex_stream({}, client=mock_client) assert "Hello from Codex!" in deltas + def test_codex_stream_refreshes_activity_on_every_event(self): + from run_agent import AIAgent + + agent = AIAgent( + model="test/model", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + agent.api_mode = "codex_responses" + agent._interrupt_requested = False + + touch_calls = [] + agent._touch_activity = lambda desc: touch_calls.append(desc) + + mock_event_text_1 = SimpleNamespace( + type="response.output_text.delta", + delta="Hello", + ) + mock_event_text_2 = SimpleNamespace( + type="response.output_text.delta", + delta=" world", + ) + mock_event_done = SimpleNamespace( + type="response.completed", + delta="", + ) + + mock_stream = MagicMock() + mock_stream.__enter__ = MagicMock(return_value=mock_stream) + mock_stream.__exit__ = MagicMock(return_value=False) + mock_stream.__iter__ = MagicMock( + return_value=iter([mock_event_text_1, mock_event_text_2, mock_event_done]) + ) + mock_stream.get_final_response.return_value = SimpleNamespace( + output=[SimpleNamespace( + type="message", + content=[SimpleNamespace(type="output_text", text="Hello world")], + )], + status="completed", + ) + + mock_client = MagicMock() + mock_client.responses.stream.return_value = mock_stream + + agent._run_codex_stream({}, client=mock_client) + + assert touch_calls.count("receiving stream response") == 3 + def test_codex_remote_protocol_error_falls_back_to_create_stream(self): from run_agent import AIAgent import httpx @@ -814,3 +805,102 @@ class TestCodexStreamCallbacks: assert response is fallback_response mock_fallback.assert_called_once_with({}, client=mock_client) + + def test_codex_create_stream_fallback_refreshes_activity_on_every_event(self): + from run_agent import AIAgent + + agent = AIAgent( + model="test/model", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + agent.api_mode = "codex_responses" + + touch_calls = [] + agent._touch_activity = lambda desc: touch_calls.append(desc) + + events = [ + SimpleNamespace(type="response.output_text.delta", delta="Hello"), + SimpleNamespace(type="response.output_item.done", item=SimpleNamespace(type="message")), + SimpleNamespace( + type="response.completed", + response=SimpleNamespace( + output=[SimpleNamespace( + type="message", + content=[SimpleNamespace(type="output_text", text="Hello")], + )] + ), + ), + ] + + class _FakeCreateStream: + def __iter__(self_inner): + return iter(events) + + def close(self_inner): + return None + + mock_stream = _FakeCreateStream() + + mock_client = MagicMock() + mock_client.responses.create.return_value = mock_stream + + agent._run_codex_create_stream_fallback( + {"model": "test/model", "instructions": "hi", "input": []}, + client=mock_client, + ) + + assert touch_calls.count("receiving stream response") == len(events) + + +class TestAnthropicStreamCallbacks: + """Verify Anthropic streaming refreshes activity on every event.""" + + def test_anthropic_stream_refreshes_activity_on_every_event(self): + from run_agent import AIAgent + + agent = AIAgent( + 
model="test/model", + quiet_mode=True, + skip_context_files=True, + skip_memory=True, + ) + agent.api_mode = "anthropic_messages" + agent._interrupt_requested = False + + touch_calls = [] + agent._touch_activity = lambda desc: touch_calls.append(desc) + + events = [ + SimpleNamespace( + type="content_block_delta", + delta=SimpleNamespace(type="text_delta", text="Hello"), + ), + SimpleNamespace( + type="content_block_delta", + delta=SimpleNamespace(type="thinking_delta", thinking="thinking"), + ), + SimpleNamespace( + type="content_block_start", + content_block=SimpleNamespace(type="tool_use", name="terminal"), + ), + ] + + final_message = SimpleNamespace( + content=[], + stop_reason="end_turn", + ) + + mock_stream = MagicMock() + mock_stream.__enter__ = MagicMock(return_value=mock_stream) + mock_stream.__exit__ = MagicMock(return_value=False) + mock_stream.__iter__ = MagicMock(return_value=iter(events)) + mock_stream.get_final_message.return_value = final_message + + agent._anthropic_client = MagicMock() + agent._anthropic_client.messages.stream.return_value = mock_stream + + agent._interruptible_streaming_api_call({}) + + assert touch_calls.count("receiving stream response") == len(events) diff --git a/tests/run_agent/test_unicode_ascii_codec.py b/tests/run_agent/test_unicode_ascii_codec.py index 30fe92e41..fc175696e 100644 --- a/tests/run_agent/test_unicode_ascii_codec.py +++ b/tests/run_agent/test_unicode_ascii_codec.py @@ -9,6 +9,8 @@ import pytest from run_agent import ( _strip_non_ascii, _sanitize_messages_non_ascii, + _sanitize_structure_non_ascii, + _sanitize_tools_non_ascii, _sanitize_messages_surrogates, ) @@ -138,3 +140,66 @@ class TestSurrogateVsAsciiSanitization: """When no surrogates present, _sanitize_messages_surrogates returns False.""" messages = [{"role": "user", "content": "hello โš• world"}] assert _sanitize_messages_surrogates(messages) is False + + +class TestSanitizeToolsNonAscii: + """Tests for _sanitize_tools_non_ascii.""" + + def test_sanitizes_tool_description_and_parameter_descriptions(self): + tools = [ + { + "type": "function", + "function": { + "name": "read_file", + "description": "Print structured output โ”‚ with emoji ๐Ÿค–", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "File path โ”‚ with unicode", + } + }, + }, + }, + } + ] + + assert _sanitize_tools_non_ascii(tools) is True + assert tools[0]["function"]["description"] == "Print structured output with emoji " + assert tools[0]["function"]["parameters"]["properties"]["path"]["description"] == "File path with unicode" + + def test_no_change_for_ascii_only_tools(self): + tools = [ + { + "type": "function", + "function": { + "name": "read_file", + "description": "Read file content", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "File path", + } + }, + }, + }, + } + ] + + assert _sanitize_tools_non_ascii(tools) is False + + +class TestSanitizeStructureNonAscii: + def test_sanitizes_nested_dict_structure(self): + payload = { + "default_headers": { + "X-Title": "Hermes โ”‚ Agent", + "User-Agent": "Hermes/1.0 ๐Ÿค–", + } + } + assert _sanitize_structure_non_ascii(payload) is True + assert payload["default_headers"]["X-Title"] == "Hermes Agent" + assert payload["default_headers"]["User-Agent"] == "Hermes/1.0 " diff --git a/tests/test_ctx_halving_fix.py b/tests/test_ctx_halving_fix.py index 1ba423c8f..0dd3ca4e7 100644 --- a/tests/test_ctx_halving_fix.py +++ b/tests/test_ctx_halving_fix.py @@ -179,6 
diff --git a/tests/test_ctx_halving_fix.py b/tests/test_ctx_halving_fix.py
index 1ba423c8f..0dd3ca4e7 100644
--- a/tests/test_ctx_halving_fix.py
+++ b/tests/test_ctx_halving_fix.py
@@ -179,6 +179,7 @@ class TestEphemeralMaxOutputTokens:
             return_value=[{"role": "user", "content": "hi"}]
         )
         agent._anthropic_preserve_dots = MagicMock(return_value=False)
+        agent.request_overrides = {}
         return agent
 
     def test_ephemeral_override_is_used_on_first_call(self):
@@ -253,6 +254,7 @@ class TestContextNotHalvedOnOutputCapError:
         )
         agent._anthropic_preserve_dots = MagicMock(return_value=False)
         agent._vprint = MagicMock()
+        agent.request_overrides = {}
         return agent
 
     def test_output_cap_error_sets_ephemeral_not_context_length(self):
diff --git a/tests/test_hermes_constants.py b/tests/test_hermes_constants.py
index b3438596b..d49dff813 100644
--- a/tests/test_hermes_constants.py
+++ b/tests/test_hermes_constants.py
@@ -6,7 +6,8 @@ from unittest.mock import patch
 
 import pytest
 
-from hermes_constants import get_default_hermes_root
+import hermes_constants
+from hermes_constants import get_default_hermes_root, is_container
 
 
 class TestGetDefaultHermesRoot:
@@ -60,3 +61,53 @@
         monkeypatch.setattr(Path, "home", lambda: tmp_path)
         monkeypatch.setenv("HERMES_HOME", str(profile))
         assert get_default_hermes_root() == docker_root
+
+
+class TestIsContainer:
+    """Tests for is_container() -- Docker/Podman detection."""
+
+    def _reset_cache(self, monkeypatch):
+        """Reset the cached detection result before each test."""
+        monkeypatch.setattr(hermes_constants, "_container_detected", None)
+
+    def test_detects_dockerenv(self, monkeypatch, tmp_path):
+        """/.dockerenv triggers container detection."""
+        self._reset_cache(monkeypatch)
+        monkeypatch.setattr(os.path, "exists", lambda p: p == "/.dockerenv")
+        assert is_container() is True
+
+    def test_detects_containerenv(self, monkeypatch, tmp_path):
+        """/run/.containerenv triggers container detection (Podman)."""
+        self._reset_cache(monkeypatch)
+        monkeypatch.setattr(os.path, "exists", lambda p: p == "/run/.containerenv")
+        assert is_container() is True
+
+    def test_detects_cgroup_docker(self, monkeypatch, tmp_path):
+        """/proc/1/cgroup containing 'docker' triggers detection."""
+        import builtins
+        self._reset_cache(monkeypatch)
+        monkeypatch.setattr(os.path, "exists", lambda p: False)
+        cgroup_file = tmp_path / "cgroup"
+        cgroup_file.write_text("12:memory:/docker/abc123\n")
+        _real_open = builtins.open
+        monkeypatch.setattr("builtins.open", lambda p, *a, **kw: _real_open(str(cgroup_file), *a, **kw) if p == "/proc/1/cgroup" else _real_open(p, *a, **kw))
+        assert is_container() is True
+
+    def test_negative_case(self, monkeypatch, tmp_path):
+        """Returns False on a regular Linux host."""
+        import builtins
+        self._reset_cache(monkeypatch)
+        monkeypatch.setattr(os.path, "exists", lambda p: False)
+        cgroup_file = tmp_path / "cgroup"
+        cgroup_file.write_text("12:memory:/\n")
+        _real_open = builtins.open
+        monkeypatch.setattr("builtins.open", lambda p, *a, **kw: _real_open(str(cgroup_file), *a, **kw) if p == "/proc/1/cgroup" else _real_open(p, *a, **kw))
+        assert is_container() is False
+
+    def test_caches_result(self, monkeypatch):
+        """Second call uses cached value without re-probing."""
+        monkeypatch.setattr(hermes_constants, "_container_detected", True)
+        assert is_container() is True
+        # Even if we make os.path.exists return False, cached value wins
+        monkeypatch.setattr(os.path, "exists", lambda p: False)
+        assert is_container() is True
diff --git a/tests/test_hermes_logging.py b/tests/test_hermes_logging.py
index 46969d58d..586a4d666 100644
--- a/tests/test_hermes_logging.py
+++ b/tests/test_hermes_logging.py
@@ -298,8 +298,17 @@ class 
TestGatewayMode: """agent.log (catch-all) still receives gateway AND tool records.""" hermes_logging.setup_logging(hermes_home=hermes_home, mode="gateway") - logging.getLogger("gateway.run").info("gateway msg") - logging.getLogger("tools.file_tools").info("file msg") + gw_logger = logging.getLogger("gateway.run") + file_logger = logging.getLogger("tools.file_tools") + # Ensure propagation and levels are clean (cross-test pollution defense) + gw_logger.propagate = True + file_logger.propagate = True + logging.getLogger("tools").propagate = True + file_logger.setLevel(logging.NOTSET) + logging.getLogger("tools").setLevel(logging.NOTSET) + + gw_logger.info("gateway msg") + file_logger.info("file msg") for h in logging.getLogger().handlers: h.flush() diff --git a/tests/test_trajectory_compressor_async.py b/tests/test_trajectory_compressor_async.py index 2b276d03d..1c671471d 100644 --- a/tests/test_trajectory_compressor_async.py +++ b/tests/test_trajectory_compressor_async.py @@ -103,7 +103,7 @@ class TestSourceLineVerification: if "self.async_client = AsyncOpenAI(" in line and "_get_async_client" not in lines[max(0,i-3):i+1]: # Allow it inside _get_async_client method # Check if we're inside _get_async_client by looking at context - context = "\n".join(lines[max(0,i-10):i+1]) + context = "\n".join(lines[max(0,i-20):i+1]) if "_get_async_client" not in context: pytest.fail( f"Line {i}: AsyncOpenAI created eagerly outside _get_async_client()" diff --git a/tests/tools/test_browser_camofox_state.py b/tests/tools/test_browser_camofox_state.py index 33a939f09..475e8c2d0 100644 --- a/tests/tools/test_browser_camofox_state.py +++ b/tests/tools/test_browser_camofox_state.py @@ -64,4 +64,4 @@ class TestCamofoxConfigDefaults: # The current schema version is tracked globally; unrelated default # options may bump it after browser defaults are added. 
- assert DEFAULT_CONFIG["_config_version"] == 15 + assert DEFAULT_CONFIG["_config_version"] == 17 diff --git a/tests/tools/test_file_write_safety.py b/tests/tools/test_file_write_safety.py index 12bc1ccac..e2eef17ab 100644 --- a/tests/tools/test_file_write_safety.py +++ b/tests/tools/test_file_write_safety.py @@ -79,5 +79,33 @@ class TestSafeWriteRoot: assert _is_write_denied(os.path.expanduser("~/.ssh/id_rsa")) is True +class TestCheckSensitivePathMacOSBypass: + """Verify _check_sensitive_path blocks /private/etc paths (issue #8734).""" + + def test_etc_hosts_blocked(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/etc/hosts") is not None + + def test_private_etc_hosts_blocked(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/private/etc/hosts") is not None + + def test_private_etc_ssh_config_blocked(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/private/etc/ssh/sshd_config") is not None + + def test_private_var_blocked(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/private/var/db/something") is not None + + def test_boot_still_blocked(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/boot/grub/grub.cfg") is not None + + def test_safe_path_allowed(self): + from tools.file_tools import _check_sensitive_path + assert _check_sensitive_path("/tmp/safe_file.txt") is None + + if __name__ == "__main__": pytest.main([__file__, "-v"]) diff --git a/tests/tools/test_homeassistant_tool.py b/tests/tools/test_homeassistant_tool.py index b136b5653..654424a0a 100644 --- a/tests/tools/test_homeassistant_tool.py +++ b/tests/tools/test_homeassistant_tool.py @@ -5,6 +5,7 @@ handler validation, and availability gating. 
""" import json +from unittest.mock import patch import pytest @@ -18,6 +19,7 @@ from tools.homeassistant_tool import ( _handle_call_service, _BLOCKED_DOMAINS, _ENTITY_ID_RE, + _SERVICE_NAME_RE, ) @@ -303,6 +305,147 @@ class TestEntityIdValidation: assert "Invalid entity_id" not in result["error"] +# --------------------------------------------------------------------------- +# String-data deserialization (XML tool calling workaround) +# --------------------------------------------------------------------------- + + +class TestCallServiceStringData: + """data param may arrive as a JSON string (XML tool calling mode).""" + + @patch("tools.homeassistant_tool._run_async", return_value={"success": True}) + def test_string_data_deserialized(self, mock_run): + """JSON string data is parsed into a dict before dispatch.""" + _handle_call_service({ + "domain": "climate", + "service": "set_hvac_mode", + "entity_id": "climate.living_room", + "data": '{"hvac_mode": "heat"}', + }) + call_args = mock_run.call_args[0][0] # the coroutine arg + # _run_async was called, meaning we got past validation + + @patch("tools.homeassistant_tool._run_async", return_value={"success": True}) + def test_dict_data_passthrough(self, mock_run): + """Dict data (JSON tool calling mode) still works unchanged.""" + _handle_call_service({ + "domain": "light", + "service": "turn_on", + "entity_id": "light.bedroom", + "data": {"brightness": 255}, + }) + mock_run.assert_called_once() + + def test_invalid_json_string_returns_error(self): + """Malformed JSON string in data returns a clear error.""" + result = json.loads(_handle_call_service({ + "domain": "light", + "service": "turn_on", + "entity_id": "light.bedroom", + "data": "{not valid json}", + })) + assert "error" in result + assert "Invalid JSON" in result["error"] + + @patch("tools.homeassistant_tool._run_async", return_value={"success": True}) + def test_empty_string_data_becomes_none(self, mock_run): + """Empty/whitespace string data is treated as None.""" + _handle_call_service({ + "domain": "light", + "service": "turn_on", + "entity_id": "light.bedroom", + "data": " ", + }) + mock_run.assert_called_once() + + +# --------------------------------------------------------------------------- +# Security: domain/service name format validation +# --------------------------------------------------------------------------- + + +class TestServiceNameValidation: + """Verify domain/service format validation prevents path traversal in URL. + + The domain and service parameters are interpolated into + /api/services/{domain}/{service}, so allowing arbitrary strings would + enable SSRF via path traversal or blocked-domain bypass. 
+ """ + + def test_valid_domain_names(self): + assert _SERVICE_NAME_RE.match("light") + assert _SERVICE_NAME_RE.match("switch") + assert _SERVICE_NAME_RE.match("climate") + assert _SERVICE_NAME_RE.match("shell_command") + assert _SERVICE_NAME_RE.match("media_player") + + def test_valid_service_names(self): + assert _SERVICE_NAME_RE.match("turn_on") + assert _SERVICE_NAME_RE.match("turn_off") + assert _SERVICE_NAME_RE.match("set_temperature") + assert _SERVICE_NAME_RE.match("toggle") + + def test_path_traversal_in_domain_rejected(self): + assert _SERVICE_NAME_RE.match("../../api/config") is None + assert _SERVICE_NAME_RE.match("light/../../../etc") is None + assert _SERVICE_NAME_RE.match("../config") is None + + def test_path_traversal_in_service_rejected(self): + assert _SERVICE_NAME_RE.match("../../api/config") is None + assert _SERVICE_NAME_RE.match("turn_on/../../config") is None + + def test_blocked_domain_bypass_via_traversal_rejected(self): + """Ensure shell_command/../light is rejected, not just checked against blocklist.""" + assert _SERVICE_NAME_RE.match("shell_command/../light") is None + assert _SERVICE_NAME_RE.match("python_script/../scene") is None + assert _SERVICE_NAME_RE.match("hassio/../automation") is None + + def test_slashes_rejected(self): + assert _SERVICE_NAME_RE.match("light/turn_on") is None + assert _SERVICE_NAME_RE.match("a/b/c") is None + + def test_dots_rejected(self): + assert _SERVICE_NAME_RE.match("light.turn_on") is None + assert _SERVICE_NAME_RE.match("..") is None + + def test_uppercase_rejected(self): + assert _SERVICE_NAME_RE.match("LIGHT") is None + assert _SERVICE_NAME_RE.match("Turn_On") is None + + def test_special_chars_rejected(self): + assert _SERVICE_NAME_RE.match("light;rm") is None + assert _SERVICE_NAME_RE.match("light&cmd") is None + assert _SERVICE_NAME_RE.match("light cmd") is None + + def test_handler_rejects_traversal_domain(self): + """_handle_call_service must reject domain with path traversal.""" + result = json.loads(_handle_call_service({ + "domain": "../../api/config", + "service": "turn_on", + })) + assert "error" in result + assert "Invalid domain" in result["error"] + + def test_handler_rejects_traversal_service(self): + """_handle_call_service must reject service with path traversal.""" + result = json.loads(_handle_call_service({ + "domain": "light", + "service": "../../api/config", + })) + assert "error" in result + assert "Invalid service" in result["error"] + + def test_handler_rejects_blocklist_bypass_traversal(self): + """Blocklist bypass via shell_command/../light must be caught by format validation.""" + result = json.loads(_handle_call_service({ + "domain": "shell_command/../light", + "service": "turn_on", + })) + assert "error" in result + # Must be rejected as "Invalid domain", not slip through the blocklist + assert "Invalid domain" in result["error"] + + # --------------------------------------------------------------------------- # Availability check # --------------------------------------------------------------------------- diff --git a/tests/tools/test_interrupt.py b/tests/tools/test_interrupt.py index dc0ab4599..13b5041d6 100644 --- a/tests/tools/test_interrupt.py +++ b/tests/tools/test_interrupt.py @@ -28,7 +28,7 @@ class TestInterruptModule: assert not is_interrupted() def test_thread_safety(self): - """Set from one thread, check from another.""" + """Set from one thread targeting another thread's ident.""" from tools.interrupt import set_interrupt, is_interrupted set_interrupt(False) @@ -45,11 +45,12 @@ 
class TestInterruptModule: time.sleep(0.05) assert not seen["value"] - set_interrupt(True) + # Target the checker thread's ident so it sees the interrupt + set_interrupt(True, thread_id=t.ident) t.join(timeout=1) assert seen["value"] - set_interrupt(False) + set_interrupt(False, thread_id=t.ident) # --------------------------------------------------------------------------- @@ -189,10 +190,10 @@ class TestSIGKILLEscalation: t.start() time.sleep(0.5) - set_interrupt(True) + set_interrupt(True, thread_id=t.ident) t.join(timeout=5) - set_interrupt(False) + set_interrupt(False, thread_id=t.ident) assert result_holder["value"] is not None assert result_holder["value"]["returncode"] == 130 diff --git a/tests/tools/test_session_search.py b/tests/tools/test_session_search.py index acb64d62f..852ac7b9e 100644 --- a/tests/tools/test_session_search.py +++ b/tests/tools/test_session_search.py @@ -146,6 +146,40 @@ class TestTruncateAroundMatches: result = _truncate_around_matches(text, "KEYWORD") assert "KEYWORD" in result + def test_multiword_phrase_match_beats_individual_term(self): + """Full phrase deep in text should be found even when a single term + appears much earlier in boilerplate.""" + boilerplate = "The project setup is complex. " * 500 # ~15K, has 'project' early + filler = "x" * (MAX_SESSION_CHARS + 20000) + target = "We reviewed the keystone project roadmap in detail." + text = boilerplate + filler + target + filler + result = _truncate_around_matches(text, "keystone project") + assert "keystone project" in result.lower() + + def test_multiword_proximity_cooccurrence(self): + """When exact phrase is absent, terms co-occurring within proximity + should be preferred over a lone early term.""" + early = "project " + "a" * (MAX_SESSION_CHARS + 20000) + # Place 'keystone' and 'project' near each other (but not as exact phrase) + cooccur = "this keystone initiative for the project was pivotal" + tail = "b" * (MAX_SESSION_CHARS + 20000) + text = early + cooccur + tail + result = _truncate_around_matches(text, "keystone project") + assert "keystone" in result.lower() + assert "project" in result.lower() + + def test_multiword_window_maximises_coverage(self): + """Sliding window should capture as many match clusters as possible.""" + # Place two phrase matches: one at ~50K, one at ~60K, both should fit + pre = "z" * 50000 + match1 = " alpha beta " + gap = "z" * 10000 + match2 = " alpha beta " + post = "z" * (MAX_SESSION_CHARS + 40000) + text = pre + match1 + gap + match2 + post + result = _truncate_around_matches(text, "alpha beta") + assert result.lower().count("alpha beta") == 2 + # ========================================================================= # session_search (dispatcher) diff --git a/tests/tools/test_tts_speed.py b/tests/tools/test_tts_speed.py new file mode 100644 index 000000000..7622a7f62 --- /dev/null +++ b/tests/tools/test_tts_speed.py @@ -0,0 +1,145 @@ +"""Tests for TTS speed configuration across providers.""" + +import asyncio +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + + +@pytest.fixture(autouse=True) +def clean_env(monkeypatch): + for key in ("OPENAI_API_KEY", "MINIMAX_API_KEY", "HERMES_SESSION_PLATFORM"): + monkeypatch.delenv(key, raising=False) + + +# --------------------------------------------------------------------------- +# Edge TTS speed +# --------------------------------------------------------------------------- + +class TestEdgeTtsSpeed: + def _run(self, tts_config, tmp_path): + mock_comm = MagicMock() + mock_comm.save = 
AsyncMock() + mock_edge = MagicMock() + mock_edge.Communicate = MagicMock(return_value=mock_comm) + + with patch("tools.tts_tool._import_edge_tts", return_value=mock_edge): + from tools.tts_tool import _generate_edge_tts + asyncio.run(_generate_edge_tts("Hello", str(tmp_path / "out.mp3"), tts_config)) + return mock_edge.Communicate + + def test_default_no_rate_kwarg(self, tmp_path): + """No speed config => no rate kwarg passed to Communicate.""" + comm_cls = self._run({}, tmp_path) + kwargs = comm_cls.call_args[1] + assert "rate" not in kwargs + + def test_global_speed_applied(self, tmp_path): + """Global tts.speed used as fallback.""" + comm_cls = self._run({"speed": 1.5}, tmp_path) + kwargs = comm_cls.call_args[1] + assert kwargs["rate"] == "+50%" + + def test_provider_speed_overrides_global(self, tmp_path): + """tts.edge.speed takes precedence over tts.speed.""" + comm_cls = self._run({"speed": 1.5, "edge": {"speed": 2.0}}, tmp_path) + kwargs = comm_cls.call_args[1] + assert kwargs["rate"] == "+100%" + + def test_speed_below_one(self, tmp_path): + """Speed < 1.0 produces a negative rate string.""" + comm_cls = self._run({"speed": 0.5}, tmp_path) + kwargs = comm_cls.call_args[1] + assert kwargs["rate"] == "-50%" + + def test_speed_exactly_one_no_rate(self, tmp_path): + """Explicit speed=1.0 should not pass rate kwarg.""" + comm_cls = self._run({"speed": 1.0}, tmp_path) + kwargs = comm_cls.call_args[1] + assert "rate" not in kwargs + + +# --------------------------------------------------------------------------- +# OpenAI TTS speed +# --------------------------------------------------------------------------- + +class TestOpenaiTtsSpeed: + def _run(self, tts_config, tmp_path, monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-key") + mock_response = MagicMock() + mock_client = MagicMock() + mock_client.audio.speech.create.return_value = mock_response + mock_cls = MagicMock(return_value=mock_client) + + with patch("tools.tts_tool._import_openai_client", return_value=mock_cls), \ + patch("tools.tts_tool._resolve_openai_audio_client_config", + return_value=("test-key", None)): + from tools.tts_tool import _generate_openai_tts + _generate_openai_tts("Hello", str(tmp_path / "out.mp3"), tts_config) + return mock_client.audio.speech.create + + def test_default_no_speed_kwarg(self, tmp_path, monkeypatch): + """No speed config => no speed kwarg in create call.""" + create = self._run({}, tmp_path, monkeypatch) + kwargs = create.call_args[1] + assert "speed" not in kwargs + + def test_global_speed_applied(self, tmp_path, monkeypatch): + """Global tts.speed used as fallback.""" + create = self._run({"speed": 1.5}, tmp_path, monkeypatch) + kwargs = create.call_args[1] + assert kwargs["speed"] == 1.5 + + def test_provider_speed_overrides_global(self, tmp_path, monkeypatch): + """tts.openai.speed takes precedence over tts.speed.""" + create = self._run({"speed": 1.5, "openai": {"speed": 2.0}}, tmp_path, monkeypatch) + kwargs = create.call_args[1] + assert kwargs["speed"] == 2.0 + + def test_speed_clamped_low(self, tmp_path, monkeypatch): + """Speed below 0.25 is clamped to 0.25.""" + create = self._run({"speed": 0.1}, tmp_path, monkeypatch) + kwargs = create.call_args[1] + assert kwargs["speed"] == 0.25 + + def test_speed_clamped_high(self, tmp_path, monkeypatch): + """Speed above 4.0 is clamped to 4.0.""" + create = self._run({"speed": 10.0}, tmp_path, monkeypatch) + kwargs = create.call_args[1] + assert kwargs["speed"] == 4.0 + + +# 
--------------------------------------------------------------------------- +# MiniMax TTS speed (global fallback wired) +# --------------------------------------------------------------------------- + +class TestMinimaxTtsSpeed: + def _run(self, tts_config, tmp_path, monkeypatch): + monkeypatch.setenv("MINIMAX_API_KEY", "test-key") + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "data": {"audio": "deadbeef"}, + "base_resp": {"status_code": 0, "status_msg": "success"}, + "extra_info": {"audio_size": 8}, + } + + # requests is imported locally inside _generate_minimax_tts + with patch("requests.post", return_value=mock_response) as mock_post: + from tools.tts_tool import _generate_minimax_tts + _generate_minimax_tts("Hello", str(tmp_path / "out.mp3"), tts_config) + return mock_post + + def test_global_speed_fallback(self, tmp_path, monkeypatch): + """Global tts.speed used when minimax.speed not set.""" + mock_post = self._run({"speed": 1.5}, tmp_path, monkeypatch) + payload = mock_post.call_args[1]["json"] + assert payload["voice_setting"]["speed"] == 1.5 + + def test_provider_speed_overrides_global(self, tmp_path, monkeypatch): + """tts.minimax.speed takes precedence over tts.speed.""" + mock_post = self._run( + {"speed": 1.5, "minimax": {"speed": 2.0}}, tmp_path, monkeypatch + ) + payload = mock_post.call_args[1]["json"] + assert payload["voice_setting"]["speed"] == 2.0 diff --git a/tests/tools/test_vision_tools.py b/tests/tools/test_vision_tools.py index 55949144a..e8fe8b417 100644 --- a/tests/tools/test_vision_tools.py +++ b/tests/tools/test_vision_tools.py @@ -463,8 +463,6 @@ class TestVisionRequirements: monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENAI_API_KEY", raising=False) - monkeypatch.delenv("AUXILIARY_VISION_PROVIDER", raising=False) - monkeypatch.delenv("CONTEXT_VISION_PROVIDER", raising=False) assert check_vision_requirements() is True diff --git a/tests/tools/test_voice_cli_integration.py b/tests/tools/test_voice_cli_integration.py index 39fa026ce..da500996a 100644 --- a/tests/tools/test_voice_cli_integration.py +++ b/tests/tools/test_voice_cli_integration.py @@ -32,6 +32,7 @@ def _make_voice_cli(**overrides): cli._voice_tts_done.set() cli._pending_input = queue.Queue() cli._app = None + cli._attached_images = [] cli.console = SimpleNamespace(width=80) for k, v in overrides.items(): setattr(cli, k, v) diff --git a/tests/tools/test_zombie_process_cleanup.py b/tests/tools/test_zombie_process_cleanup.py index 9cbbbcd1f..999bc3fe7 100644 --- a/tests/tools/test_zombie_process_cleanup.py +++ b/tests/tools/test_zombie_process_cleanup.py @@ -190,17 +190,38 @@ class TestGatewayCleanupWiring: def test_gateway_stop_calls_close(self): """gateway stop() should call close() on all running agents.""" import asyncio - from unittest.mock import MagicMock, patch + import threading + from unittest.mock import AsyncMock, MagicMock, patch - runner = MagicMock() + from gateway.run import GatewayRunner + + runner = object.__new__(GatewayRunner) runner._running = True runner._running_agents = {} + runner._running_agents_ts = {} runner.adapters = {} runner._background_tasks = set() runner._pending_messages = {} runner._pending_approvals = {} + runner._pending_model_notes = {} runner._shutdown_event = asyncio.Event() runner._exit_reason = None + runner._exit_code = None + runner._stop_task = None + runner._draining = False + runner._restart_requested = False + 
runner._restart_task_started = False + runner._restart_detached = False + runner._restart_via_service = False + runner._restart_drain_timeout = 5.0 + runner._voice_mode = {} + runner._session_model_overrides = {} + runner._update_prompt_pending = {} + runner._busy_input_mode = "interrupt" + runner._agent_cache = {} + runner._agent_cache_lock = threading.Lock() + runner._shutdown_all_gateway_honcho = lambda: None + runner._update_runtime_status = MagicMock() mock_agent_1 = MagicMock() mock_agent_2 = MagicMock() @@ -209,8 +230,6 @@ class TestGatewayCleanupWiring: "session-2": mock_agent_2, } - from gateway.run import GatewayRunner - loop = asyncio.new_event_loop() try: with patch("gateway.status.remove_pid_file"), \ diff --git a/tools/code_execution_tool.py b/tools/code_execution_tool.py index 8b5f79455..bed4f2091 100644 --- a/tools/code_execution_tool.py +++ b/tools/code_execution_tool.py @@ -1327,8 +1327,7 @@ def build_execute_code_schema(enabled_sandbox_tools: set = None) -> dict: f"Available via `from hermes_tools import ...`:\n\n" f"{tool_lines}\n\n" "Limits: 5-minute timeout, 50KB stdout cap, max 50 tool calls per script. " - "terminal() is foreground-only (no background or pty). " - "If the session uses a cloud sandbox backend, treat it as resumable task state rather than a durable always-on machine.\n\n" + "terminal() is foreground-only (no background or pty).\n\n" "Print your final result to stdout. Use Python stdlib (json, re, math, csv, " "datetime, collections, etc.) for processing between tool calls.\n\n" "Also available (no import needed โ€” built into hermes_tools):\n" diff --git a/tools/cronjob_tools.py b/tools/cronjob_tools.py index d5c81ad7a..90ecde65a 100644 --- a/tools/cronjob_tools.py +++ b/tools/cronjob_tools.py @@ -465,7 +465,7 @@ Important safety rule: cron-run sessions should not recursively schedule more cr }, "deliver": { "type": "string", - "description": "Omit this parameter to auto-deliver back to the current chat and topic (recommended). Auto-detection preserves thread/topic context. Only set explicitly when the user asks to deliver somewhere OTHER than the current conversation. Values: 'origin' (same as omitting), 'local' (no delivery, save only), or platform:chat_id:thread_id for a specific destination. Examples: 'telegram:-1001234567890:17585', 'discord:#engineering'. WARNING: 'platform:chat_id' without :thread_id loses topic targeting." + "description": "Omit this parameter to auto-deliver back to the current chat and topic (recommended). Auto-detection preserves thread/topic context. Only set explicitly when the user asks to deliver somewhere OTHER than the current conversation. Values: 'origin' (same as omitting), 'local' (no delivery, save only), or platform:chat_id:thread_id for a specific destination. Examples: 'telegram:-1001234567890:17585', 'discord:#engineering', 'sms:+15551234567'. WARNING: 'platform:chat_id' without :thread_id loses topic targeting." }, "skills": { "type": "array", diff --git a/tools/file_tools.py b/tools/file_tools.py index 186a9d052..5aa2d793e 100644 --- a/tools/file_tools.py +++ b/tools/file_tools.py @@ -92,7 +92,10 @@ def _is_blocked_device(filepath: str) -> bool: # Paths that file tools should refuse to write to without going through the # terminal tool's approval system. These match prefixes after os.path.realpath. 
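
# --- Sketch: why file_tools needs the /private/* prefixes below ---
# On macOS, /etc is a symlink to /private/etc, so os.path.realpath maps
# "/etc/hosts" to "/private/etc/hosts" -- a path the old "/etc/"-only prefix
# list never matched. Checking both the resolved and the merely normalized
# form, as the hunk below does, closes that bypass. _is_sensitive is a
# hypothetical helper condensing that logic:
import os

def _is_sensitive(path, prefixes=("/etc/", "/boot/", "/private/etc/", "/private/var/")):
    resolved = os.path.realpath(os.path.expanduser(path))
    normalized = os.path.normpath(os.path.expanduser(path))
    return any(resolved.startswith(p) or normalized.startswith(p) for p in prefixes)

# macOS: _is_sensitive("/etc/hosts") is True via both forms;
# "/private/etc/hosts" now matches an explicit prefix directly.
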
-_SENSITIVE_PATH_PREFIXES = ("/etc/", "/boot/", "/usr/lib/systemd/") +_SENSITIVE_PATH_PREFIXES = ( + "/etc/", "/boot/", "/usr/lib/systemd/", + "/private/etc/", "/private/var/", +) _SENSITIVE_EXACT_PATHS = {"/var/run/docker.sock", "/run/docker.sock"} @@ -102,17 +105,16 @@ def _check_sensitive_path(filepath: str) -> str | None: resolved = os.path.realpath(os.path.expanduser(filepath)) except (OSError, ValueError): resolved = filepath + normalized = os.path.normpath(os.path.expanduser(filepath)) + _err = ( + f"Refusing to write to sensitive system path: {filepath}\n" + "Use the terminal tool with sudo if you need to modify system files." + ) for prefix in _SENSITIVE_PATH_PREFIXES: - if resolved.startswith(prefix): - return ( - f"Refusing to write to sensitive system path: {filepath}\n" - "Use the terminal tool with sudo if you need to modify system files." - ) - if resolved in _SENSITIVE_EXACT_PATHS: - return ( - f"Refusing to write to sensitive system path: {filepath}\n" - "Use the terminal tool with sudo if you need to modify system files." - ) + if resolved.startswith(prefix) or normalized.startswith(prefix): + return _err + if resolved in _SENSITIVE_EXACT_PATHS or normalized in _SENSITIVE_EXACT_PATHS: + return _err return None diff --git a/tools/homeassistant_tool.py b/tools/homeassistant_tool.py index 0ab99b4bf..2e698a459 100644 --- a/tools/homeassistant_tool.py +++ b/tools/homeassistant_tool.py @@ -38,6 +38,15 @@ def _get_config(): # Regex for valid HA entity_id format (e.g. "light.living_room", "sensor.temperature_1") _ENTITY_ID_RE = re.compile(r"^[a-z_][a-z0-9_]*\.[a-z0-9_]+$") +# Regex for valid HA service/domain names (e.g. "light", "turn_on", "shell_command"). +# Only lowercase ASCII letters, digits, and underscores โ€” no slashes, dots, or +# other characters that could allow path traversal in URL construction. +# The domain and service are interpolated into /api/services/{domain}/{service}, +# so allowing arbitrary strings would enable SSRF via path traversal +# (e.g. domain="../../api/config") or blocked-domain bypass +# (e.g. domain="shell_command/../light"). +_SERVICE_NAME_RE = re.compile(r"^[a-z][a-z0-9_]*$") + # Service domains blocked for security -- these allow arbitrary code/command # execution on the HA host or enable SSRF attacks on the local network. # HA provides zero service-level access control; all safety must be in our layer. @@ -246,6 +255,14 @@ def _handle_call_service(args: dict, **kw) -> str: if not domain or not service: return tool_error("Missing required parameters: domain and service") + # Validate domain/service format BEFORE the blocklist check โ€” prevents + # path traversal in /api/services/{domain}/{service} and blocklist bypass + # via payloads like "shell_command/../light". + if not _SERVICE_NAME_RE.match(domain): + return tool_error(f"Invalid domain format: {domain!r}") + if not _SERVICE_NAME_RE.match(service): + return tool_error(f"Invalid service format: {service!r}") + if domain in _BLOCKED_DOMAINS: return json.dumps({ "error": f"Service domain '{domain}' is blocked for security. 
" @@ -257,6 +274,12 @@ def _handle_call_service(args: dict, **kw) -> str: return tool_error(f"Invalid entity_id format: {entity_id}") data = args.get("data") + if isinstance(data, str): + try: + data = json.loads(data) if data.strip() else None + except json.JSONDecodeError as e: + return tool_error(f"Invalid JSON string in 'data' parameter: {e}") + try: result = _run_async(_async_call_service(domain, service, entity_id, data)) return json.dumps({"result": result}) @@ -433,9 +456,9 @@ HA_CALL_SERVICE_SCHEMA = { ), }, "data": { - "type": "object", + "type": "string", "description": ( - "Additional service data. Examples: " + "Additional service data as a JSON string. Examples: " '{"brightness": 255, "color_name": "blue"} for lights, ' '{"temperature": 22, "hvac_mode": "heat"} for climate, ' '{"volume_level": 0.5} for media players.' diff --git a/tools/send_message_tool.py b/tools/send_message_tool.py index 60503c0bc..a2b3e984c 100644 --- a/tools/send_message_tool.py +++ b/tools/send_message_tool.py @@ -322,7 +322,7 @@ async def _send_to_platform(platform, pconfig, chat_id, message, thread_id=None, (preserves code-block boundaries, adds part indicators). """ from gateway.config import Platform - from gateway.platforms.base import BasePlatformAdapter + from gateway.platforms.base import BasePlatformAdapter, utf16_len from gateway.platforms.telegram import TelegramAdapter from gateway.platforms.discord import DiscordAdapter from gateway.platforms.slack import SlackAdapter @@ -354,9 +354,11 @@ async def _send_to_platform(platform, pconfig, chat_id, message, thread_id=None, # Smart-chunk the message to fit within platform limits. # For short messages or platforms without a known limit this is a no-op. + # Telegram measures length in UTF-16 code units, not Unicode codepoints. max_len = _MAX_LENGTHS.get(platform) if max_len: - chunks = BasePlatformAdapter.truncate_message(message, max_len) + _len_fn = utf16_len if platform == Platform.TELEGRAM else None + chunks = BasePlatformAdapter.truncate_message(message, max_len, len_fn=_len_fn) else: chunks = [message] diff --git a/tools/session_search_tool.py b/tools/session_search_tool.py index 3e9c68af4..9be73a04a 100644 --- a/tools/session_search_tool.py +++ b/tools/session_search_tool.py @@ -19,6 +19,7 @@ import asyncio import concurrent.futures import json import logging +import re from typing import Dict, Any, List, Optional, Union from agent.auxiliary_client import async_call_llm, extract_content_or_reasoning @@ -90,31 +91,80 @@ def _truncate_around_matches( full_text: str, query: str, max_chars: int = MAX_SESSION_CHARS ) -> str: """ - Truncate a conversation transcript to max_chars, centered around - where the query terms appear. Keeps content near matches, trims the edges. + Truncate a conversation transcript to *max_chars*, choosing a window + that maximises coverage of positions where the *query* actually appears. + + Strategy (in priority order): + 1. Try to find the full query as a phrase (case-insensitive). + 2. If no phrase hit, look for positions where all query terms appear + within a 200-char proximity window (co-occurrence). + 3. Fall back to individual term positions. + + Once candidate positions are collected the function picks the window + start that covers the most of them. 
""" if len(full_text) <= max_chars: return full_text - # Find the first occurrence of any query term - query_terms = query.lower().split() text_lower = full_text.lower() - first_match = len(full_text) - for term in query_terms: - pos = text_lower.find(term) - if pos != -1 and pos < first_match: - first_match = pos + query_lower = query.lower().strip() + match_positions: list[int] = [] - if first_match == len(full_text): - # No match found, take from the start - first_match = 0 + # --- 1. Full-phrase search ------------------------------------------------ + phrase_pat = re.compile(re.escape(query_lower)) + match_positions = [m.start() for m in phrase_pat.finditer(text_lower)] - # Center the window around the first match - half = max_chars // 2 - start = max(0, first_match - half) + # --- 2. Proximity co-occurrence of all terms (within 200 chars) ----------- + if not match_positions: + terms = query_lower.split() + if len(terms) > 1: + # Collect every occurrence of each term + term_positions: dict[str, list[int]] = {} + for t in terms: + term_positions[t] = [ + m.start() for m in re.finditer(re.escape(t), text_lower) + ] + # Slide through positions of the rarest term and check proximity + rarest = min(terms, key=lambda t: len(term_positions.get(t, []))) + for pos in term_positions.get(rarest, []): + if all( + any(abs(p - pos) < 200 for p in term_positions.get(t, [])) + for t in terms + if t != rarest + ): + match_positions.append(pos) + + # --- 3. Individual term positions (last resort) --------------------------- + if not match_positions: + terms = query_lower.split() + for t in terms: + for m in re.finditer(re.escape(t), text_lower): + match_positions.append(m.start()) + + if not match_positions: + # Nothing at all โ€” take from the start + truncated = full_text[:max_chars] + suffix = "\n\n...[later conversation truncated]..." if max_chars < len(full_text) else "" + return truncated + suffix + + # --- Pick window that covers the most match positions --------------------- + match_positions.sort() + + best_start = 0 + best_count = 0 + for candidate in match_positions: + ws = max(0, candidate - max_chars // 4) # bias: 25% before, 75% after + we = ws + max_chars + if we > len(full_text): + ws = max(0, len(full_text) - max_chars) + we = len(full_text) + count = sum(1 for p in match_positions if ws <= p < we) + if count > best_count: + best_count = count + best_start = ws + + start = best_start end = min(len(full_text), start + max_chars) - if end - start < max_chars: - start = max(0, end - max_chars) truncated = full_text[start:end] prefix = "...[earlier conversation truncated]...\n\n" if start > 0 else "" diff --git a/tools/skills_hub.py b/tools/skills_hub.py index c73527ff2..47aef8075 100644 --- a/tools/skills_hub.py +++ b/tools/skills_hub.py @@ -296,10 +296,20 @@ class GitHubSource(SkillSource): self.taps = list(self.DEFAULT_TAPS) if extra_taps: self.taps.extend(extra_taps) + # Per-instance cache: repo -> (default_branch, tree_entries) + # Survives within a single search/install flow, avoiding redundant API calls. 
+ self._tree_cache: Dict[str, Tuple[str, List[dict]]] = {} + # Set when GitHub returns 403 with rate limit exhausted + self._rate_limited: bool = False def source_id(self) -> str: return "github" + @property + def is_rate_limited(self) -> bool: + """Whether GitHub API rate limit was hit during operations.""" + return self._rate_limited + def trust_level_for(self, identifier: str) -> str: # identifier format: "owner/repo/path/to/skill" parts = identifier.split("/", 2) @@ -443,6 +453,69 @@ class GitHubSource(SkillSource): self._write_cache(cache_key, [self._meta_to_dict(s) for s in skills]) return skills + # -- Repo tree cache (avoids redundant API calls) -- + + def _get_repo_tree(self, repo: str) -> Optional[Tuple[str, List[dict]]]: + """Get cached or fresh repo tree. + + Returns ``(default_branch, tree_entries)`` or ``None``. + A single install can call ``_download_directory_via_tree`` and + ``_find_skill_in_repo_tree`` multiple times for the same repo โ€” this + cache eliminates the redundant ``GET /repos/{repo}`` + + ``GET /repos/{repo}/git/trees/{branch}`` round-trips (previously up to + 6 duplicated pairs per install, consuming ~12 of the 60/hr + unauthenticated rate limit for nothing). + """ + if repo in self._tree_cache: + return self._tree_cache[repo] + + headers = self.auth.get_headers() + + # Resolve default branch + try: + resp = httpx.get( + f"https://api.github.com/repos/{repo}", + headers=headers, timeout=15, follow_redirects=True, + ) + if resp.status_code != 200: + self._check_rate_limit_response(resp) + return None + default_branch = resp.json().get("default_branch", "main") + except (httpx.HTTPError, ValueError): + return None + + # Fetch recursive tree + try: + resp = httpx.get( + f"https://api.github.com/repos/{repo}/git/trees/{default_branch}", + params={"recursive": "1"}, + headers=headers, timeout=30, follow_redirects=True, + ) + if resp.status_code != 200: + self._check_rate_limit_response(resp) + return None + tree_data = resp.json() + if tree_data.get("truncated"): + logger.debug("Git tree truncated for %s, cannot cache", repo) + return None + except (httpx.HTTPError, ValueError): + return None + + entries = tree_data.get("tree", []) + self._tree_cache[repo] = (default_branch, entries) + return (default_branch, entries) + + def _check_rate_limit_response(self, resp: "httpx.Response") -> None: + """Flag the instance as rate-limited when GitHub returns 403 + exhausted quota.""" + if resp.status_code == 403: + remaining = resp.headers.get("X-RateLimit-Remaining", "") + if remaining == "0": + self._rate_limited = True + logger.warning( + "GitHub API rate limit exhausted (unauthenticated: 60 req/hr). " + "Set GITHUB_TOKEN or install the gh CLI to raise the limit to 5,000/hr." + ) + def _download_directory(self, repo: str, path: str) -> Dict[str, str]: """Recursively download all text files from a GitHub directory. @@ -458,40 +531,34 @@ class GitHubSource(SkillSource): return self._download_directory_recursive(repo, path) def _download_directory_via_tree(self, repo: str, path: str) -> Optional[Dict[str, str]]: - """Download an entire directory using the Git Trees API (single request).""" + """Download an entire directory using the Git Trees API (single request). + + Returns: + dict of files if the path exists and has content, + empty dict ``{}`` if the tree is cached but the path doesn't exist + (prevents unnecessary Contents API fallback), + ``None`` if the tree couldn't be fetched (triggers Contents API fallback). 
+ """ path = path.rstrip("/") - headers = self.auth.get_headers() - # Resolve the default branch via the repo endpoint - try: - repo_url = f"https://api.github.com/repos/{repo}" - resp = httpx.get(repo_url, headers=headers, timeout=15, follow_redirects=True) - if resp.status_code != 200: - return None - default_branch = resp.json().get("default_branch", "main") - except (httpx.HTTPError, ValueError): + cached = self._get_repo_tree(repo) + if cached is None: return None + _default_branch, tree_entries = cached - # Fetch the full recursive tree (branch name works as tree-ish) - try: - tree_url = f"https://api.github.com/repos/{repo}/git/trees/{default_branch}" - resp = httpx.get( - tree_url, params={"recursive": "1"}, - headers=headers, timeout=30, follow_redirects=True, - ) - if resp.status_code != 200: - return None - tree_data = resp.json() - if tree_data.get("truncated"): - logger.debug("Git tree truncated for %s, falling back to Contents API", repo) - return None - except (httpx.HTTPError, ValueError): - return None + # Check if ANY entry lives under the target path + prefix = f"{path}/" + has_entries = any( + item.get("path", "").startswith(prefix) for item in tree_entries + ) + if not has_entries: + # Path definitively doesn't exist in the repo โ€” return empty + # instead of None to skip the Contents API fallback. + return {} # Filter to blobs under our target path and fetch content - prefix = f"{path}/" files: Dict[str, str] = {} - for item in tree_data.get("tree", []): + for item in tree_entries: if item.get("type") != "blob": continue item_path = item.get("path", "") @@ -548,38 +615,14 @@ class GitHubSource(SkillSource): handles deeply nested directory structures like ``cli-tool/components/skills/development//SKILL.md``. """ - # Get default branch - try: - resp = httpx.get( - f"https://api.github.com/repos/{repo}", - headers=self.auth.get_headers(), - timeout=15, - follow_redirects=True, - ) - if resp.status_code != 200: - return None - default_branch = resp.json().get("default_branch", "main") - except (httpx.HTTPError, json.JSONDecodeError): - return None - - # Get recursive tree (single API call for the entire repo) - try: - resp = httpx.get( - f"https://api.github.com/repos/{repo}/git/trees/{default_branch}", - params={"recursive": "1"}, - headers=self.auth.get_headers(), - timeout=30, - follow_redirects=True, - ) - if resp.status_code != 200: - return None - tree_data = resp.json() - except (httpx.HTTPError, json.JSONDecodeError): + cached = self._get_repo_tree(repo) + if cached is None: return None + _default_branch, tree_entries = cached # Look for SKILL.md files inside directories named skill_md_suffix = f"/{skill_name}/SKILL.md" - for entry in tree_data.get("tree", []): + for entry in tree_entries: if entry.get("type") != "blob": continue path = entry.get("path", "") @@ -601,6 +644,7 @@ class GitHubSource(SkillSource): ) if resp.status_code == 200: return resp.text + self._check_rate_limit_response(resp) except httpx.HTTPError as e: logger.debug("GitHub contents API fetch failed: %s", e) return None @@ -2654,6 +2698,222 @@ def check_for_skill_updates( return results +# --------------------------------------------------------------------------- +# Hermes centralized index source +# --------------------------------------------------------------------------- + +HERMES_INDEX_URL = "https://hermes-agent.nousresearch.com/docs/api/skills-index.json" +HERMES_INDEX_CACHE_FILE = INDEX_CACHE_DIR / "hermes-index.json" +HERMES_INDEX_TTL = 6 * 3600 # 6 hours + + +def 
_load_hermes_index() -> Optional[dict]: + """Fetch the centralized skills index, with local cache. + + The index is a JSON file hosted on the docs site, rebuilt daily by CI. + We cache it locally for HERMES_INDEX_TTL seconds to avoid repeated + downloads within a session. + """ + # Check local cache + if HERMES_INDEX_CACHE_FILE.exists(): + try: + age = time.time() - HERMES_INDEX_CACHE_FILE.stat().st_mtime + if age < HERMES_INDEX_TTL: + return json.loads(HERMES_INDEX_CACHE_FILE.read_text()) + except (OSError, json.JSONDecodeError): + pass + + # Fetch from docs site + try: + resp = httpx.get(HERMES_INDEX_URL, timeout=15, follow_redirects=True) + if resp.status_code != 200: + logger.debug("Hermes index fetch returned %d", resp.status_code) + return _load_stale_index_cache() + data = resp.json() + except (httpx.HTTPError, json.JSONDecodeError) as e: + logger.debug("Hermes index fetch failed: %s", e) + return _load_stale_index_cache() + + # Validate structure + if not isinstance(data, dict) or "skills" not in data: + return _load_stale_index_cache() + + # Cache locally + try: + HERMES_INDEX_CACHE_FILE.parent.mkdir(parents=True, exist_ok=True) + HERMES_INDEX_CACHE_FILE.write_text(json.dumps(data)) + except OSError: + pass + + return data + + +def _load_stale_index_cache() -> Optional[dict]: + """Fall back to stale cache when the network fetch fails.""" + if HERMES_INDEX_CACHE_FILE.exists(): + try: + return json.loads(HERMES_INDEX_CACHE_FILE.read_text()) + except (OSError, json.JSONDecodeError): + pass + return None + + +class HermesIndexSource(SkillSource): + """Skill source backed by the centralized Hermes Skills Index. + + The index is a JSON catalog published to the docs site and rebuilt + daily by CI. It contains metadata + resolved GitHub paths for every + skill, eliminating the need for users to hit the GitHub API for + search or path discovery. + + When the index is unavailable, all methods return empty / None so + downstream sources take over transparently. + """ + + def __init__(self, auth: GitHubAuth): + self._index: Optional[dict] = None + self._loaded = False + self.auth = auth + # Lazily create GitHubSource for fetch โ€” only used when actually + # downloading files, which requires real GitHub API calls. + self._github: Optional[GitHubSource] = None + + def _ensure_loaded(self) -> dict: + if not self._loaded: + self._index = _load_hermes_index() + self._loaded = True + return self._index or {} + + def _get_github(self) -> GitHubSource: + if self._github is None: + self._github = GitHubSource(auth=self.auth) + return self._github + + def source_id(self) -> str: + return "hermes-index" + + @property + def is_available(self) -> bool: + """Whether the index is loaded and has skills.""" + index = self._ensure_loaded() + return bool(index.get("skills")) + + def trust_level_for(self, identifier: str) -> str: + index = self._ensure_loaded() + for skill in index.get("skills", []): + if skill.get("identifier") == identifier: + return skill.get("trust_level", "community") + return "community" + + def search(self, query: str, limit: int = 10) -> List[SkillMeta]: + """Search the cached index. 
Zero API calls.""" + index = self._ensure_loaded() + skills = index.get("skills", []) + if not skills: + return [] + + if not query.strip(): + # No query โ€” return featured/popular + return [self._to_meta(s) for s in skills[:limit]] + + query_lower = query.lower() + results: List[SkillMeta] = [] + for s in skills: + searchable = f"{s.get('name', '')} {s.get('description', '')} {' '.join(s.get('tags', []))}".lower() + if query_lower in searchable: + results.append(self._to_meta(s)) + if len(results) >= limit: + break + return results + + def fetch(self, identifier: str) -> Optional[SkillBundle]: + """Fetch a skill using the resolved path from the index. + + If the index has a ``resolved_github_id`` for this skill, we skip + the entire candidate/discovery chain and go directly to GitHub + with the exact path. This reduces install from ~31 API calls to + just the file content downloads (~5-22 depending on skill size). + """ + index = self._ensure_loaded() + entry = self._find_entry(identifier, index) + if not entry: + return None + + # Use resolved path if available + resolved = entry.get("resolved_github_id") + if resolved: + bundle = self._get_github().fetch(resolved) + if bundle: + bundle.source = entry.get("source", "hermes-index") + bundle.identifier = identifier + return bundle + + # Fall back to identifier-based fetch via repo/path + repo = entry.get("repo", "") + path = entry.get("path", "") + if repo and path: + github_id = f"{repo}/{path}" + bundle = self._get_github().fetch(github_id) + if bundle: + bundle.source = entry.get("source", "hermes-index") + bundle.identifier = identifier + return bundle + + return None + + def inspect(self, identifier: str) -> Optional[SkillMeta]: + """Return metadata from the index. Zero API calls.""" + index = self._ensure_loaded() + entry = self._find_entry(identifier, index) + if entry: + return self._to_meta(entry) + return None + + def _find_entry(self, identifier: str, index: dict) -> Optional[dict]: + """Look up a skill in the index by identifier or name.""" + skills = index.get("skills", []) + + # Exact identifier match + for s in skills: + if s.get("identifier") == identifier: + return s + + # Try without source prefix (e.g. "skills-sh/" stripped) + normalized = identifier + for prefix in ("skills-sh/", "skills.sh/", "official/", "github/", "clawhub/"): + if identifier.startswith(prefix): + normalized = identifier[len(prefix):] + break + + # Match on normalized identifier or name + for s in skills: + sid = s.get("identifier", "") + # Strip prefix from stored identifier too + stored_normalized = sid + for prefix in ("skills-sh/", "skills.sh/", "official/", "github/", "clawhub/"): + if sid.startswith(prefix): + stored_normalized = sid[len(prefix):] + break + if stored_normalized == normalized: + return s + + return None + + @staticmethod + def _to_meta(entry: dict) -> SkillMeta: + return SkillMeta( + name=entry.get("name", ""), + description=entry.get("description", ""), + source=entry.get("source", "hermes-index"), + identifier=entry.get("identifier", ""), + trust_level=entry.get("trust_level", "community"), + repo=entry.get("repo"), + path=entry.get("path"), + tags=entry.get("tags", []), + extra=entry.get("extra", {}), + ) + + def create_source_router(auth: Optional[GitHubAuth] = None) -> List[SkillSource]: """ Create all configured source adapters. 
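
# --- Sketch: the index document shape HermesIndexSource expects ---
# Field names are taken from the accessors above (_to_meta, fetch,
# _find_entry); every value here is an invented example.
_example_index = {
    "skills": [
        {
            "identifier": "owner/repo/skills/example-skill",
            "name": "example-skill",
            "description": "What the skill does (searched as a substring)",
            "tags": ["example"],
            "trust_level": "community",
            "repo": "owner/repo",
            "path": "skills/example-skill",
            "resolved_github_id": "owner/repo/skills/example-skill",
            "source": "github",
        }
    ],
}
# search() is a case-insensitive substring scan over name + description + tags;
# fetch() prefers resolved_github_id and falls back to repo + path.
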
@@ -2667,6 +2927,7 @@ def create_source_router(auth: Optional[GitHubAuth] = None) -> List[SkillSource] sources: List[SkillSource] = [ OptionalSkillSource(), # Official optional skills (highest priority) + HermesIndexSource(auth=auth), # Centralized index (search + resolved install paths) SkillsShSource(auth=auth), WellKnownSkillSource(), GitHubSource(auth=auth, extra_taps=extra_taps), @@ -2709,10 +2970,27 @@ def parallel_search_sources( per_source_limits = per_source_limits or {} active: List[SkillSource] = [] + # When the centralized index is available and the user hasn't filtered + # to a specific source, skip external API sources (github, skills-sh, + # clawhub, etc.) โ€” the index already has their data. This avoids + # ~70 GitHub API calls per search for unauthenticated users. + _index_available = False + _api_source_ids = frozenset({"github", "skills-sh", "clawhub", + "claude-marketplace", "lobehub", "well-known"}) + if source_filter == "all": + for src in sources: + if (src.source_id() == "hermes-index" + and getattr(src, "is_available", False)): + _index_available = True + break + for src in sources: sid = src.source_id() if source_filter != "all" and sid != source_filter and sid != "official": continue + # Skip external API sources when the index covers them + if _index_available and sid in _api_source_ids: + continue active.append(src) all_results: List[SkillMeta] = [] diff --git a/tools/terminal_tool.py b/tools/terminal_tool.py index 3dfa786e1..90c4a7ea2 100644 --- a/tools/terminal_tool.py +++ b/tools/terminal_tool.py @@ -531,7 +531,6 @@ Working directory: Use 'workdir' for per-command cwd. PTY mode: Set pty=true for interactive CLI tools (Codex, Claude Code, Python REPL). Do NOT use vim/nano/interactive tools without pty=true โ€” they hang without a pseudo-terminal. Pipe git output to cat if it might page. -Important: cloud sandboxes may be cleaned up, idled out, or recreated between turns. Persistent filesystem means files can resume later; it does NOT guarantee a continuously running machine or surviving background processes. Use terminal sandboxes for task work, not durable hosting. 
""" # Global state for environment lifecycle management diff --git a/tools/tts_tool.py b/tools/tts_tool.py index 1423e2e78..769ae30a9 100644 --- a/tools/tts_tool.py +++ b/tools/tts_tool.py @@ -188,8 +188,14 @@ async def _generate_edge_tts(text: str, output_path: str, tts_config: Dict[str, _edge_tts = _import_edge_tts() edge_config = tts_config.get("edge", {}) voice = edge_config.get("voice", DEFAULT_EDGE_VOICE) + speed = float(edge_config.get("speed", tts_config.get("speed", 1.0))) - communicate = _edge_tts.Communicate(text, voice) + kwargs = {"voice": voice} + if speed != 1.0: + pct = round((speed - 1.0) * 100) + kwargs["rate"] = f"{pct:+d}%" + + communicate = _edge_tts.Communicate(text, **kwargs) await communicate.save(output_path) return output_path @@ -261,6 +267,7 @@ def _generate_openai_tts(text: str, output_path: str, tts_config: Dict[str, Any] model = oai_config.get("model", DEFAULT_OPENAI_MODEL) voice = oai_config.get("voice", DEFAULT_OPENAI_VOICE) base_url = oai_config.get("base_url", base_url) + speed = float(oai_config.get("speed", tts_config.get("speed", 1.0))) # Determine response format from extension if output_path.endswith(".ogg"): @@ -271,13 +278,16 @@ def _generate_openai_tts(text: str, output_path: str, tts_config: Dict[str, Any] OpenAIClient = _import_openai_client() client = OpenAIClient(api_key=api_key, base_url=base_url) try: - response = client.audio.speech.create( + create_kwargs = dict( model=model, voice=voice, input=text, response_format=response_format, extra_headers={"x-idempotency-key": str(uuid.uuid4())}, ) + if speed != 1.0: + create_kwargs["speed"] = max(0.25, min(4.0, speed)) + response = client.audio.speech.create(**create_kwargs) response.stream_to_file(output_path) return output_path @@ -314,7 +324,7 @@ def _generate_minimax_tts(text: str, output_path: str, tts_config: Dict[str, Any mm_config = tts_config.get("minimax", {}) model = mm_config.get("model", DEFAULT_MINIMAX_MODEL) voice_id = mm_config.get("voice_id", DEFAULT_MINIMAX_VOICE_ID) - speed = mm_config.get("speed", 1) + speed = mm_config.get("speed", tts_config.get("speed", 1)) vol = mm_config.get("vol", 1) pitch = mm_config.get("pitch", 0) base_url = mm_config.get("base_url", DEFAULT_MINIMAX_BASE_URL) diff --git a/tools/voice_mode.py b/tools/voice_mode.py index 5b6a1e3b1..5dc99070c 100644 --- a/tools/voice_mode.py +++ b/tools/voice_mode.py @@ -106,8 +106,9 @@ def detect_audio_environment() -> dict: if any(os.environ.get(v) for v in ('SSH_CLIENT', 'SSH_TTY', 'SSH_CONNECTION')): warnings.append("Running over SSH -- no audio devices available") - # Docker detection - if os.path.exists('/.dockerenv'): + # Docker/Podman container detection + from hermes_constants import is_container + if is_container(): warnings.append("Running inside Docker container -- no audio devices") # WSL detection โ€” PulseAudio bridge makes audio work in WSL. @@ -428,6 +429,11 @@ class AudioRecorder: """Current audio input RMS level (0-32767). 
Updated each audio chunk.""" return self._current_rms + @property + def is_recording(self) -> bool: + """Whether audio recording is currently active.""" + return self._recording + # -- public methods ------------------------------------------------------ def _ensure_stream(self) -> None: diff --git a/trajectory_compressor.py b/trajectory_compressor.py index 6bc0a499e..f05fca881 100644 --- a/trajectory_compressor.py +++ b/trajectory_compressor.py @@ -415,7 +415,7 @@ class TrajectoryCompressor: return "codex" if "api.z.ai" in url: return "zai" - if "moonshot.ai" in url or "api.kimi.com" in url: + if "moonshot.ai" in url or "moonshot.cn" in url or "api.kimi.com" in url: return "kimi-coding" if "minimaxi.com" in url: return "minimax-cn" diff --git a/utils.py b/utils.py index bd2a6b70f..f967c08ae 100644 --- a/utils.py +++ b/utils.py @@ -5,7 +5,7 @@ import logging import os import tempfile from pathlib import Path -from typing import Any, List, Optional, Union +from typing import Any, Union import yaml @@ -145,59 +145,9 @@ def safe_json_loads(text: str, default: Any = None) -> Any: return default -def read_json_file(path: Path, default: Any = None) -> Any: - """Read and parse a JSON file, returning *default* on any error. - - Replaces the repeated ``try: json.loads(path.read_text()) except ...`` - pattern in anthropic_adapter.py, auxiliary_client.py, credential_pool.py, - and skill_utils.py. - """ - try: - return json.loads(Path(path).read_text(encoding="utf-8")) - except (json.JSONDecodeError, OSError, IOError, ValueError) as exc: - logger.debug("Failed to read %s: %s", path, exc) - return default - - -def read_jsonl(path: Path) -> List[dict]: - """Read a JSONL file (one JSON object per line). - - Returns a list of parsed objects, skipping blank lines. - """ - entries = [] - with open(path, "r", encoding="utf-8") as f: - for line in f: - line = line.strip() - if line: - entries.append(json.loads(line)) - return entries - - -def append_jsonl(path: Path, entry: dict) -> None: - """Append a single JSON object as a new line to a JSONL file.""" - path = Path(path) - path.parent.mkdir(parents=True, exist_ok=True) - with open(path, "a", encoding="utf-8") as f: - f.write(json.dumps(entry, ensure_ascii=False) + "\n") - - # โ”€โ”€โ”€ Environment Variable Helpers โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ -def env_str(key: str, default: str = "") -> str: - """Read an environment variable, stripped of whitespace. - - Replaces the ``os.getenv("X", "").strip()`` pattern repeated 50+ times - across runtime_provider.py, anthropic_adapter.py, models.py, etc. 
- """ - return os.getenv(key, default).strip() - - -def env_lower(key: str, default: str = "") -> str: - """Read an environment variable, stripped and lowercased.""" - return os.getenv(key, default).strip().lower() - - def env_int(key: str, default: int = 0) -> int: """Read an environment variable as an integer, with fallback.""" raw = os.getenv(key, "").strip() diff --git a/uv.lock b/uv.lock index c70d3e77e..45efc2d93 100644 --- a/uv.lock +++ b/uv.lock @@ -165,6 +165,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, ] +[[package]] +name = "aiosqlite" +version = "0.22.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/8a/64761f4005f17809769d23e518d915db74e6310474e733e3593cfc854ef1/aiosqlite-0.22.1.tar.gz", hash = "sha256:043e0bd78d32888c0a9ca90fc788b38796843360c855a7262a532813133a0650", size = 14821, upload-time = "2025-12-23T19:25:43.997Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/b7/e3bf5133d697a08128598c8d0abc5e16377b51465a33756de24fa7dee953/aiosqlite-0.22.1-py3-none-any.whl", hash = "sha256:21c002eb13823fad740196c5a2e9d8e62f6243bd9e7e4a1f87fb5e44ecb4fceb", size = 17405, upload-time = "2025-12-23T19:25:42.139Z" }, +] + [[package]] name = "altair" version = "6.0.0" @@ -240,6 +249,54 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, ] +[[package]] +name = "asyncpg" +version = "0.31.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cc/d18065ce2380d80b1bcce927c24a2642efd38918e33fd724bc4bca904877/asyncpg-0.31.0.tar.gz", hash = "sha256:c989386c83940bfbd787180f2b1519415e2d3d6277a70d9d0f0145ac73500735", size = 993667, upload-time = "2025-11-24T23:27:00.812Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/17/cc02bc49bc350623d050fa139e34ea512cd6e020562f2a7312a7bcae4bc9/asyncpg-0.31.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eee690960e8ab85063ba93af2ce128c0f52fd655fdff9fdb1a28df01329f031d", size = 643159, upload-time = "2025-11-24T23:25:36.443Z" }, + { url = "https://files.pythonhosted.org/packages/a4/62/4ded7d400a7b651adf06f49ea8f73100cca07c6df012119594d1e3447aa6/asyncpg-0.31.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2657204552b75f8288de08ca60faf4a99a65deef3a71d1467454123205a88fab", size = 638157, upload-time = "2025-11-24T23:25:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/d6/5b/4179538a9a72166a0bf60ad783b1ef16efb7960e4d7b9afe9f77a5551680/asyncpg-0.31.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a429e842a3a4b4ea240ea52d7fe3f82d5149853249306f7ff166cb9948faa46c", size = 2918051, upload-time = "2025-11-24T23:25:39.461Z" }, + { url = "https://files.pythonhosted.org/packages/e6/35/c27719ae0536c5b6e61e4701391ffe435ef59539e9360959240d6e47c8c8/asyncpg-0.31.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0807be46c32c963ae40d329b3a686356e417f674c976c07fa49f1b30303f109", size = 2972640, 
upload-time = "2025-11-24T23:25:41.512Z" }, + { url = "https://files.pythonhosted.org/packages/43/f4/01ebb9207f29e645a64699b9ce0eefeff8e7a33494e1d29bb53736f7766b/asyncpg-0.31.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e5d5098f63beeae93512ee513d4c0c53dc12e9aa2b7a1af5a81cddf93fe4e4da", size = 2851050, upload-time = "2025-11-24T23:25:43.153Z" }, + { url = "https://files.pythonhosted.org/packages/3e/f4/03ff1426acc87be0f4e8d40fa2bff5c3952bef0080062af9efc2212e3be8/asyncpg-0.31.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37fc6c00a814e18eef51833545d1891cac9aa69140598bb076b4cd29b3e010b9", size = 2962574, upload-time = "2025-11-24T23:25:44.942Z" }, + { url = "https://files.pythonhosted.org/packages/c7/39/cc788dfca3d4060f9d93e67be396ceec458dfc429e26139059e58c2c244d/asyncpg-0.31.0-cp311-cp311-win32.whl", hash = "sha256:5a4af56edf82a701aece93190cc4e094d2df7d33f6e915c222fb09efbb5afc24", size = 521076, upload-time = "2025-11-24T23:25:46.486Z" }, + { url = "https://files.pythonhosted.org/packages/28/fc/735af5384c029eb7f1ca60ccb8fa95521dbdaeef788edf4cecfc604c3cab/asyncpg-0.31.0-cp311-cp311-win_amd64.whl", hash = "sha256:480c4befbdf079c14c9ca43c8c5e1fe8b6296c96f1f927158d4f1e750aacc047", size = 584980, upload-time = "2025-11-24T23:25:47.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/a6/59d0a146e61d20e18db7396583242e32e0f120693b67a8de43f1557033e2/asyncpg-0.31.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b44c31e1efc1c15188ef183f287c728e2046abb1d26af4d20858215d50d91fad", size = 662042, upload-time = "2025-11-24T23:25:49.578Z" }, + { url = "https://files.pythonhosted.org/packages/36/01/ffaa189dcb63a2471720615e60185c3f6327716fdc0fc04334436fbb7c65/asyncpg-0.31.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0c89ccf741c067614c9b5fc7f1fc6f3b61ab05ae4aaa966e6fd6b93097c7d20d", size = 638504, upload-time = "2025-11-24T23:25:51.501Z" }, + { url = "https://files.pythonhosted.org/packages/9f/62/3f699ba45d8bd24c5d65392190d19656d74ff0185f42e19d0bbd973bb371/asyncpg-0.31.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:12b3b2e39dc5470abd5e98c8d3373e4b1d1234d9fbdedf538798b2c13c64460a", size = 3426241, upload-time = "2025-11-24T23:25:53.278Z" }, + { url = "https://files.pythonhosted.org/packages/8c/d1/a867c2150f9c6e7af6462637f613ba67f78a314b00db220cd26ff559d532/asyncpg-0.31.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:aad7a33913fb8bcb5454313377cc330fbb19a0cd5faa7272407d8a0c4257b671", size = 3520321, upload-time = "2025-11-24T23:25:54.982Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1a/cce4c3f246805ecd285a3591222a2611141f1669d002163abef999b60f98/asyncpg-0.31.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3df118d94f46d85b2e434fd62c84cb66d5834d5a890725fe625f498e72e4d5ec", size = 3316685, upload-time = "2025-11-24T23:25:57.43Z" }, + { url = "https://files.pythonhosted.org/packages/40/ae/0fc961179e78cc579e138fad6eb580448ecae64908f95b8cb8ee2f241f67/asyncpg-0.31.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bd5b6efff3c17c3202d4b37189969acf8927438a238c6257f66be3c426beba20", size = 3471858, upload-time = "2025-11-24T23:25:59.636Z" }, + { url = "https://files.pythonhosted.org/packages/52/b2/b20e09670be031afa4cbfabd645caece7f85ec62d69c312239de568e058e/asyncpg-0.31.0-cp312-cp312-win32.whl", hash = "sha256:027eaa61361ec735926566f995d959ade4796f6a49d3bde17e5134b9964f9ba8", size = 527852, upload-time = "2025-11-24T23:26:01.084Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/f0/f2ed1de154e15b107dc692262395b3c17fc34eafe2a78fc2115931561730/asyncpg-0.31.0-cp312-cp312-win_amd64.whl", hash = "sha256:72d6bdcbc93d608a1158f17932de2321f68b1a967a13e014998db87a72ed3186", size = 597175, upload-time = "2025-11-24T23:26:02.564Z" }, + { url = "https://files.pythonhosted.org/packages/95/11/97b5c2af72a5d0b9bc3fa30cd4b9ce22284a9a943a150fdc768763caf035/asyncpg-0.31.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c204fab1b91e08b0f47e90a75d1b3c62174dab21f670ad6c5d0f243a228f015b", size = 661111, upload-time = "2025-11-24T23:26:04.467Z" }, + { url = "https://files.pythonhosted.org/packages/1b/71/157d611c791a5e2d0423f09f027bd499935f0906e0c2a416ce712ba51ef3/asyncpg-0.31.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:54a64f91839ba59008eccf7aad2e93d6e3de688d796f35803235ea1c4898ae1e", size = 636928, upload-time = "2025-11-24T23:26:05.944Z" }, + { url = "https://files.pythonhosted.org/packages/2e/fc/9e3486fb2bbe69d4a867c0b76d68542650a7ff1574ca40e84c3111bb0c6e/asyncpg-0.31.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0e0822b1038dc7253b337b0f3f676cadc4ac31b126c5d42691c39691962e403", size = 3424067, upload-time = "2025-11-24T23:26:07.957Z" }, + { url = "https://files.pythonhosted.org/packages/12/c6/8c9d076f73f07f995013c791e018a1cd5f31823c2a3187fc8581706aa00f/asyncpg-0.31.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bef056aa502ee34204c161c72ca1f3c274917596877f825968368b2c33f585f4", size = 3518156, upload-time = "2025-11-24T23:26:09.591Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3b/60683a0baf50fbc546499cfb53132cb6835b92b529a05f6a81471ab60d0c/asyncpg-0.31.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0bfbcc5b7ffcd9b75ab1558f00db2ae07db9c80637ad1b2469c43df79d7a5ae2", size = 3319636, upload-time = "2025-11-24T23:26:11.168Z" }, + { url = "https://files.pythonhosted.org/packages/50/dc/8487df0f69bd398a61e1792b3cba0e47477f214eff085ba0efa7eac9ce87/asyncpg-0.31.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22bc525ebbdc24d1261ecbf6f504998244d4e3be1721784b5f64664d61fbe602", size = 3472079, upload-time = "2025-11-24T23:26:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/13/a1/c5bbeeb8531c05c89135cb8b28575ac2fac618bcb60119ee9696c3faf71c/asyncpg-0.31.0-cp313-cp313-win32.whl", hash = "sha256:f890de5e1e4f7e14023619399a471ce4b71f5418cd67a51853b9910fdfa73696", size = 527606, upload-time = "2025-11-24T23:26:14.78Z" }, + { url = "https://files.pythonhosted.org/packages/91/66/b25ccb84a246b470eb943b0107c07edcae51804912b824054b3413995a10/asyncpg-0.31.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc5f2fa9916f292e5c5c8b2ac2813763bcd7f58e130055b4ad8a0531314201ab", size = 596569, upload-time = "2025-11-24T23:26:16.189Z" }, + { url = "https://files.pythonhosted.org/packages/3c/36/e9450d62e84a13aea6580c83a47a437f26c7ca6fa0f0fd40b6670793ea30/asyncpg-0.31.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:f6b56b91bb0ffc328c4e3ed113136cddd9deefdf5f79ab448598b9772831df44", size = 660867, upload-time = "2025-11-24T23:26:17.631Z" }, + { url = "https://files.pythonhosted.org/packages/82/4b/1d0a2b33b3102d210439338e1beea616a6122267c0df459ff0265cd5807a/asyncpg-0.31.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:334dec28cf20d7f5bb9e45b39546ddf247f8042a690bff9b9573d00086e69cb5", size = 638349, upload-time = "2025-11-24T23:26:19.689Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/aa/e7f7ac9a7974f08eff9183e392b2d62516f90412686532d27e196c0f0eeb/asyncpg-0.31.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98cc158c53f46de7bb677fd20c417e264fc02b36d901cc2a43bd6cb0dc6dbfd2", size = 3410428, upload-time = "2025-11-24T23:26:21.275Z" }, + { url = "https://files.pythonhosted.org/packages/6f/de/bf1b60de3dede5c2731e6788617a512bc0ebd9693eac297ee74086f101d7/asyncpg-0.31.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9322b563e2661a52e3cdbc93eed3be7748b289f792e0011cb2720d278b366ce2", size = 3471678, upload-time = "2025-11-24T23:26:23.627Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/fc3ade003e22d8bd53aaf8f75f4be48f0b460fa73738f0391b9c856a9147/asyncpg-0.31.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19857a358fc811d82227449b7ca40afb46e75b33eb8897240c3839dd8b744218", size = 3313505, upload-time = "2025-11-24T23:26:25.235Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e9/73eb8a6789e927816f4705291be21f2225687bfa97321e40cd23055e903a/asyncpg-0.31.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ba5f8886e850882ff2c2ace5732300e99193823e8107e2c53ef01c1ebfa1e85d", size = 3434744, upload-time = "2025-11-24T23:26:26.944Z" }, + { url = "https://files.pythonhosted.org/packages/08/4b/f10b880534413c65c5b5862f79b8e81553a8f364e5238832ad4c0af71b7f/asyncpg-0.31.0-cp314-cp314-win32.whl", hash = "sha256:cea3a0b2a14f95834cee29432e4ddc399b95700eb1d51bbc5bfee8f31fa07b2b", size = 532251, upload-time = "2025-11-24T23:26:28.404Z" }, + { url = "https://files.pythonhosted.org/packages/d3/2d/7aa40750b7a19efa5d66e67fc06008ca0f27ba1bd082e457ad82f59aba49/asyncpg-0.31.0-cp314-cp314-win_amd64.whl", hash = "sha256:04d19392716af6b029411a0264d92093b6e5e8285ae97a39957b9a9c14ea72be", size = 604901, upload-time = "2025-11-24T23:26:30.34Z" }, + { url = "https://files.pythonhosted.org/packages/ce/fe/b9dfe349b83b9dee28cc42360d2c86b2cdce4cb551a2c2d27e156bcac84d/asyncpg-0.31.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bdb957706da132e982cc6856bb2f7b740603472b54c3ebc77fe60ea3e57e1bd2", size = 702280, upload-time = "2025-11-24T23:26:32Z" }, + { url = "https://files.pythonhosted.org/packages/6a/81/e6be6e37e560bd91e6c23ea8a6138a04fd057b08cf63d3c5055c98e81c1d/asyncpg-0.31.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6d11b198111a72f47154fa03b85799f9be63701e068b43f84ac25da0bda9cb31", size = 682931, upload-time = "2025-11-24T23:26:33.572Z" }, + { url = "https://files.pythonhosted.org/packages/a6/45/6009040da85a1648dd5bc75b3b0a062081c483e75a1a29041ae63a0bf0dc/asyncpg-0.31.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:18c83b03bc0d1b23e6230f5bf8d4f217dc9bc08644ce0502a9d91dc9e634a9c7", size = 3581608, upload-time = "2025-11-24T23:26:35.638Z" }, + { url = "https://files.pythonhosted.org/packages/7e/06/2e3d4d7608b0b2b3adbee0d0bd6a2d29ca0fc4d8a78f8277df04e2d1fd7b/asyncpg-0.31.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e009abc333464ff18b8f6fd146addffd9aaf63e79aa3bb40ab7a4c332d0c5e9e", size = 3498738, upload-time = "2025-11-24T23:26:37.275Z" }, + { url = "https://files.pythonhosted.org/packages/7d/aa/7d75ede780033141c51d83577ea23236ba7d3a23593929b32b49db8ed36e/asyncpg-0.31.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3b1fbcb0e396a5ca435a8826a87e5c2c2cc0c8c68eb6fadf82168056b0e53a8c", size = 3401026, 
upload-time = "2025-11-24T23:26:39.423Z" }, + { url = "https://files.pythonhosted.org/packages/ba/7a/15e37d45e7f7c94facc1e9148c0e455e8f33c08f0b8a0b1deb2c5171771b/asyncpg-0.31.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8df714dba348efcc162d2adf02d213e5fab1bd9f557e1305633e851a61814a7a", size = 3429426, upload-time = "2025-11-24T23:26:41.032Z" }, + { url = "https://files.pythonhosted.org/packages/13/d5/71437c5f6ae5f307828710efbe62163974e71237d5d46ebd2869ea052d10/asyncpg-0.31.0-cp314-cp314t-win32.whl", hash = "sha256:1b41f1afb1033f2b44f3234993b15096ddc9cd71b21a42dbd87fc6a57b43d65d", size = 614495, upload-time = "2025-11-24T23:26:42.659Z" }, + { url = "https://files.pythonhosted.org/packages/3c/d7/8fb3044eaef08a310acfe23dae9a8e2e07d305edc29a53497e52bc76eca7/asyncpg-0.31.0-cp314-cp314t-win_amd64.whl", hash = "sha256:bd4107bb7cdd0e9e65fae66a62afd3a249663b844fa34d479f6d5b3bef9c04c3", size = 706062, upload-time = "2025-11-24T23:26:44.086Z" }, +] + [[package]] name = "atroposlib" version = "0.4.0" @@ -1672,6 +1729,8 @@ acp = [ all = [ { name = "agent-client-protocol" }, { name = "aiohttp" }, + { name = "aiosqlite", marker = "sys_platform == 'linux'" }, + { name = "asyncpg", marker = "sys_platform == 'linux'" }, { name = "croniter" }, { name = "daytona" }, { name = "debugpy" }, @@ -1727,6 +1786,8 @@ honcho = [ { name = "honcho-ai" }, ] matrix = [ + { name = "aiosqlite" }, + { name = "asyncpg" }, { name = "markdown" }, { name = "mautrix", extra = ["encryption"] }, ] @@ -1791,7 +1852,9 @@ requires-dist = [ { name = "aiohttp", marker = "extra == 'homeassistant'", specifier = ">=3.9.0,<4" }, { name = "aiohttp", marker = "extra == 'messaging'", specifier = ">=3.13.3,<4" }, { name = "aiohttp", marker = "extra == 'sms'", specifier = ">=3.9.0,<4" }, + { name = "aiosqlite", marker = "extra == 'matrix'", specifier = ">=0.20" }, { name = "anthropic", specifier = ">=0.39.0,<1" }, + { name = "asyncpg", marker = "extra == 'matrix'", specifier = ">=0.29" }, { name = "atroposlib", marker = "extra == 'rl'", git = "https://github.com/NousResearch/atropos.git" }, { name = "croniter", marker = "extra == 'cron'", specifier = ">=6.0.0,<7" }, { name = "daytona", marker = "extra == 'daytona'", specifier = ">=0.148.0,<1" }, diff --git a/web/README.md b/web/README.md new file mode 100644 index 000000000..d8127f96e --- /dev/null +++ b/web/README.md @@ -0,0 +1,48 @@ +# Hermes Agent โ€” Web UI + +Browser-based dashboard for managing Hermes Agent configuration, API keys, and monitoring active sessions. + +## Stack + +- **Vite** + **React 19** + **TypeScript** +- **Tailwind CSS v4** with custom dark theme +- **shadcn/ui**-style components (hand-rolled, no CLI dependency) + +## Development + +```bash +# Start the backend API server +cd ../ +python -m hermes_cli.main web --no-open + +# In another terminal, start the Vite dev server (with HMR + API proxy) +cd web/ +npm run dev +``` + +The Vite dev server proxies `/api` requests to `http://127.0.0.1:9119` (the FastAPI backend). + +## Build + +```bash +npm run build +``` + +This outputs to `../hermes_cli/web_dist/`, which the FastAPI server serves as a static SPA. The built assets are included in the Python package via `pyproject.toml` package-data. + +## Structure + +``` +src/ +โ”œโ”€โ”€ components/ui/ # Reusable UI primitives (Card, Badge, Button, Input, etc.) 
+├── lib/
+│   ├── api.ts       # API client — typed fetch wrappers for all backend endpoints
+│   └── utils.ts     # cn() helper for Tailwind class merging
+├── pages/
+│   ├── StatusPage   # Agent status, active/recent sessions
+│   ├── ConfigPage   # Dynamic config editor (reads schema from backend)
+│   └── EnvPage      # API key management with save/clear
+├── App.tsx          # Main layout and navigation
+├── main.tsx         # React entry point
+└── index.css        # Tailwind imports and theme variables
+```
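The proxy behaviour the README describes would live in `web/vite.config.ts`, which this diff does not include. Below is a minimal sketch, assuming the plugins implied by the lockfile (`@vitejs/plugin-react`, `@tailwindcss/vite`) and the output directory named in the README; treat the exact options as illustrative, not as the file from this PR:

```ts
// web/vite.config.ts (illustrative sketch, not part of this diff)
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
import tailwindcss from '@tailwindcss/vite'

export default defineConfig({
  plugins: [react(), tailwindcss()],
  server: {
    proxy: {
      // Forward /api/* to the FastAPI backend started via `python -m hermes_cli.main web`
      '/api': 'http://127.0.0.1:9119',
    },
  },
  build: {
    // The README states the build output lands in ../hermes_cli/web_dist/
    outDir: '../hermes_cli/web_dist',
    emptyOutDir: true,
  },
})
```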
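Likewise, the "typed fetch wrappers" in `src/lib/api.ts` are only named above, not shown. Their shape is roughly the following; the `SessionInfo` fields and the `/sessions` endpoint are hypothetical stand-ins:

```ts
// Illustrative shape only; the real endpoint names and types live in web/src/lib/api.ts.
export interface SessionInfo {
  id: string
  active: boolean
}

async function getJSON<T>(path: string): Promise<T> {
  // All calls go through the /api prefix so the Vite dev proxy (or FastAPI in
  // production) can route them to the backend.
  const res = await fetch(`/api${path}`)
  if (!res.ok) throw new Error(`GET ${path} failed: ${res.status}`)
  return (await res.json()) as T
}

export const fetchSessions = () => getJSON<SessionInfo[]>('/sessions')
```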
diff --git a/web/eslint.config.js b/web/eslint.config.js
new file mode 100644
index 000000000..5e6b472f5
--- /dev/null
+++ b/web/eslint.config.js
@@ -0,0 +1,23 @@
+import js from '@eslint/js'
+import globals from 'globals'
+import reactHooks from 'eslint-plugin-react-hooks'
+import reactRefresh from 'eslint-plugin-react-refresh'
+import tseslint from 'typescript-eslint'
+import { defineConfig, globalIgnores } from 'eslint/config'
+
+export default defineConfig([
+  globalIgnores(['dist']),
+  {
+    files: ['**/*.{ts,tsx}'],
+    extends: [
+      js.configs.recommended,
+      tseslint.configs.recommended,
+      reactHooks.configs.flat.recommended,
+      reactRefresh.configs.vite,
+    ],
+    languageOptions: {
+      ecmaVersion: 2020,
+      globals: globals.browser,
+    },
+  },
+])
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 000000000..c9f0d18e1
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Hermes Agent</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
diff --git a/web/package-lock.json b/web/package-lock.json new file mode 100644 index 000000000..d9aa7a951 --- /dev/null +++ b/web/package-lock.json @@ -0,0 +1,3835 @@ +{ + "name": "web", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "web", + "version": "0.0.0", + "dependencies": { + "@tailwindcss/vite": "^4.2.1", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "lucide-react": "^0.577.0", + "react": "^19.2.4", + "react-dom": "^19.2.4", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^4.2.1" + }, + "devDependencies": { + "@eslint/js": "^9.39.4", + "@types/node": "^24.12.0", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.2.0", + "eslint": "^9.39.4", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.5.2", + "globals": "^17.4.0", + "typescript": "~5.9.3", + "typescript-eslint": "^8.56.1", + "vite": "^7.3.1" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true,
"license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": 
">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz", + "integrity": "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.4.tgz", + "integrity": 
"sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz", + "integrity": "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.4.tgz", + "integrity": "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz", + "integrity": "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz", + "integrity": "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz", + "integrity": "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz", + "integrity": "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz", + "integrity": "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz", + "integrity": "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.4", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz", + "integrity": "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz", + "integrity": "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz", + "integrity": "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz", + "integrity": "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz", + "integrity": "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz", + "integrity": "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz", + "integrity": "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz", + "integrity": "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz", + "integrity": "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz", + "integrity": "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz", + "integrity": "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz", + "integrity": "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz", + "integrity": "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz", + "integrity": "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz", + "integrity": "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz", + "integrity": "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": 
"https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.2", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.2.tgz", + "integrity": "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.5" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.5.tgz", + "integrity": "sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.14.0", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.5", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.4", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.4.tgz", + "integrity": "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": 
"https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": 
"sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.3", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.3.tgz", + "integrity": "sha512-eybk3TjzzzV97Dlj5c+XrBFW57eTNhzod66y9HrBlzJ6NsCrWCp/2kaPS3K9wJmurBC0Tdw4yPjXKZqlznim3Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + 
"optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": 
[Remainder of the package-lock.json diff collapsed — auto-generated npm lockfile. The elided entries pin the web frontend's dependency tree, each with its resolved registry URL and sha512 integrity hash: react and react-dom 19.2.4, tailwindcss 4.2.1 with @tailwindcss/vite 4.2.1 and per-platform @tailwindcss/oxide binaries, lightningcss 1.31.1 with its platform-specific packages, rollup 4.59.0 and esbuild 0.27.4 with their optional native binaries, @vitejs/plugin-react 5.2.0, eslint 9.39.4 with eslint-plugin-react-hooks 7.0.1 and eslint-plugin-react-refresh 0.5.2, typescript-eslint 8.57.0 (parser, scope-manager, typescript-estree, and related packages), typescript 5.9.3, lucide-react 0.577.0, class-variance-authority 0.7.1, tailwind-merge 3.5.0, and their transitive dependencies (babel type packages, browserslist/caniuse-lite, postcss 8.5.8, and assorted ESLint utilities).]
"https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + } + } +} diff --git a/web/package.json b/web/package.json new file mode 100644 index 000000000..87dbfdb79 --- /dev/null +++ b/web/package.json @@ -0,0 +1,36 @@ +{ + "name": "web", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -b && vite build", + "lint": "eslint .", + "preview": "vite preview" + }, + "dependencies": { + "@tailwindcss/vite": "^4.2.1", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "lucide-react": "^0.577.0", + "react": "^19.2.4", + "react-dom": "^19.2.4", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^4.2.1" + }, + "devDependencies": { + "@eslint/js": "^9.39.4", + "@types/node": "^24.12.0", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.2.0", + "eslint": "^9.39.4", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.5.2", + "globals": "^17.4.0", + "typescript": "~5.9.3", + "typescript-eslint": "^8.56.1", + "vite": "^7.3.1" + } +} diff --git a/web/public/favicon.ico b/web/public/favicon.ico new file mode 100644 index 000000000..7a949324d Binary files /dev/null and b/web/public/favicon.ico differ diff --git a/web/public/fonts/Collapse-Bold.woff2 b/web/public/fonts/Collapse-Bold.woff2 new file mode 100644 index 000000000..262321038 Binary files /dev/null and b/web/public/fonts/Collapse-Bold.woff2 differ diff --git a/web/public/fonts/Collapse-Regular.woff2 b/web/public/fonts/Collapse-Regular.woff2 new file mode 100644 index 000000000..0d2e477cc Binary files /dev/null and b/web/public/fonts/Collapse-Regular.woff2 differ diff --git a/web/public/fonts/CourierPrime-Bold.woff2 b/web/public/fonts/CourierPrime-Bold.woff2 new file mode 100644 index 000000000..4f6d5e9c8 Binary files /dev/null and b/web/public/fonts/CourierPrime-Bold.woff2 differ diff --git a/web/public/fonts/CourierPrime-Regular.woff2 b/web/public/fonts/CourierPrime-Regular.woff2 new file mode 100644 index 000000000..feae1f758 Binary files /dev/null and b/web/public/fonts/CourierPrime-Regular.woff2 differ diff --git a/web/public/fonts/Mondwest-Regular.woff2 b/web/public/fonts/Mondwest-Regular.woff2 new file mode 100644 index 000000000..02a3658cf Binary files /dev/null and b/web/public/fonts/Mondwest-Regular.woff2 differ diff --git a/web/public/fonts/RulesCompressed-Medium.woff2 b/web/public/fonts/RulesCompressed-Medium.woff2 new file mode 100644 index 000000000..1a352536b Binary files /dev/null and 
b/web/public/fonts/RulesCompressed-Medium.woff2 differ
diff --git a/web/public/fonts/RulesCompressed-Regular.woff2 b/web/public/fonts/RulesCompressed-Regular.woff2
new file mode 100644
index 000000000..25dabcc97
Binary files /dev/null and b/web/public/fonts/RulesCompressed-Regular.woff2 differ
diff --git a/web/public/fonts/RulesExpanded-Bold.woff2 b/web/public/fonts/RulesExpanded-Bold.woff2
new file mode 100644
index 000000000..d85515dbd
Binary files /dev/null and b/web/public/fonts/RulesExpanded-Bold.woff2 differ
diff --git a/web/public/fonts/RulesExpanded-Regular.woff2 b/web/public/fonts/RulesExpanded-Regular.woff2
new file mode 100644
index 000000000..41e6a49e8
Binary files /dev/null and b/web/public/fonts/RulesExpanded-Regular.woff2 differ
diff --git a/web/src/App.tsx b/web/src/App.tsx
new file mode 100644
index 000000000..b2f76808e
--- /dev/null
+++ b/web/src/App.tsx
@@ -0,0 +1,124 @@
+import { useState, useEffect, useRef, type ComponentType } from "react";
+import { Activity, BarChart3, Clock, FileText, KeyRound, MessageSquare, Package, Settings } from "lucide-react";
+import StatusPage from "@/pages/StatusPage";
+import ConfigPage from "@/pages/ConfigPage";
+import EnvPage from "@/pages/EnvPage";
+import SessionsPage from "@/pages/SessionsPage";
+import LogsPage from "@/pages/LogsPage";
+import AnalyticsPage from "@/pages/AnalyticsPage";
+import CronPage from "@/pages/CronPage";
+import SkillsPage from "@/pages/SkillsPage";
+
+const NAV_ITEMS = [
+  { id: "status", label: "Status", icon: Activity },
+  { id: "sessions", label: "Sessions", icon: MessageSquare },
+  { id: "analytics", label: "Analytics", icon: BarChart3 },
+  { id: "logs", label: "Logs", icon: FileText },
+  { id: "cron", label: "Cron", icon: Clock },
+  { id: "skills", label: "Skills", icon: Package },
+  { id: "config", label: "Config", icon: Settings },
+  { id: "env", label: "Keys", icon: KeyRound },
+] as const;
+
+type PageId = (typeof NAV_ITEMS)[number]["id"];
+
+const PAGE_COMPONENTS: Record<PageId, ComponentType> = {
+  status: StatusPage,
+  sessions: SessionsPage,
+  analytics: AnalyticsPage,
+  logs: LogsPage,
+  cron: CronPage,
+  skills: SkillsPage,
+  config: ConfigPage,
+  env: EnvPage,
+};
+
+export default function App() {
+  const [page, setPage] = useState<PageId>("status");
+  const [animKey, setAnimKey] = useState(0);
+  const initialRef = useRef(true);
+
+  useEffect(() => {
+    // Skip the animation key bump on initial mount to avoid re-mounting
+    // the default page component (which causes duplicate API requests).
+    if (initialRef.current) {
+      initialRef.current = false;
+      return;
+    }
+    setAnimKey((k) => k + 1);
+  }, [page]);
+
+  const PageComponent = PAGE_COMPONENTS[page];
+
+  return (
+    <div>
+      {/* Global grain + warm glow (matches landing page) */}
+      <div />
+      <div />
+
+      {/* ---- Header with grid-border nav ---- */}
+      <header>
+        <div>
+          {/* Brand */}
+          <div>
+            <span>
+              Hermes
+              <br />
+              Agent
+            </span>
+          </div>
+
+          {/* Nav grid — Mondwest labels like the landing page nav */}
+          <nav>
+            {NAV_ITEMS.map(({ id, label, icon: Icon }) => (
+              <button key={id} onClick={() => setPage(id)} aria-current={page === id}>
+                <Icon />
+                {label}
+              </button>
+            ))}
+          </nav>
+
+          {/* Version badge */}
+          <span>Web UI</span>
+        </div>
+      </header>
+
+      <main>
+        <PageComponent key={animKey} />
+      </main>
+
+      {/* ---- Footer ---- */}
+      <footer>
+        <span>Hermes Agent</span>
+        <span>Nous Research</span>
+      </footer>
+    </div>
+  );
+}
diff --git a/web/src/components/AutoField.tsx b/web/src/components/AutoField.tsx
new file mode 100644
index 000000000..67f6739e9
--- /dev/null
+++ b/web/src/components/AutoField.tsx
@@ -0,0 +1,151 @@
+import { Input } from "@/components/ui/input";
+import { Label } from "@/components/ui/label";
+import { Select } from "@/components/ui/select";
+import { Switch } from "@/components/ui/switch";
+
+interface AutoFieldProps {
+  schemaKey: string;
+  schema: Record<string, unknown>;
+  value: unknown;
+  onChange: (value: unknown) => void;
+}
+
+function FieldHint({ schema, schemaKey }: { schema: Record<string, unknown>; schemaKey: string }) {
+  const keyPath = schemaKey.includes(".") ? schemaKey : "";
+  const description = schema.description ? String(schema.description) : "";
+
+  if (!keyPath && !description) return null;
+
+  return (
+    <div>
+      {keyPath && <code>{keyPath}</code>}
+      {description && <span>{description}</span>}
+    </div>
+  );
+}
+
+export function AutoField({
+  schemaKey,
+  schema,
+  value,
+  onChange,
+}: AutoFieldProps) {
+  const rawLabel = schemaKey.split(".").pop() ?? schemaKey;
+  const label = rawLabel.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
+
+  if (schema.type === "boolean") {
+    return (
+      <div>
+        <div>
+          <Label>{label}</Label>
+          <Switch checked={Boolean(value)} onChange={(checked) => onChange(checked)} />
+        </div>
+        <FieldHint schema={schema} schemaKey={schemaKey} />
+      </div>
+    );
+  }
+
+  if (schema.type === "select") {
+    const options = (schema.options as string[]) ?? [];
+    return (
+      <div>
+        <Label>{label}</Label>
+        <Select value={String(value ?? "")} onChange={(e) => onChange(e.target.value)}>
+          {options.map((opt) => (
+            <option key={opt} value={opt}>
+              {opt}
+            </option>
+          ))}
+        </Select>
+        <FieldHint schema={schema} schemaKey={schemaKey} />
+      </div>
+    );
+  }
+
+  if (schema.type === "number") {
+    return (
+      <div>
+        <Label>{label}</Label>
+        <Input
+          type="number"
+          value={value as number}
+          onChange={(e) => {
+            const raw = e.target.value;
+            if (raw === "") {
+              onChange(0);
+              return;
+            }
+            const n = Number(raw);
+            if (!Number.isNaN(n)) {
+              onChange(n);
+            }
+          }}
+        />
+        <FieldHint schema={schema} schemaKey={schemaKey} />
+      </div>
+    );
+  }
+
+  if (schema.type === "text") {
+    return (
+      <div>
+        <Label>{label}</Label>
+        <Input
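A note on the typing pattern in App.tsx above: because NAV_ITEMS is declared as const, PageId is derived from the data itself, and Record<PageId, ComponentType> forces PAGE_COMPONENTS to stay exhaustive. A minimal TypeScript sketch of the idiom, with illustrative names that do not appear in this diff:

    const ITEMS = [
      { id: "a", label: "Alpha" },
      { id: "b", label: "Beta" },
    ] as const;

    // Union type "a" | "b", derived from the data rather than maintained by hand.
    type ItemId = (typeof ITEMS)[number]["id"];

    // Exhaustiveness check: adding { id: "c", ... } to ITEMS turns this
    // object into a compile error until a matching "c" entry exists here.
    const PAGES: Record<ItemId, () => string> = {
      a: () => "alpha page",
      b: () => "beta page",
    };

Removing an entry from ITEMS similarly flags the now-extraneous key in PAGES, so nav items and pages cannot drift apart.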
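The initialRef guard in App.tsx is the usual "skip the first effect run" idiom: the effect fires on mount and on every page change, but only a change should bump animKey, because remounting the freshly mounted default page would repeat its mount-time API requests. A standalone sketch of the idiom as a reusable hook; useOnChange is a hypothetical name, not part of this diff:

    import { useEffect, useRef } from "react";

    // Illustrative only: runs `callback` when `dep` changes, but not on mount.
    function useOnChange<T>(dep: T, callback: (value: T) => void) {
      const first = useRef(true);
      useEffect(() => {
        if (first.current) {
          // The first run corresponds to the initial mount; skipping it keeps
          // mount-time work (such as a page's initial fetch) from running twice.
          first.current = false;
          return;
        }
        callback(dep);
      }, [dep]); // deliberately keyed on dep alone, mirroring App.tsx's [page]
    }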