diff --git a/.env.example b/.env.example index c8c4af9b3d..a6e98751a3 100644 --- a/.env.example +++ b/.env.example @@ -89,6 +89,15 @@ # Optional base URL override: # HERMES_QWEN_BASE_URL=https://portal.qwen.ai/v1 +# ============================================================================= +# LLM PROVIDER (Xiaomi MiMo) +# ============================================================================= +# Xiaomi MiMo models (mimo-v2-pro, mimo-v2-omni, mimo-v2-flash). +# Get your key at: https://platform.xiaomimimo.com +# XIAOMI_API_KEY=your_key_here +# Optional base URL override: +# XIAOMI_BASE_URL=https://api.xiaomimimo.com/v1 + # ============================================================================= # TOOL API KEYS # ============================================================================= diff --git a/agent/auxiliary_client.py b/agent/auxiliary_client.py index e48f9c2c3e..6b7bf19668 100644 --- a/agent/auxiliary_client.py +++ b/agent/auxiliary_client.py @@ -23,17 +23,13 @@ Resolution order for vision/multimodal tasks (auto mode): 6. Custom endpoint (for local vision models: Qwen-VL, LLaVA, Pixtral, etc.) 7. None -Per-task provider overrides (e.g. AUXILIARY_VISION_PROVIDER, -CONTEXT_COMPRESSION_PROVIDER) can force a specific provider for each task. +Per-task overrides are configured in config.yaml under the ``auxiliary:`` section +(e.g. ``auxiliary.vision.provider``, ``auxiliary.compression.model``). Default "auto" follows the chains above. -Per-task model overrides (e.g. AUXILIARY_VISION_MODEL, -AUXILIARY_WEB_EXTRACT_MODEL) let callers use a different model slug -than the provider's default. - -Per-task direct endpoint overrides (e.g. AUXILIARY_VISION_BASE_URL, -AUXILIARY_VISION_API_KEY) let callers route a specific auxiliary task to a -custom OpenAI-compatible endpoint without touching the main model settings. +Legacy env var overrides (AUXILIARY_{TASK}_PROVIDER, AUXILIARY_{TASK}_MODEL, +AUXILIARY_{TASK}_BASE_URL, etc.) 
are still read as a backward-compat fallback +but config.yaml takes priority. New configuration should always use config.yaml. Payment / credit exhaustion fallback: When a resolved provider returns HTTP 402 or a credit-related error, @@ -111,6 +107,14 @@ _API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = { "kilocode": "google/gemini-3-flash-preview", } +# Vision-specific model overrides for direct providers. +# When the user's main provider has a dedicated vision/multimodal model that +# differs from their main chat model, map it here. The vision auto-detect +# "exotic provider" branch checks this before falling back to the main model. +_PROVIDER_VISION_MODELS: Dict[str, str] = { + "xiaomi": "mimo-v2-omni", +} + # OpenRouter app attribution headers _OR_HEADERS = { "HTTP-Referer": "https://hermes-agent.nousresearch.com", @@ -1687,16 +1691,18 @@ def resolve_vision_provider_client( if sync_client is not None: return _finalize(main_provider, sync_client, default_model) else: - # Exotic provider (DeepSeek, Alibaba, named custom, etc.) + # Exotic provider (DeepSeek, Alibaba, Xiaomi, named custom, etc.) + # Use provider-specific vision model if available, otherwise main model. + vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model) rpc_client, rpc_model = resolve_provider_client( - main_provider, main_model) + main_provider, vision_model) if rpc_client is not None: logger.info( "Vision auto-detect: using active provider %s (%s)", - main_provider, rpc_model or main_model, + main_provider, rpc_model or vision_model, ) return _finalize( - main_provider, rpc_client, rpc_model or main_model) + main_provider, rpc_client, rpc_model or vision_model) # Fall back through aggregators. for candidate in _VISION_AUTO_PROVIDER_ORDER: @@ -1958,8 +1964,8 @@ def _resolve_task_provider_model( Priority: 1. Explicit provider/model/base_url/api_key args (always win) - 2. Env var overrides (AUXILIARY_{TASK}_*, CONTEXT_{TASK}_*) - 3. 
Config file (auxiliary.{task}.* or compression.*) + 2. Config file (auxiliary.{task}.* or compression.*) + 3. Env var overrides (backward-compat: AUXILIARY_{TASK}_*, CONTEXT_{TASK}_*) 4. "auto" (full auto-detection chain) Returns (provider, model, base_url, api_key, api_mode) where model may @@ -2002,10 +2008,11 @@ def _resolve_task_provider_model( _sbu = comp.get("summary_base_url") or "" cfg_base_url = cfg_base_url or _sbu.strip() or None + # Env vars are backward-compat fallback only — config.yaml is primary. env_model = _get_auxiliary_env_override(task, "MODEL") if task else None env_api_mode = _get_auxiliary_env_override(task, "API_MODE") if task else None - resolved_model = model or env_model or cfg_model - resolved_api_mode = env_api_mode or cfg_api_mode + resolved_model = model or cfg_model or env_model + resolved_api_mode = cfg_api_mode or env_api_mode if base_url: return "custom", resolved_model, base_url, api_key, resolved_api_mode @@ -2013,19 +2020,23 @@ def _resolve_task_provider_model( return provider, resolved_model, base_url, api_key, resolved_api_mode if task: + # Config.yaml is the primary source for per-task overrides. + if cfg_base_url: + return "custom", resolved_model, cfg_base_url, cfg_api_key, resolved_api_mode + if cfg_provider and cfg_provider != "auto": + return cfg_provider, resolved_model, None, None, resolved_api_mode + + # Env vars are backward-compat fallback for users who haven't + # migrated to config.yaml yet. 
env_base_url = _get_auxiliary_env_override(task, "BASE_URL") env_api_key = _get_auxiliary_env_override(task, "API_KEY") if env_base_url: - return "custom", resolved_model, env_base_url, env_api_key or cfg_api_key, resolved_api_mode + return "custom", resolved_model, env_base_url, env_api_key, resolved_api_mode env_provider = _get_auxiliary_provider(task) if env_provider != "auto": return env_provider, resolved_model, None, None, resolved_api_mode - if cfg_base_url: - return "custom", resolved_model, cfg_base_url, cfg_api_key, resolved_api_mode - if cfg_provider and cfg_provider != "auto": - return cfg_provider, resolved_model, None, None, resolved_api_mode return "auto", resolved_model, None, None, resolved_api_mode return "auto", resolved_model, None, None, resolved_api_mode diff --git a/agent/display.py b/agent/display.py index 604b7a298c..1820645768 100644 --- a/agent/display.py +++ b/agent/display.py @@ -4,7 +4,6 @@ Pure display functions and classes with no AIAgent dependency. Used by AIAgent._execute_tool_calls for CLI feedback. 
""" -import json import logging import os import sys @@ -14,6 +13,8 @@ from dataclasses import dataclass, field from difflib import unified_diff from pathlib import Path +from utils import safe_json_loads + # ANSI escape codes for coloring tool failure indicators _RED = "\033[31m" _RESET = "\033[0m" @@ -372,9 +373,8 @@ def _result_succeeded(result: str | None) -> bool: """Conservatively detect whether a tool result represents success.""" if not result: return False - try: - data = json.loads(result) - except (json.JSONDecodeError, TypeError): + data = safe_json_loads(result) + if data is None: return False if not isinstance(data, dict): return False @@ -423,10 +423,7 @@ def extract_edit_diff( ) -> str | None: """Extract a unified diff from a file-edit tool result.""" if tool_name == "patch" and result: - try: - data = json.loads(result) - except (json.JSONDecodeError, TypeError): - data = None + data = safe_json_loads(result) if isinstance(data, dict): diff = data.get("diff") if isinstance(diff, str) and diff.strip(): @@ -780,23 +777,19 @@ def _detect_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str] return False, "" if tool_name == "terminal": - try: - data = json.loads(result) + data = safe_json_loads(result) + if isinstance(data, dict): exit_code = data.get("exit_code") if exit_code is not None and exit_code != 0: return True, f" [exit {exit_code}]" - except (json.JSONDecodeError, TypeError, AttributeError): - logger.debug("Could not parse terminal result as JSON for exit code check") return False, "" # Memory-specific: distinguish "full" from real errors if tool_name == "memory": - try: - data = json.loads(result) + data = safe_json_loads(result) + if isinstance(data, dict): if data.get("success") is False and "exceed the limit" in data.get("error", ""): return True, " [full]" - except (json.JSONDecodeError, TypeError, AttributeError): - logger.debug("Could not parse memory result as JSON for capacity check") # Generic heuristic for 
non-terminal tools lower = result[:500].lower() diff --git a/agent/model_metadata.py b/agent/model_metadata.py index 2ce0cefa0d..f12801777d 100644 --- a/agent/model_metadata.py +++ b/agent/model_metadata.py @@ -27,12 +27,14 @@ _PROVIDER_PREFIXES: frozenset[str] = frozenset({ "gemini", "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek", "opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba", "qwen-oauth", + "xiaomi", "custom", "local", # Common aliases "google", "google-gemini", "google-ai-studio", "glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot", "github-models", "kimi", "moonshot", "claude", "deep-seek", "opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen", + "mimo", "xiaomi-mimo", "qwen-portal", }) @@ -149,9 +151,10 @@ DEFAULT_CONTEXT_LENGTHS = { "moonshotai/Kimi-K2.5": 262144, "moonshotai/Kimi-K2-Thinking": 262144, "MiniMaxAI/MiniMax-M2.5": 204800, - "XiaomiMiMo/MiMo-V2-Flash": 32768, - "mimo-v2-pro": 1048576, - "mimo-v2-omni": 1048576, + "XiaomiMiMo/MiMo-V2-Flash": 256000, + "mimo-v2-pro": 1000000, + "mimo-v2-omni": 256000, + "mimo-v2-flash": 256000, "zai-org/GLM-5": 202752, } @@ -176,6 +179,12 @@ _MAX_COMPLETION_KEYS = ( # Local server hostnames / address patterns _LOCAL_HOSTS = ("localhost", "127.0.0.1", "::1", "0.0.0.0") +# Docker / Podman / Lima DNS names that resolve to the host machine +_CONTAINER_LOCAL_SUFFIXES = ( + ".docker.internal", + ".containers.internal", + ".lima.internal", +) def _normalize_base_url(base_url: str) -> str: @@ -211,6 +220,8 @@ _URL_TO_PROVIDER: Dict[str, str] = { "api.fireworks.ai": "fireworks", "opencode.ai": "opencode-go", "api.x.ai": "xai", + "api.xiaomimimo.com": "xiaomi", + "xiaomimimo.com": "xiaomi", } @@ -249,6 +260,9 @@ def is_local_endpoint(base_url: str) -> bool: return False if host in _LOCAL_HOSTS: return True + # Docker / Podman / Lima internal DNS names (e.g. 
host.docker.internal) + if any(host.endswith(suffix) for suffix in _CONTAINER_LOCAL_SUFFIXES): + return True # RFC-1918 private ranges and link-local import ipaddress try: diff --git a/agent/models_dev.py b/agent/models_dev.py index 560e7cefec..f9eb49dbf2 100644 --- a/agent/models_dev.py +++ b/agent/models_dev.py @@ -161,6 +161,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = { "gemini": "google", "google": "google", "xai": "xai", + "xiaomi": "xiaomi", "nvidia": "nvidia", "groq": "groq", "mistral": "mistral", diff --git a/agent/prompt_builder.py b/agent/prompt_builder.py index 08b8fe0a6a..26d913a029 100644 --- a/agent/prompt_builder.py +++ b/agent/prompt_builder.py @@ -12,7 +12,7 @@ import threading from collections import OrderedDict from pathlib import Path -from hermes_constants import get_hermes_home +from hermes_constants import get_hermes_home, get_skills_dir from typing import Optional from agent.skill_utils import ( @@ -548,8 +548,7 @@ def build_skills_system_prompt( are read-only — they appear in the index but new skills are always created in the local dir. Local skills take precedence when names collide. """ - hermes_home = get_hermes_home() - skills_dir = hermes_home / "skills" + skills_dir = get_skills_dir() external_dirs = get_all_skills_dirs()[1:] # skip local (index 0) if not skills_dir.exists() and not external_dirs: diff --git a/agent/skill_utils.py b/agent/skill_utils.py index ba606b358d..97ba92b735 100644 --- a/agent/skill_utils.py +++ b/agent/skill_utils.py @@ -12,7 +12,7 @@ import sys from pathlib import Path from typing import Any, Dict, List, Set, Tuple -from hermes_constants import get_hermes_home +from hermes_constants import get_config_path, get_skills_dir logger = logging.getLogger(__name__) @@ -130,7 +130,7 @@ def get_disabled_skill_names(platform: str | None = None) -> Set[str]: Reads the config file directly (no CLI config imports) to stay lightweight. 
""" - config_path = get_hermes_home() / "config.yaml" + config_path = get_config_path() if not config_path.exists(): return set() try: @@ -178,7 +178,7 @@ def get_external_skills_dirs() -> List[Path]: path. Only directories that actually exist are returned. Duplicates and paths that resolve to the local ``~/.hermes/skills/`` are silently skipped. """ - config_path = get_hermes_home() / "config.yaml" + config_path = get_config_path() if not config_path.exists(): return [] try: @@ -200,7 +200,7 @@ def get_external_skills_dirs() -> List[Path]: if not isinstance(raw_dirs, list): return [] - local_skills = (get_hermes_home() / "skills").resolve() + local_skills = get_skills_dir().resolve() seen: Set[Path] = set() result: List[Path] = [] @@ -230,7 +230,7 @@ def get_all_skills_dirs() -> List[Path]: The local dir is always first (and always included even if it doesn't exist yet — callers handle that). External dirs follow in config order. """ - dirs = [get_hermes_home() / "skills"] + dirs = [get_skills_dir()] dirs.extend(get_external_skills_dirs()) return dirs @@ -384,7 +384,7 @@ def resolve_skill_config_values( current values (or the declared default if the key isn't set). Path values are expanded via ``os.path.expanduser``. 
""" - config_path = get_hermes_home() / "config.yaml" + config_path = get_config_path() config: Dict[str, Any] = {} if config_path.exists(): try: diff --git a/cli-config.yaml.example b/cli-config.yaml.example index e9284d8137..12e2b39995 100644 --- a/cli-config.yaml.example +++ b/cli-config.yaml.example @@ -24,6 +24,7 @@ model: # "minimax" - MiniMax global (requires: MINIMAX_API_KEY) # "minimax-cn" - MiniMax China (requires: MINIMAX_CN_API_KEY) # "huggingface" - Hugging Face Inference (requires: HF_TOKEN) + # "xiaomi" - Xiaomi MiMo (requires: XIAOMI_API_KEY) # "kilocode" - KiloCode gateway (requires: KILOCODE_API_KEY) # "ai-gateway" - Vercel AI Gateway (requires: AI_GATEWAY_API_KEY) # diff --git a/cli.py b/cli.py index 18f6df6711..1a57dd3eb2 100644 --- a/cli.py +++ b/cli.py @@ -2748,6 +2748,15 @@ class HermesCLI: self.api_key = api_key self.base_url = base_url + # When a custom_provider entry carries an explicit `model` field, + # use it as the effective model name. Without this, running + # `hermes chat --model ` sends the provider name + # (e.g. "my-provider") as the model string to the API instead of + # the configured model (e.g. "qwen3.6-plus"), causing 400 errors. + runtime_model = runtime.get("model") + if runtime_model and isinstance(runtime_model, str): + self.model = runtime_model + # Normalize model for the resolved provider (e.g. swap non-Codex # models when provider is openai-codex). Fixes #651. 
model_changed = self._normalize_model_for_provider(resolved_provider) diff --git a/cron/scheduler.py b/cron/scheduler.py index 0e04fb047b..870ebe1418 100644 --- a/cron/scheduler.py +++ b/cron/scheduler.py @@ -722,6 +722,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]: provider_sort=pr.get("sort"), disabled_toolsets=["cronjob", "messaging", "clarify"], quiet_mode=True, + skip_context_files=True, # Don't inject SOUL.md/AGENTS.md from scheduler cwd skip_memory=True, # Cron system prompts would corrupt user representations platform="cron", session_id=_cron_session_id, diff --git a/docs/migration/openclaw.md b/docs/migration/openclaw.md index c3aef46023..8545636abd 100644 --- a/docs/migration/openclaw.md +++ b/docs/migration/openclaw.md @@ -11,12 +11,14 @@ When you run `hermes setup` for the first time and Hermes detects `~/.openclaw`, ### 2. CLI Command (quick, scriptable) ```bash -hermes claw migrate # Full migration with confirmation prompt -hermes claw migrate --dry-run # Preview what would happen +hermes claw migrate # Preview then migrate (always shows preview first) +hermes claw migrate --dry-run # Preview only, no changes hermes claw migrate --preset user-data # Migrate without API keys/secrets hermes claw migrate --yes # Skip confirmation prompt ``` +The migration always shows a full preview of what will be imported before making any changes. You review the preview and confirm before anything is written. + **All options:** | Flag | Description | @@ -39,7 +41,7 @@ Ask the agent to run the migration for you: ``` The agent will use the `openclaw-migration` skill to: -1. Run a dry-run first to preview changes +1. Run a preview first to show what would change 2. Ask about conflict resolution (SOUL.md, skills, etc.) 3. Let you choose between `user-data` and `full` presets 4. 
Execute the migration with your choices @@ -58,16 +60,31 @@ The agent will use the `openclaw-migration` skill to: | Messaging settings | `~/.openclaw/config.yaml` (TELEGRAM_ALLOWED_USERS, MESSAGING_CWD) | `~/.hermes/.env` | | TTS assets | `~/.openclaw/workspace/tts/` | `~/.hermes/tts/` | +Workspace files are also checked at `workspace.default/` and `workspace-main/` as fallback paths (OpenClaw renamed `workspace/` to `workspace-main/` in recent versions). + ### `full` preset (adds to `user-data`) | Item | Source | Destination | |------|--------|-------------| -| Telegram bot token | `~/.openclaw/config.yaml` | `~/.hermes/.env` | -| OpenRouter API key | `~/.openclaw/.env` or config | `~/.hermes/.env` | -| OpenAI API key | `~/.openclaw/.env` or config | `~/.hermes/.env` | -| Anthropic API key | `~/.openclaw/.env` or config | `~/.hermes/.env` | -| ElevenLabs API key | `~/.openclaw/.env` or config | `~/.hermes/.env` | +| Telegram bot token | `openclaw.json` channels config | `~/.hermes/.env` | +| OpenRouter API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` | +| OpenAI API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` | +| Anthropic API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` | +| ElevenLabs API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` | -Only these 6 allowlisted secrets are ever imported. Other credentials are skipped and reported. +API keys are searched across four sources: inline config values, `~/.openclaw/.env`, the `openclaw.json` `"env"` sub-object, and per-agent auth profiles. + +Only allowlisted secrets are ever imported. Other credentials are skipped and reported. 
+ +## OpenClaw Schema Compatibility + +The migration handles both old and current OpenClaw config layouts: + +- **Channel tokens**: Reads from flat paths (`channels.telegram.botToken`) and the newer `accounts.default` layout (`channels.telegram.accounts.default.botToken`) +- **TTS provider**: OpenClaw renamed "edge" to "microsoft" — both are recognized and mapped to Hermes' "edge" +- **Provider API types**: Both short (`openai`, `anthropic`) and hyphenated (`openai-completions`, `anthropic-messages`, `google-generative-ai`) values are mapped correctly +- **thinkingDefault**: All enum values are handled including newer ones (`minimal`, `xhigh`, `adaptive`) +- **Matrix**: Uses `accessToken` field (not `botToken`) +- **SecretRef formats**: Plain strings, env templates (`${VAR}`), and `source: "env"` SecretRefs are resolved. `source: "file"` and `source: "exec"` SecretRefs produce a warning — add those keys manually after migration. ## Conflict Handling @@ -84,18 +101,24 @@ For skills, you can also use `--skill-conflict rename` to import conflicting ski ## Migration Report -Every migration (including dry runs) produces a report showing: +Every migration produces a report showing: - **Migrated items** — what was successfully imported - **Conflicts** — items skipped because they already exist - **Skipped items** — items not found in the source - **Errors** — items that failed to import -For execute runs, the full report is saved to `~/.hermes/migration/openclaw//`. +For executed migrations, the full report is saved to `~/.hermes/migration/openclaw//`. + +## Post-Migration Notes + +- **Skills require a new session** — imported skills take effect after restarting your agent or starting a new chat. +- **WhatsApp requires re-pairing** — WhatsApp uses QR-code pairing, not token-based auth. Run `hermes whatsapp` to pair. +- **Archive cleanup** — after migration, you'll be offered to rename `~/.openclaw/` to `.openclaw.pre-migration/` to prevent state confusion. 
You can also run `hermes claw cleanup` later. ## Troubleshooting ### "OpenClaw directory not found" -The migration looks for `~/.openclaw` by default. If your OpenClaw is installed elsewhere, use `--source`: +The migration looks for `~/.openclaw` by default, then tries `~/.clawdbot` and `~/.moldbot`. If your OpenClaw is installed elsewhere, use `--source`: ```bash hermes claw migrate --source /path/to/.openclaw ``` @@ -108,3 +131,12 @@ hermes skills install openclaw-migration ### Memory overflow If your OpenClaw MEMORY.md or USER.md exceeds Hermes' character limits, excess entries are exported to an overflow file in the migration report directory. You can manually review and add the most important ones. + +### API keys not found +Keys might be stored in different places depending on your OpenClaw setup: +- `~/.openclaw/.env` file +- Inline in `openclaw.json` under `models.providers.*.apiKey` +- In `openclaw.json` under the `"env"` or `"env.vars"` sub-objects +- In `~/.openclaw/agents/main/agent/auth-profiles.json` + +The migration checks all four. If keys use `source: "file"` or `source: "exec"` SecretRefs, they can't be resolved automatically — add them via `hermes config set`. 
diff --git a/gateway/config.py b/gateway/config.py index d2dc45eaec..34ef31d7b0 100644 --- a/gateway/config.py +++ b/gateway/config.py @@ -1017,6 +1017,9 @@ def _apply_env_overrides(config: GatewayConfig) -> None: weixin_group_allowed_users = os.getenv("WEIXIN_GROUP_ALLOWED_USERS", "").strip() if weixin_group_allowed_users: extra["group_allow_from"] = weixin_group_allowed_users + weixin_split_multiline = os.getenv("WEIXIN_SPLIT_MULTILINE_MESSAGES", "").strip() + if weixin_split_multiline: + extra["split_multiline_messages"] = weixin_split_multiline weixin_home = os.getenv("WEIXIN_HOME_CHANNEL", "").strip() if weixin_home: config.platforms[Platform.WEIXIN].home_channel = HomeChannel( diff --git a/gateway/platforms/api_server.py b/gateway/platforms/api_server.py index baada7e058..1954a2b9e5 100644 --- a/gateway/platforms/api_server.py +++ b/gateway/platforms/api_server.py @@ -53,6 +53,7 @@ DEFAULT_HOST = "127.0.0.1" DEFAULT_PORT = 8642 MAX_STORED_RESPONSES = 100 MAX_REQUEST_BYTES = 1_000_000 # 1 MB default limit for POST bodies +CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS = 30.0 def check_api_server_requirements() -> bool: @@ -762,7 +763,11 @@ class APIServerAdapter(BasePlatformAdapter): """ import queue as _q - sse_headers = {"Content-Type": "text/event-stream", "Cache-Control": "no-cache"} + sse_headers = { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "X-Accel-Buffering": "no", + } # CORS middleware can't inject headers into StreamResponse after # prepare() flushes them, so resolve CORS headers up front. 
origin = request.headers.get("Origin", "") @@ -775,6 +780,8 @@ class APIServerAdapter(BasePlatformAdapter): await response.prepare(request) try: + last_activity = time.monotonic() + # Role chunk role_chunk = { "id": completion_id, "object": "chat.completion.chunk", @@ -782,6 +789,7 @@ class APIServerAdapter(BasePlatformAdapter): "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}], } await response.write(f"data: {json.dumps(role_chunk)}\n\n".encode()) + last_activity = time.monotonic() # Helper — route a queue item to the correct SSE event. async def _emit(item): @@ -805,6 +813,7 @@ class APIServerAdapter(BasePlatformAdapter): "choices": [{"index": 0, "delta": {"content": item}, "finish_reason": None}], } await response.write(f"data: {json.dumps(content_chunk)}\n\n".encode()) + return time.monotonic() # Stream content chunks as they arrive from the agent loop = asyncio.get_event_loop() @@ -819,16 +828,19 @@ class APIServerAdapter(BasePlatformAdapter): delta = stream_q.get_nowait() if delta is None: break - await _emit(delta) + last_activity = await _emit(delta) except _q.Empty: break break + if time.monotonic() - last_activity >= CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS: + await response.write(b": keepalive\n\n") + last_activity = time.monotonic() continue if delta is None: # End of stream sentinel break - await _emit(delta) + last_activity = await _emit(delta) # Get usage from completed agent usage = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0} diff --git a/gateway/platforms/base.py b/gateway/platforms/base.py index b4c84f3119..45cb3694a7 100644 --- a/gateway/platforms/base.py +++ b/gateway/platforms/base.py @@ -823,7 +823,36 @@ class BasePlatformAdapter(ABC): result = handler(self) if asyncio.iscoroutine(result): await result - + + def _acquire_platform_lock(self, scope: str, identity: str, resource_desc: str) -> bool: + """Acquire a scoped lock for this adapter. 
Returns True on success.""" + from gateway.status import acquire_scoped_lock + self._platform_lock_scope = scope + self._platform_lock_identity = identity + acquired, existing = acquire_scoped_lock( + scope, identity, metadata={'platform': self.platform.value} + ) + if acquired: + return True + owner_pid = existing.get('pid') if isinstance(existing, dict) else None + message = ( + f'{resource_desc} already in use' + + (f' (PID {owner_pid})' if owner_pid else '') + + '. Stop the other gateway first.' + ) + logger.error('[%s] %s', self.name, message) + self._set_fatal_error(f'{scope}_lock', message, retryable=False) + return False + + def _release_platform_lock(self) -> None: + """Release the scoped lock acquired by _acquire_platform_lock.""" + identity = getattr(self, '_platform_lock_identity', None) + if not identity: + return + from gateway.status import release_scoped_lock + release_scoped_lock(self._platform_lock_scope, identity) + self._platform_lock_identity = None + @property def name(self) -> str: """Human-readable name for this adapter.""" diff --git a/gateway/platforms/bluebubbles.py b/gateway/platforms/bluebubbles.py index f50cd9503c..1150009965 100644 --- a/gateway/platforms/bluebubbles.py +++ b/gateway/platforms/bluebubbles.py @@ -30,6 +30,7 @@ from gateway.platforms.base import ( cache_audio_from_bytes, cache_document_from_bytes, ) +from gateway.platforms.helpers import strip_markdown logger = logging.getLogger(__name__) @@ -89,18 +90,7 @@ def _normalize_server_url(raw: str) -> str: return value.rstrip("/") -def _strip_markdown(text: str) -> str: - """Strip common markdown formatting for iMessage plain-text delivery.""" - text = re.sub(r"\*\*(.+?)\*\*", r"\1", text, flags=re.DOTALL) - text = re.sub(r"\*(.+?)\*", r"\1", text, flags=re.DOTALL) - text = re.sub(r"__(.+?)__", r"\1", text, flags=re.DOTALL) - text = re.sub(r"_(.+?)_", r"\1", text, flags=re.DOTALL) - text = re.sub(r"```[a-zA-Z0-9_+-]*\n?", "", text) - text = re.sub(r"`(.+?)`", r"\1", text) - 
text = re.sub(r"^#{1,6}\s+", "", text, flags=re.MULTILINE) - text = re.sub(r"\[([^\]]+)\]\(([^\)]+)\)", r"\1", text) - text = re.sub(r"\n{3,}", "\n\n", text) - return text.strip() + # --------------------------------------------------------------------------- @@ -393,7 +383,7 @@ class BlueBubblesAdapter(BasePlatformAdapter): reply_to: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None, ) -> SendResult: - text = _strip_markdown(content or "") + text = strip_markdown(content or "") if not text: return SendResult(success=False, error="BlueBubbles send requires text") chunks = self.truncate_message(text, max_length=self.MAX_MESSAGE_LENGTH) @@ -679,7 +669,7 @@ class BlueBubblesAdapter(BasePlatformAdapter): return info def format_message(self, content: str) -> str: - return _strip_markdown(content) + return strip_markdown(content) # ------------------------------------------------------------------ # Inbound attachment downloading (from #4588) diff --git a/gateway/platforms/dingtalk.py b/gateway/platforms/dingtalk.py index e83b902dfb..5d50deca58 100644 --- a/gateway/platforms/dingtalk.py +++ b/gateway/platforms/dingtalk.py @@ -42,6 +42,7 @@ except ImportError: httpx = None # type: ignore[assignment] from gateway.config import Platform, PlatformConfig +from gateway.platforms.helpers import MessageDeduplicator from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -52,8 +53,6 @@ from gateway.platforms.base import ( logger = logging.getLogger(__name__) MAX_MESSAGE_LENGTH = 20000 -DEDUP_WINDOW_SECONDS = 300 -DEDUP_MAX_SIZE = 1000 RECONNECT_BACKOFF = [2, 5, 10, 30, 60] _SESSION_WEBHOOKS_MAX = 500 _DINGTALK_WEBHOOK_RE = re.compile(r'^https://api\.dingtalk\.com/') @@ -89,8 +88,8 @@ class DingTalkAdapter(BasePlatformAdapter): self._stream_task: Optional[asyncio.Task] = None self._http_client: Optional["httpx.AsyncClient"] = None - # Message deduplication: msg_id -> timestamp - self._seen_messages: Dict[str, float] = {} + # Message deduplication + 
self._dedup = MessageDeduplicator(max_size=1000) # Map chat_id -> session_webhook for reply routing self._session_webhooks: Dict[str, str] = {} @@ -170,7 +169,7 @@ class DingTalkAdapter(BasePlatformAdapter): self._stream_client = None self._session_webhooks.clear() - self._seen_messages.clear() + self._dedup.clear() logger.info("[%s] Disconnected", self.name) # -- Inbound message processing ----------------------------------------- @@ -178,7 +177,7 @@ class DingTalkAdapter(BasePlatformAdapter): async def _on_message(self, message: "ChatbotMessage") -> None: """Process an incoming DingTalk chatbot message.""" msg_id = getattr(message, "message_id", None) or uuid.uuid4().hex - if self._is_duplicate(msg_id): + if self._dedup.is_duplicate(msg_id): logger.debug("[%s] Duplicate message %s, skipping", self.name, msg_id) return @@ -256,20 +255,6 @@ class DingTalkAdapter(BasePlatformAdapter): content = " ".join(parts).strip() return content - # -- Deduplication ------------------------------------------------------ - - def _is_duplicate(self, msg_id: str) -> bool: - """Check and record a message ID. 
Returns True if already seen.""" - now = time.time() - if len(self._seen_messages) > DEDUP_MAX_SIZE: - cutoff = now - DEDUP_WINDOW_SECONDS - self._seen_messages = {k: v for k, v in self._seen_messages.items() if v > cutoff} - - if msg_id in self._seen_messages: - return True - self._seen_messages[msg_id] = now - return False - # -- Outbound messaging ------------------------------------------------- async def send( diff --git a/gateway/platforms/discord.py b/gateway/platforms/discord.py index dcf05a1625..b1d07e5d65 100644 --- a/gateway/platforms/discord.py +++ b/gateway/platforms/discord.py @@ -45,6 +45,7 @@ sys.path.insert(0, str(_Path(__file__).resolve().parents[2])) from gateway.config import Platform, PlatformConfig import re +from gateway.platforms.helpers import MessageDeduplicator, ThreadParticipationTracker from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -450,18 +451,14 @@ class DiscordAdapter(BasePlatformAdapter): # Track threads where the bot has participated so follow-up messages # in those threads don't require @mention. Persisted to disk so the # set survives gateway restarts. - self._bot_participated_threads: set = self._load_participated_threads() + self._threads = ThreadParticipationTracker("discord") # Persistent typing indicator loops per channel (DMs don't reliably # show the standard typing gateway event for bots) self._typing_tasks: Dict[str, asyncio.Task] = {} self._bot_task: Optional[asyncio.Task] = None - # Cap to prevent unbounded growth (Discord threads get archived). - self._MAX_TRACKED_THREADS = 500 - # Dedup cache: message_id → timestamp. Prevents duplicate bot - # responses when Discord RESUME replays events after reconnects. - self._seen_messages: Dict[str, float] = {} - self._SEEN_TTL = 300 # 5 minutes - self._SEEN_MAX = 2000 # prune threshold + # Dedup cache: prevents duplicate bot responses when Discord + # RESUME replays events after reconnects. 
+ self._dedup = MessageDeduplicator() # Reply threading mode: "off" (no replies), "first" (reply on first # chunk only, default), "all" (reply-reference on every chunk). self._reply_to_mode: str = getattr(config, 'reply_to_mode', 'first') or 'first' @@ -502,18 +499,9 @@ class DiscordAdapter(BasePlatformAdapter): return False try: - # Acquire scoped lock to prevent duplicate bot token usage - from gateway.status import acquire_scoped_lock - self._token_lock_identity = self.config.token - acquired, existing = acquire_scoped_lock('discord-bot-token', self._token_lock_identity, metadata={'platform': 'discord'}) - if not acquired: - owner_pid = existing.get('pid') if isinstance(existing, dict) else None - message = f'Discord bot token already in use' + (f' (PID {owner_pid})' if owner_pid else '') + '. Stop the other gateway first.' - logger.error('[%s] %s', self.name, message) - self._set_fatal_error('discord_token_lock', message, retryable=False) + if not self._acquire_platform_lock('discord-bot-token', self.config.token, 'Discord bot token'): return False - # Parse allowed user entries (may contain usernames or IDs) allowed_env = os.getenv("DISCORD_ALLOWED_USERS", "") if allowed_env: @@ -569,17 +557,8 @@ class DiscordAdapter(BasePlatformAdapter): @self._client.event async def on_message(message: DiscordMessage): # Dedup: Discord RESUME replays events after reconnects (#4777) - msg_id = str(message.id) - now = time.time() - if msg_id in adapter_self._seen_messages: + if adapter_self._dedup.is_duplicate(str(message.id)): return - adapter_self._seen_messages[msg_id] = now - if len(adapter_self._seen_messages) > adapter_self._SEEN_MAX: - cutoff = now - adapter_self._SEEN_TTL - adapter_self._seen_messages = { - k: v for k, v in adapter_self._seen_messages.items() - if v > cutoff - } # Always ignore our own messages if message.author == self._client.user: @@ -685,23 +664,11 @@ class DiscordAdapter(BasePlatformAdapter): except asyncio.TimeoutError: logger.error("[%s] Timeout 
waiting for connection to Discord", self.name, exc_info=True) - try: - from gateway.status import release_scoped_lock - if getattr(self, '_token_lock_identity', None): - release_scoped_lock('discord-bot-token', self._token_lock_identity) - self._token_lock_identity = None - except Exception: - pass + self._release_platform_lock() return False except Exception as e: # pragma: no cover - defensive logging logger.error("[%s] Failed to connect to Discord: %s", self.name, e, exc_info=True) - try: - from gateway.status import release_scoped_lock - if getattr(self, '_token_lock_identity', None): - release_scoped_lock('discord-bot-token', self._token_lock_identity) - self._token_lock_identity = None - except Exception: - pass + self._release_platform_lock() return False async def disconnect(self) -> None: @@ -723,14 +690,7 @@ class DiscordAdapter(BasePlatformAdapter): self._client = None self._ready_event.clear() - # Release the token lock - try: - from gateway.status import release_scoped_lock - if getattr(self, '_token_lock_identity', None): - release_scoped_lock('discord-bot-token', self._token_lock_identity) - self._token_lock_identity = None - except Exception: - pass + self._release_platform_lock() logger.info("[%s] Disconnected", self.name) @@ -1870,7 +1830,7 @@ class DiscordAdapter(BasePlatformAdapter): # Track thread participation so follow-ups don't require @mention if thread_id: - self._track_thread(thread_id) + self._threads.mark(thread_id) # If a message was provided, kick off a new Hermes session in the thread starter = (message or "").strip() @@ -2241,49 +2201,6 @@ class DiscordAdapter(BasePlatformAdapter): return f"{parent_name} / {thread_name}" return thread_name - # ------------------------------------------------------------------ - # Thread participation persistence - # ------------------------------------------------------------------ - - @staticmethod - def _thread_state_path() -> Path: - """Path to the persisted thread participation set.""" - from 
hermes_cli.config import get_hermes_home - return get_hermes_home() / "discord_threads.json" - - @classmethod - def _load_participated_threads(cls) -> set: - """Load persisted thread IDs from disk.""" - path = cls._thread_state_path() - try: - if path.exists(): - data = json.loads(path.read_text(encoding="utf-8")) - if isinstance(data, list): - return set(data) - except Exception as e: - logger.debug("Could not load discord thread state: %s", e) - return set() - - def _save_participated_threads(self) -> None: - """Persist the current thread set to disk (best-effort).""" - path = self._thread_state_path() - try: - # Trim to most recent entries if over cap - thread_list = list(self._bot_participated_threads) - if len(thread_list) > self._MAX_TRACKED_THREADS: - thread_list = thread_list[-self._MAX_TRACKED_THREADS:] - self._bot_participated_threads = set(thread_list) - path.parent.mkdir(parents=True, exist_ok=True) - path.write_text(json.dumps(thread_list), encoding="utf-8") - except Exception as e: - logger.debug("Could not save discord thread state: %s", e) - - def _track_thread(self, thread_id: str) -> None: - """Add a thread to the participation set and persist.""" - if thread_id not in self._bot_participated_threads: - self._bot_participated_threads.add(thread_id) - self._save_participated_threads() - async def _handle_message(self, message: DiscordMessage) -> None: """Handle incoming Discord messages.""" # In server channels (not DMs), require the bot to be @mentioned @@ -2335,7 +2252,7 @@ class DiscordAdapter(BasePlatformAdapter): # Skip the mention check if the message is in a thread where # the bot has previously participated (auto-created or replied in). 
- in_bot_thread = is_thread and thread_id in self._bot_participated_threads + in_bot_thread = is_thread and thread_id in self._threads if require_mention and not is_free_channel and not in_bot_thread: if self._client.user not in message.mentions: @@ -2361,7 +2278,7 @@ class DiscordAdapter(BasePlatformAdapter): is_thread = True thread_id = str(thread.id) auto_threaded_channel = thread - self._track_thread(thread_id) + self._threads.mark(thread_id) # Determine message type msg_type = MessageType.TEXT @@ -2545,7 +2462,7 @@ class DiscordAdapter(BasePlatformAdapter): # Track thread participation so the bot won't require @mention for # follow-up messages in threads it has already engaged in. if thread_id: - self._track_thread(thread_id) + self._threads.mark(thread_id) # Only batch plain text messages — commands, media, etc. dispatch # immediately since they won't be split by the Discord client. diff --git a/gateway/platforms/feishu.py b/gateway/platforms/feishu.py index a88c7e52b9..16f5467b22 100644 --- a/gateway/platforms/feishu.py +++ b/gateway/platforms/feishu.py @@ -360,19 +360,21 @@ def _render_code_block_element(element: Dict[str, Any]) -> str: def _strip_markdown_to_plain_text(text: str) -> str: + """Strip markdown formatting to plain text for Feishu text fallbacks. + + Delegates common markdown stripping to the shared helper and adds + Feishu-specific patterns (blockquotes, strikethrough, underline tags, + horizontal rules, \\r\\n normalisation). 
+ """ + from gateway.platforms.helpers import strip_markdown plain = text.replace("\r\n", "\n") plain = _MARKDOWN_LINK_RE.sub(lambda m: f"{m.group(1)} ({m.group(2).strip()})", plain) - plain = re.sub(r"^#{1,6}\s+", "", plain, flags=re.MULTILINE) plain = re.sub(r"^>\s?", "", plain, flags=re.MULTILINE) plain = re.sub(r"^\s*---+\s*$", "---", plain, flags=re.MULTILINE) - plain = re.sub(r"```(?:[^\n]*\n)?([\s\S]*?)```", lambda m: m.group(1).strip("\n"), plain) - plain = re.sub(r"`([^`\n]+)`", r"\1", plain) - plain = re.sub(r"\*\*([^*\n]+)\*\*", r"\1", plain) - plain = re.sub(r"\*([^*\n]+)\*", r"\1", plain) plain = re.sub(r"~~([^~\n]+)~~", r"\1", plain) plain = re.sub(r"<u>([\s\S]*?)</u>", r"\1", plain) - plain = re.sub(r"\n{3,}", "\n\n", plain) - return plain.strip() + plain = strip_markdown(plain) + return plain def _coerce_int(value: Any, default: Optional[int] = None, min_value: int = 0) -> Optional[int]: diff --git a/gateway/platforms/helpers.py b/gateway/platforms/helpers.py new file mode 100644 index 0000000000..c834dd89ca --- /dev/null +++ b/gateway/platforms/helpers.py @@ -0,0 +1,261 @@ +"""Shared helper classes for gateway platform adapters. + +Extracts common patterns that were duplicated across 5-7 adapters: +message deduplication, text batch aggregation, markdown stripping, +and thread participation tracking. +""" + +import asyncio +import json +import logging +import re +import time +from pathlib import Path +from typing import TYPE_CHECKING, Dict, Optional + +if TYPE_CHECKING: + from gateway.platforms.base import BasePlatformAdapter, MessageEvent + +logger = logging.getLogger(__name__) + + +# ─── Message Deduplication ──────────────────────────────────────────────────── + + +class MessageDeduplicator: + """TTL-based message deduplication cache. + + Replaces the identical ``_seen_messages`` / ``_is_duplicate()`` pattern + previously duplicated in discord, slack, dingtalk, wecom, weixin, + mattermost, and feishu adapters. 
+ + Usage:: + + self._dedup = MessageDeduplicator() + + # In message handler: + if self._dedup.is_duplicate(msg_id): + return + """ + + def __init__(self, max_size: int = 2000, ttl_seconds: float = 300): + self._seen: Dict[str, float] = {} + self._max_size = max_size + self._ttl = ttl_seconds + + def is_duplicate(self, msg_id: str) -> bool: + """Return True if *msg_id* was already seen within the TTL window.""" + if not msg_id: + return False + now = time.time() + if msg_id in self._seen: + return True + self._seen[msg_id] = now + if len(self._seen) > self._max_size: + cutoff = now - self._ttl + self._seen = {k: v for k, v in self._seen.items() if v > cutoff} + return False + + def clear(self): + """Clear all tracked messages.""" + self._seen.clear() + + +# ─── Text Batch Aggregation ────────────────────────────────────────────────── + + +class TextBatchAggregator: + """Aggregates rapid-fire text events into single messages. + + Replaces the ``_enqueue_text_event`` / ``_flush_text_batch`` pattern + previously duplicated in telegram, discord, matrix, wecom, and feishu. 
+ + Usage:: + + self._text_batcher = TextBatchAggregator( + handler=self._message_handler, + batch_delay=0.6, + split_threshold=1900, + ) + + # In message dispatch: + if msg_type == MessageType.TEXT and self._text_batcher.is_enabled(): + self._text_batcher.enqueue(event, session_key) + return + """ + + def __init__( + self, + handler, + *, + batch_delay: float = 0.6, + split_delay: float = 2.0, + split_threshold: int = 4000, + ): + self._handler = handler + self._batch_delay = batch_delay + self._split_delay = split_delay + self._split_threshold = split_threshold + self._pending: Dict[str, "MessageEvent"] = {} + self._pending_tasks: Dict[str, asyncio.Task] = {} + + def is_enabled(self) -> bool: + """Return True if batching is active (delay > 0).""" + return self._batch_delay > 0 + + def enqueue(self, event: "MessageEvent", key: str) -> None: + """Add *event* to the pending batch for *key*.""" + chunk_len = len(event.text or "") + existing = self._pending.get(key) + if not existing: + event._last_chunk_len = chunk_len # type: ignore[attr-defined] + self._pending[key] = event + else: + existing.text = f"{existing.text}\n{event.text}" + existing._last_chunk_len = chunk_len # type: ignore[attr-defined] + + # Cancel prior flush timer, start a new one + prior = self._pending_tasks.get(key) + if prior and not prior.done(): + prior.cancel() + self._pending_tasks[key] = asyncio.create_task(self._flush(key)) + + async def _flush(self, key: str) -> None: + """Wait then dispatch the batched event for *key*.""" + current_task = self._pending_tasks.get(key) + pending = self._pending.get(key) + last_len = getattr(pending, "_last_chunk_len", 0) if pending else 0 + + # Use longer delay when the last chunk looks like a split message + delay = self._split_delay if last_len >= self._split_threshold else self._batch_delay + await asyncio.sleep(delay) + + event = self._pending.pop(key, None) + if event: + try: + await self._handler(event) + except Exception: + 
logger.exception("[TextBatchAggregator] Error dispatching batched event for %s", key) + + if self._pending_tasks.get(key) is current_task: + self._pending_tasks.pop(key, None) + + def cancel_all(self) -> None: + """Cancel all pending flush tasks.""" + for task in self._pending_tasks.values(): + if not task.done(): + task.cancel() + self._pending_tasks.clear() + self._pending.clear() + + +# ─── Markdown Stripping ────────────────────────────────────────────────────── + +# Pre-compiled regexes for performance +_RE_BOLD = re.compile(r"\*\*(.+?)\*\*", re.DOTALL) +_RE_ITALIC_STAR = re.compile(r"\*(.+?)\*", re.DOTALL) +_RE_BOLD_UNDER = re.compile(r"__(.+?)__", re.DOTALL) +_RE_ITALIC_UNDER = re.compile(r"_(.+?)_", re.DOTALL) +_RE_CODE_BLOCK = re.compile(r"```[a-zA-Z0-9_+-]*\n?") +_RE_INLINE_CODE = re.compile(r"`(.+?)`") +_RE_HEADING = re.compile(r"^#{1,6}\s+", re.MULTILINE) +_RE_LINK = re.compile(r"\[([^\]]+)\]\([^\)]+\)") +_RE_MULTI_NEWLINE = re.compile(r"\n{3,}") + + +def strip_markdown(text: str) -> str: + """Strip markdown formatting for plain-text platforms (SMS, iMessage, etc.). + + Replaces the identical ``_strip_markdown()`` functions previously + duplicated in sms.py, bluebubbles.py, and feishu.py. + """ + text = _RE_BOLD.sub(r"\1", text) + text = _RE_ITALIC_STAR.sub(r"\1", text) + text = _RE_BOLD_UNDER.sub(r"\1", text) + text = _RE_ITALIC_UNDER.sub(r"\1", text) + text = _RE_CODE_BLOCK.sub("", text) + text = _RE_INLINE_CODE.sub(r"\1", text) + text = _RE_HEADING.sub("", text) + text = _RE_LINK.sub(r"\1", text) + text = _RE_MULTI_NEWLINE.sub("\n\n", text) + return text.strip() + + +# ─── Thread Participation Tracking ─────────────────────────────────────────── + + +class ThreadParticipationTracker: + """Persistent tracking of threads the bot has participated in. + + Replaces the identical ``_load/_save_participated_threads`` + + ``_mark_thread_participated`` pattern previously duplicated in + discord.py and matrix.py. 
+ + Usage:: + + self._threads = ThreadParticipationTracker("discord") + + # Check membership: + if thread_id in self._threads: + ... + + # Mark participation: + self._threads.mark(thread_id) + """ + + _MAX_TRACKED = 500 + + def __init__(self, platform_name: str, max_tracked: int = 500): + self._platform = platform_name + self._max_tracked = max_tracked + self._threads: set = self._load() + + def _state_path(self) -> Path: + from hermes_constants import get_hermes_home + return get_hermes_home() / f"{self._platform}_threads.json" + + def _load(self) -> set: + path = self._state_path() + if path.exists(): + try: + return set(json.loads(path.read_text(encoding="utf-8"))) + except Exception: + pass + return set() + + def _save(self) -> None: + path = self._state_path() + path.parent.mkdir(parents=True, exist_ok=True) + thread_list = list(self._threads) + if len(thread_list) > self._max_tracked: + thread_list = thread_list[-self._max_tracked:] + self._threads = set(thread_list) + path.write_text(json.dumps(thread_list), encoding="utf-8") + + def mark(self, thread_id: str) -> None: + """Mark *thread_id* as participated and persist.""" + if thread_id not in self._threads: + self._threads.add(thread_id) + self._save() + + def __contains__(self, thread_id: str) -> bool: + return thread_id in self._threads + + def clear(self) -> None: + self._threads.clear() + + +# ─── Phone Number Redaction ────────────────────────────────────────────────── + + +def redact_phone(phone: str) -> str: + """Redact a phone number for logging, preserving country code and last 4. + + Replaces the identical ``_redact_phone()`` functions in signal.py, + sms.py, and bluebubbles.py. 
+ """ + if not phone: + return "" + if len(phone) <= 8: + return phone[:2] + "****" + phone[-2:] if len(phone) > 4 else "****" + return phone[:4] + "****" + phone[-4:] diff --git a/gateway/platforms/matrix.py b/gateway/platforms/matrix.py index 7daf2e70e1..349f962d2e 100644 --- a/gateway/platforms/matrix.py +++ b/gateway/platforms/matrix.py @@ -92,6 +92,7 @@ from gateway.platforms.base import ( ProcessingOutcome, SendResult, ) +from gateway.platforms.helpers import ThreadParticipationTracker logger = logging.getLogger(__name__) @@ -216,8 +217,7 @@ class MatrixAdapter(BasePlatformAdapter): self._pending_megolm: list = [] # Thread participation tracking (for require_mention bypass) - self._bot_participated_threads: set = self._load_participated_threads() - self._MAX_TRACKED_THREADS = 500 + self._threads = ThreadParticipationTracker("matrix") # Mention/thread gating — parsed once from env vars. self._require_mention: bool = os.getenv("MATRIX_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no") @@ -1019,7 +1019,7 @@ class MatrixAdapter(BasePlatformAdapter): # Require-mention gating. if not is_dm: is_free_room = room_id in self._free_rooms - in_bot_thread = bool(thread_id and thread_id in self._bot_participated_threads) + in_bot_thread = bool(thread_id and thread_id in self._threads) if self._require_mention and not is_free_room and not in_bot_thread: if not is_mentioned: return None @@ -1027,7 +1027,7 @@ class MatrixAdapter(BasePlatformAdapter): # DM mention-thread. if is_dm and not thread_id and self._dm_mention_threads and is_mentioned: thread_id = event_id - self._track_thread(thread_id) + self._threads.mark(thread_id) # Strip mention from body. if is_mentioned: @@ -1036,7 +1036,7 @@ class MatrixAdapter(BasePlatformAdapter): # Auto-thread. 
if not is_dm and not thread_id and self._auto_thread: thread_id = event_id - self._track_thread(thread_id) + self._threads.mark(thread_id) display_name = await self._get_display_name(room_id, sender) source = self.build_source( @@ -1048,7 +1048,7 @@ class MatrixAdapter(BasePlatformAdapter): ) if thread_id: - self._track_thread(thread_id) + self._threads.mark(thread_id) self._background_read_receipt(room_id, event_id) @@ -1697,48 +1697,6 @@ class MatrixAdapter(BasePlatformAdapter): for rid in self._joined_rooms } - # ------------------------------------------------------------------ - # Thread participation tracking - # ------------------------------------------------------------------ - - @staticmethod - def _thread_state_path() -> Path: - """Path to the persisted thread participation set.""" - from hermes_cli.config import get_hermes_home - return get_hermes_home() / "matrix_threads.json" - - @classmethod - def _load_participated_threads(cls) -> set: - """Load persisted thread IDs from disk.""" - path = cls._thread_state_path() - try: - if path.exists(): - data = json.loads(path.read_text(encoding="utf-8")) - if isinstance(data, list): - return set(data) - except Exception as e: - logger.debug("Could not load matrix thread state: %s", e) - return set() - - def _save_participated_threads(self) -> None: - """Persist the current thread set to disk (best-effort).""" - path = self._thread_state_path() - try: - thread_list = list(self._bot_participated_threads) - if len(thread_list) > self._MAX_TRACKED_THREADS: - thread_list = thread_list[-self._MAX_TRACKED_THREADS:] - self._bot_participated_threads = set(thread_list) - path.parent.mkdir(parents=True, exist_ok=True) - path.write_text(json.dumps(thread_list), encoding="utf-8") - except Exception as e: - logger.debug("Could not save matrix thread state: %s", e) - - def _track_thread(self, thread_id: str) -> None: - """Add a thread to the participation set and persist.""" - if thread_id not in 
self._bot_participated_threads: - self._bot_participated_threads.add(thread_id) - self._save_participated_threads() - # ------------------------------------------------------------------ # Mention detection helpers # ------------------------------------------------------------------ diff --git a/gateway/platforms/mattermost.py b/gateway/platforms/mattermost.py index 56f29e8760..23a86f02b1 100644 --- a/gateway/platforms/mattermost.py +++ b/gateway/platforms/mattermost.py @@ -18,11 +18,11 @@ import json import logging import os import re -import time from pathlib import Path from typing import Any, Dict, List, Optional from gateway.config import Platform, PlatformConfig +from gateway.platforms.helpers import MessageDeduplicator from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -96,10 +96,8 @@ class MattermostAdapter(BasePlatformAdapter): or os.getenv("MATTERMOST_REPLY_MODE", "off") ).lower() - # Dedup cache: post_id → timestamp (prevent reprocessing) - self._seen_posts: Dict[str, float] = {} - self._SEEN_MAX = 2000 - self._SEEN_TTL = 300 # 5 minutes + # Dedup cache (prevent reprocessing) + self._dedup = MessageDeduplicator() # ------------------------------------------------------------------ # HTTP helpers @@ -604,10 +602,8 @@ class MattermostAdapter(BasePlatformAdapter): post_id = post.get("id", "") # Dedup. - self._prune_seen() - if post_id in self._seen_posts: + if self._dedup.is_duplicate(post_id): return - self._seen_posts[post_id] = time.time() # Build message event. 
channel_id = post.get("channel_id", "") @@ -734,13 +730,4 @@ class MattermostAdapter(BasePlatformAdapter): await self.handle_message(msg_event) - def _prune_seen(self) -> None: - """Remove expired entries from the dedup cache.""" - if len(self._seen_posts) < self._SEEN_MAX: - return - now = time.time() - self._seen_posts = { - pid: ts - for pid, ts in self._seen_posts.items() - if now - ts < self._SEEN_TTL - } + diff --git a/gateway/platforms/signal.py b/gateway/platforms/signal.py index 08b62f2a6d..8ef7bd0d60 100644 --- a/gateway/platforms/signal.py +++ b/gateway/platforms/signal.py @@ -37,6 +37,7 @@ from gateway.platforms.base import ( cache_document_from_bytes, cache_image_from_url, ) +from gateway.platforms.helpers import redact_phone logger = logging.getLogger(__name__) @@ -51,22 +52,10 @@ SSE_RETRY_DELAY_MAX = 60.0 HEALTH_CHECK_INTERVAL = 30.0 # seconds between health checks HEALTH_CHECK_STALE_THRESHOLD = 120.0 # seconds without SSE activity before concern -# E.164 phone number pattern for redaction -_PHONE_RE = re.compile(r"\+[1-9]\d{6,14}") - - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- -def _redact_phone(phone: str) -> str: - """Redact a phone number for logging: +15551234567 -> +155****4567.""" - if not phone: - return "" - if len(phone) <= 8: - return phone[:2] + "****" + phone[-2:] if len(phone) > 4 else "****" - return phone[:4] + "****" + phone[-4:] - def _parse_comma_list(value: str) -> List[str]: """Split a comma-separated string into a list, stripping whitespace.""" @@ -184,10 +173,8 @@ class SignalAdapter(BasePlatformAdapter): self._recent_sent_timestamps: set = set() self._max_recent_timestamps = 50 - self._phone_lock_identity: Optional[str] = None - logger.info("Signal adapter initialized: url=%s account=%s groups=%s", - self.http_url, _redact_phone(self.account), + self.http_url, redact_phone(self.account), "enabled" if 
self.group_allow_from else "disabled") # ------------------------------------------------------------------ @@ -202,23 +189,7 @@ class SignalAdapter(BasePlatformAdapter): # Acquire scoped lock to prevent duplicate Signal listeners for the same phone try: - from gateway.status import acquire_scoped_lock - - self._phone_lock_identity = self.account - acquired, existing = acquire_scoped_lock( - "signal-phone", - self._phone_lock_identity, - metadata={"platform": self.platform.value}, - ) - if not acquired: - owner_pid = existing.get("pid") if isinstance(existing, dict) else None - message = ( - "Another local Hermes gateway is already using this Signal account" - + (f" (PID {owner_pid})." if owner_pid else ".") - + " Stop the other gateway before starting a second Signal listener." - ) - logger.error("Signal: %s", message) - self._set_fatal_error("signal_phone_lock", message, retryable=False) + if not self._acquire_platform_lock('signal-phone', self.account, 'Signal account'): return False except Exception as e: logger.warning("Signal: Could not acquire phone lock (non-fatal): %s", e) @@ -270,13 +241,7 @@ class SignalAdapter(BasePlatformAdapter): await self.client.aclose() self.client = None - if self._phone_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("signal-phone", self._phone_lock_identity) - except Exception as e: - logger.warning("Signal: Error releasing phone lock: %s", e, exc_info=True) - self._phone_lock_identity = None + self._release_platform_lock() logger.info("Signal: disconnected") @@ -542,7 +507,7 @@ class SignalAdapter(BasePlatformAdapter): ) logger.debug("Signal: message from %s in %s: %s", - _redact_phone(sender), chat_id[:20], (text or "")[:50]) + redact_phone(sender), chat_id[:20], (text or "")[:50]) await self.handle_message(event) diff --git a/gateway/platforms/slack.py b/gateway/platforms/slack.py index 361f74882e..8f9934cf7a 100644 --- a/gateway/platforms/slack.py +++ b/gateway/platforms/slack.py 
@@ -33,6 +33,7 @@ from pathlib import Path as _Path sys.path.insert(0, str(_Path(__file__).resolve().parents[2])) from gateway.config import Platform, PlatformConfig +from gateway.platforms.helpers import MessageDeduplicator from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -89,11 +90,9 @@ class SlackAdapter(BasePlatformAdapter): self._team_clients: Dict[str, AsyncWebClient] = {} # team_id → WebClient self._team_bot_user_ids: Dict[str, str] = {} # team_id → bot_user_id self._channel_team: Dict[str, str] = {} # channel_id → team_id - # Dedup cache: event_ts → timestamp. Prevents duplicate bot - # responses when Socket Mode reconnects redeliver events. - self._seen_messages: Dict[str, float] = {} - self._SEEN_TTL = 300 # 5 minutes - self._SEEN_MAX = 2000 # prune threshold + # Dedup cache: prevents duplicate bot responses when Socket Mode + # reconnects redeliver events. + self._dedup = MessageDeduplicator() # Track pending approval message_ts → resolved flag to prevent # double-clicks on approval buttons. self._approval_resolved: Dict[str, bool] = {} @@ -152,15 +151,7 @@ class SlackAdapter(BasePlatformAdapter): logger.warning("[Slack] Failed to read %s: %s", tokens_file, e) try: - # Acquire scoped lock to prevent duplicate app token usage - from gateway.status import acquire_scoped_lock - self._token_lock_identity = app_token - acquired, existing = acquire_scoped_lock('slack-app-token', app_token, metadata={'platform': 'slack'}) - if not acquired: - owner_pid = existing.get('pid') if isinstance(existing, dict) else None - message = f'Slack app token already in use' + (f' (PID {owner_pid})' if owner_pid else '') + '. Stop the other gateway first.' 
- logger.error('[%s] %s', self.name, message) - self._set_fatal_error('slack_token_lock', message, retryable=False) + if not self._acquire_platform_lock('slack-app-token', app_token, 'Slack app token'): return False # First token is the primary — used for AsyncApp / Socket Mode @@ -247,14 +238,7 @@ class SlackAdapter(BasePlatformAdapter): logger.warning("[Slack] Error while closing Socket Mode handler: %s", e, exc_info=True) self._running = False - # Release the token lock (use stored identity, not re-read env) - try: - from gateway.status import release_scoped_lock - if getattr(self, '_token_lock_identity', None): - release_scoped_lock('slack-app-token', self._token_lock_identity) - self._token_lock_identity = None - except Exception: - pass + self._release_platform_lock() logger.info("[Slack] Disconnected") @@ -953,17 +937,8 @@ class SlackAdapter(BasePlatformAdapter): """Handle an incoming Slack message event.""" # Dedup: Slack Socket Mode can redeliver events after reconnects (#4777) event_ts = event.get("ts", "") - if event_ts: - now = time.time() - if event_ts in self._seen_messages: - return - self._seen_messages[event_ts] = now - if len(self._seen_messages) > self._SEEN_MAX: - cutoff = now - self._SEEN_TTL - self._seen_messages = { - k: v for k, v in self._seen_messages.items() - if v > cutoff - } + if event_ts and self._dedup.is_duplicate(event_ts): + return # Bot message filtering (SLACK_ALLOW_BOTS / config allow_bots): # "none" — ignore all bot messages (default, backward-compatible) diff --git a/gateway/platforms/sms.py b/gateway/platforms/sms.py index a0760199ba..161949dab3 100644 --- a/gateway/platforms/sms.py +++ b/gateway/platforms/sms.py @@ -10,6 +10,9 @@ Shares credentials with the optional telephony skill — same env vars: Gateway-specific env vars: - SMS_WEBHOOK_PORT (default 8080) + - SMS_WEBHOOK_HOST (default 0.0.0.0) + - SMS_WEBHOOK_URL (public URL for Twilio signature validation — required) + - SMS_INSECURE_NO_SIGNATURE (true to disable 
signature validation — dev only) - SMS_ALLOWED_USERS (comma-separated E.164 phone numbers) - SMS_ALLOW_ALL_USERS (true/false) - SMS_HOME_CHANNEL (phone number for cron delivery) @@ -17,9 +20,10 @@ Gateway-specific env vars: import asyncio import base64 +import hashlib +import hmac import logging import os -import re import urllib.parse from typing import Any, Dict, Optional @@ -30,24 +34,14 @@ from gateway.platforms.base import ( MessageType, SendResult, ) +from gateway.platforms.helpers import redact_phone, strip_markdown logger = logging.getLogger(__name__) TWILIO_API_BASE = "https://api.twilio.com/2010-04-01/Accounts" MAX_SMS_LENGTH = 1600 # ~10 SMS segments DEFAULT_WEBHOOK_PORT = 8080 - -# E.164 phone number pattern for redaction -_PHONE_RE = re.compile(r"\+[1-9]\d{6,14}") - - -def _redact_phone(phone: str) -> str: - """Redact a phone number for logging: +15551234567 -> +1555***4567.""" - if not phone: - return "" - if len(phone) <= 8: - return phone[:2] + "***" + phone[-2:] if len(phone) > 4 else "****" - return phone[:5] + "***" + phone[-4:] +DEFAULT_WEBHOOK_HOST = "0.0.0.0" def check_sms_requirements() -> bool: @@ -77,6 +71,8 @@ class SmsAdapter(BasePlatformAdapter): self._webhook_port: int = int( os.getenv("SMS_WEBHOOK_PORT", str(DEFAULT_WEBHOOK_PORT)) ) + self._webhook_host: str = os.getenv("SMS_WEBHOOK_HOST", DEFAULT_WEBHOOK_HOST) + self._webhook_url: str = os.getenv("SMS_WEBHOOK_URL", "").strip() self._runner = None self._http_session: Optional["aiohttp.ClientSession"] = None @@ -98,13 +94,33 @@ class SmsAdapter(BasePlatformAdapter): logger.error("[sms] TWILIO_PHONE_NUMBER not set — cannot send replies") return False + insecure_no_sig = os.getenv("SMS_INSECURE_NO_SIGNATURE", "").lower() == "true" + + if not self._webhook_url and not insecure_no_sig: + logger.error( + "[sms] Refusing to start: SMS_WEBHOOK_URL is required for Twilio " + "signature validation. Set it to the public URL configured in your " + "Twilio console (e.g. 
https://example.com/webhooks/twilio). " + "For local development without validation, set " + "SMS_INSECURE_NO_SIGNATURE=true (NOT recommended for production).", + ) + return False + + if insecure_no_sig and not self._webhook_url: + logger.warning( + "[sms] SMS_INSECURE_NO_SIGNATURE=true — Twilio signature validation " + "is DISABLED. Any client that can reach port %d can inject messages. " + "Do NOT use this in production.", + self._webhook_port, + ) + app = web.Application() app.router.add_post("/webhooks/twilio", self._handle_webhook) app.router.add_get("/health", lambda _: web.Response(text="ok")) self._runner = web.AppRunner(app) await self._runner.setup() - site = web.TCPSite(self._runner, "0.0.0.0", self._webhook_port) + site = web.TCPSite(self._runner, self._webhook_host, self._webhook_port) await site.start() self._http_session = aiohttp.ClientSession( timeout=aiohttp.ClientTimeout(total=30), @@ -112,9 +128,10 @@ class SmsAdapter(BasePlatformAdapter): self._running = True logger.info( - "[sms] Twilio webhook server listening on port %d, from: %s", + "[sms] Twilio webhook server listening on %s:%d, from: %s", + self._webhook_host, self._webhook_port, - _redact_phone(self._from_number), + redact_phone(self._from_number), ) return True @@ -163,7 +180,7 @@ class SmsAdapter(BasePlatformAdapter): error_msg = body.get("message", str(body)) logger.error( "[sms] send failed to %s: %s %s", - _redact_phone(chat_id), + redact_phone(chat_id), resp.status, error_msg, ) @@ -174,7 +191,7 @@ class SmsAdapter(BasePlatformAdapter): msg_sid = body.get("sid", "") last_result = SendResult(success=True, message_id=msg_sid) except Exception as e: - logger.error("[sms] send error to %s: %s", _redact_phone(chat_id), e) + logger.error("[sms] send error to %s: %s", redact_phone(chat_id), e) return SendResult(success=False, error=str(e)) finally: # Close session only if we created a fallback (no persistent session) @@ -192,16 +209,75 @@ class SmsAdapter(BasePlatformAdapter): def 
format_message(self, content: str) -> str: """Strip markdown — SMS renders it as literal characters.""" - content = re.sub(r"\*\*(.+?)\*\*", r"\1", content, flags=re.DOTALL) - content = re.sub(r"\*(.+?)\*", r"\1", content, flags=re.DOTALL) - content = re.sub(r"__(.+?)__", r"\1", content, flags=re.DOTALL) - content = re.sub(r"_(.+?)_", r"\1", content, flags=re.DOTALL) - content = re.sub(r"```[a-z]*\n?", "", content) - content = re.sub(r"`(.+?)`", r"\1", content) - content = re.sub(r"^#{1,6}\s+", "", content, flags=re.MULTILINE) - content = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", content) - content = re.sub(r"\n{3,}", "\n\n", content) - return content.strip() + return strip_markdown(content) + + # ------------------------------------------------------------------ + # Twilio signature validation + # ------------------------------------------------------------------ + + def _validate_twilio_signature( + self, url: str, post_params: dict, signature: str, + ) -> bool: + """Validate ``X-Twilio-Signature`` header (HMAC-SHA1, base64). + + Tries both with and without the default port for the URL scheme, + since Twilio may sign with either variant. 
+ + Algorithm: https://www.twilio.com/docs/usage/security#validating-requests + """ + if self._check_signature(url, post_params, signature): + return True + + variant = self._port_variant_url(url) + if variant and self._check_signature(variant, post_params, signature): + return True + + return False + + def _check_signature( + self, url: str, post_params: dict, signature: str, + ) -> bool: + """Compute and compare a single Twilio signature.""" + data_to_sign = url + for key in sorted(post_params.keys()): + data_to_sign += key + post_params[key] + mac = hmac.new( + self._auth_token.encode("utf-8"), + data_to_sign.encode("utf-8"), + hashlib.sha1, + ) + computed = base64.b64encode(mac.digest()).decode("utf-8") + return hmac.compare_digest(computed, signature) + + @staticmethod + def _port_variant_url(url: str) -> str | None: + """Return the URL with the default port toggled, or None. + + Only toggles default ports (443 for https, 80 for http). + Non-standard ports are never modified. + """ + parsed = urllib.parse.urlparse(url) + default_ports = {"https": 443, "http": 80} + default_port = default_ports.get(parsed.scheme) + if default_port is None: + return None + + if parsed.port == default_port: + # Has explicit default port → strip it + return urllib.parse.urlunparse( + (parsed.scheme, parsed.hostname, parsed.path, + parsed.params, parsed.query, parsed.fragment) + ) + elif parsed.port is None: + # No port → add default + netloc = f"{parsed.hostname}:{default_port}" + return urllib.parse.urlunparse( + (parsed.scheme, netloc, parsed.path, + parsed.params, parsed.query, parsed.fragment) + ) + + # Non-standard port — no variant + return None # ------------------------------------------------------------------ # Twilio webhook handler @@ -213,7 +289,7 @@ class SmsAdapter(BasePlatformAdapter): try: raw = await request.read() # Twilio sends form-encoded data, not JSON - form = urllib.parse.parse_qs(raw.decode("utf-8")) + form = urllib.parse.parse_qs(raw.decode("utf-8"), 
keep_blank_values=True) except Exception as e: logger.error("[sms] webhook parse error: %s", e) return web.Response( @@ -222,6 +298,27 @@ class SmsAdapter(BasePlatformAdapter): status=400, ) + # Validate Twilio request signature when SMS_WEBHOOK_URL is configured + if self._webhook_url: + twilio_sig = request.headers.get("X-Twilio-Signature", "") + if not twilio_sig: + logger.warning("[sms] Rejected: missing X-Twilio-Signature header") + return web.Response( + text='', + content_type="application/xml", + status=403, + ) + flat_params = {k: v[0] for k, v in form.items() if v} + if not self._validate_twilio_signature( + self._webhook_url, flat_params, twilio_sig + ): + logger.warning("[sms] Rejected: invalid Twilio signature") + return web.Response( + text='', + content_type="application/xml", + status=403, + ) + # Extract fields (parse_qs returns lists) from_number = (form.get("From", [""]))[0].strip() to_number = (form.get("To", [""]))[0].strip() @@ -236,7 +333,7 @@ class SmsAdapter(BasePlatformAdapter): # Ignore messages from our own number (echo prevention) if from_number == self._from_number: - logger.debug("[sms] ignoring echo from own number %s", _redact_phone(from_number)) + logger.debug("[sms] ignoring echo from own number %s", redact_phone(from_number)) return web.Response( text='', content_type="application/xml", @@ -244,8 +341,8 @@ class SmsAdapter(BasePlatformAdapter): logger.info( "[sms] inbound from %s -> %s: %s", - _redact_phone(from_number), - _redact_phone(to_number), + redact_phone(from_number), + redact_phone(to_number), text[:80], ) diff --git a/gateway/platforms/telegram.py b/gateway/platforms/telegram.py index 8b4e43514b..2653296026 100644 --- a/gateway/platforms/telegram.py +++ b/gateway/platforms/telegram.py @@ -147,7 +147,6 @@ class TelegramAdapter(BasePlatformAdapter): self._text_batch_split_delay_seconds = float(os.getenv("HERMES_TELEGRAM_TEXT_BATCH_SPLIT_DELAY_SECONDS", "2.0")) self._pending_text_batches: Dict[str, MessageEvent] = {} 
self._pending_text_batch_tasks: Dict[str, asyncio.Task] = {} - self._token_lock_identity: Optional[str] = None self._polling_error_task: Optional[asyncio.Task] = None self._polling_conflict_count: int = 0 self._polling_network_error_count: int = 0 @@ -300,9 +299,11 @@ class TelegramAdapter(BasePlatformAdapter): # Exhausted retries — fatal message = ( - "Another Telegram bot poller is already using this token. " + "Another process is already polling this Telegram bot token " + "(possibly OpenClaw or another Hermes instance). " "Hermes stopped Telegram polling after %d retries. " - "Make sure only one gateway instance is running for this bot token." + "Only one poller can run per token — stop the other process " + "and restart with 'hermes start'." % MAX_CONFLICT_RETRIES ) logger.error("[%s] %s Original error: %s", self.name, message, error) @@ -497,23 +498,7 @@ class TelegramAdapter(BasePlatformAdapter): return False try: - from gateway.status import acquire_scoped_lock - - self._token_lock_identity = self.config.token - acquired, existing = acquire_scoped_lock( - "telegram-bot-token", - self._token_lock_identity, - metadata={"platform": self.platform.value}, - ) - if not acquired: - owner_pid = existing.get("pid") if isinstance(existing, dict) else None - message = ( - "Another local Hermes gateway is already using this Telegram bot token" - + (f" (PID {owner_pid})." if owner_pid else ".") - + " Stop the other gateway before starting a second Telegram poller." 
- ) - logger.error("[%s] %s", self.name, message) - self._set_fatal_error("telegram_token_lock", message, retryable=False) + if not self._acquire_platform_lock('telegram-bot-token', self.config.token, 'Telegram bot token'): return False # Build the application @@ -737,12 +722,7 @@ class TelegramAdapter(BasePlatformAdapter): return True except Exception as e: - if self._token_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("telegram-bot-token", self._token_lock_identity) - except Exception: - pass + self._release_platform_lock() message = f"Telegram startup failed: {e}" self._set_fatal_error("telegram_connect_error", message, retryable=True) logger.error("[%s] Failed to connect to Telegram: %s", self.name, e, exc_info=True) @@ -768,12 +748,7 @@ class TelegramAdapter(BasePlatformAdapter): await self._app.shutdown() except Exception as e: logger.warning("[%s] Error during Telegram disconnect: %s", self.name, e, exc_info=True) - if self._token_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("telegram-bot-token", self._token_lock_identity) - except Exception as e: - logger.warning("[%s] Error releasing Telegram token lock: %s", self.name, e, exc_info=True) + self._release_platform_lock() for task in self._pending_photo_batch_tasks.values(): if task and not task.done(): @@ -784,7 +759,6 @@ class TelegramAdapter(BasePlatformAdapter): self._mark_disconnected() self._app = None self._bot = None - self._token_lock_identity = None logger.info("[%s] Disconnected from Telegram", self.name) def _should_thread_reply(self, reply_to: Optional[str], chunk_index: int) -> bool: diff --git a/gateway/platforms/wecom.py b/gateway/platforms/wecom.py index 6fde73927b..a0e71e01b6 100644 --- a/gateway/platforms/wecom.py +++ b/gateway/platforms/wecom.py @@ -59,6 +59,7 @@ except ImportError: httpx = None # type: ignore[assignment] from gateway.config import Platform, PlatformConfig +from 
gateway.platforms.helpers import MessageDeduplicator from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -92,7 +93,6 @@ REQUEST_TIMEOUT_SECONDS = 15.0 HEARTBEAT_INTERVAL_SECONDS = 30.0 RECONNECT_BACKOFF = [2, 5, 10, 30, 60] -DEDUP_WINDOW_SECONDS = 300 DEDUP_MAX_SIZE = 1000 IMAGE_MAX_BYTES = 10 * 1024 * 1024 @@ -172,7 +172,7 @@ class WeComAdapter(BasePlatformAdapter): self._listen_task: Optional[asyncio.Task] = None self._heartbeat_task: Optional[asyncio.Task] = None self._pending_responses: Dict[str, asyncio.Future] = {} - self._seen_messages: Dict[str, float] = {} + self._dedup = MessageDeduplicator(max_size=DEDUP_MAX_SIZE) self._reply_req_ids: Dict[str, str] = {} # Text batching: merge rapid successive messages (Telegram-style). @@ -250,7 +250,7 @@ class WeComAdapter(BasePlatformAdapter): await self._http_client.aclose() self._http_client = None - self._seen_messages.clear() + self._dedup.clear() logger.info("[%s] Disconnected", self.name) async def _cleanup_ws(self) -> None: @@ -476,7 +476,7 @@ class WeComAdapter(BasePlatformAdapter): return msg_id = str(body.get("msgid") or self._payload_req_id(payload) or uuid.uuid4().hex) - if self._is_duplicate(msg_id): + if self._dedup.is_duplicate(msg_id): logger.debug("[%s] Duplicate message %s ignored", self.name, msg_id) return self._remember_reply_req_id(msg_id, self._payload_req_id(payload)) @@ -636,6 +636,13 @@ class WeComAdapter(BasePlatformAdapter): if voice_text: text_parts.append(voice_text) + # Extract appmsg title (filename) for WeCom AI Bot attachments + if msgtype == "appmsg": + appmsg = body.get("appmsg") if isinstance(body.get("appmsg"), dict) else {} + title = str(appmsg.get("title") or "").strip() + if title: + text_parts.append(title) + quote = body.get("quote") if isinstance(body.get("quote"), dict) else {} quote_type = str(quote.get("msgtype") or "").lower() if quote_type == "text": @@ -668,6 +675,13 @@ class WeComAdapter(BasePlatformAdapter): refs.append(("image", 
body["image"])) if msgtype == "file" and isinstance(body.get("file"), dict): refs.append(("file", body["file"])) + # Handle appmsg (WeCom AI Bot attachments with PDF/Word/Excel) + if msgtype == "appmsg" and isinstance(body.get("appmsg"), dict): + appmsg = body["appmsg"] + if isinstance(appmsg.get("file"), dict): + refs.append(("file", appmsg["file"])) + elif isinstance(appmsg.get("image"), dict): + refs.append(("image", appmsg["image"])) quote = body.get("quote") if isinstance(body.get("quote"), dict) else {} quote_type = str(quote.get("msgtype") or "").lower() @@ -825,24 +839,6 @@ class WeComAdapter(BasePlatformAdapter): wildcard = self._groups.get("*") return wildcard if isinstance(wildcard, dict) else {} - def _is_duplicate(self, msg_id: str) -> bool: - now = time.time() - if len(self._seen_messages) > DEDUP_MAX_SIZE: - cutoff = now - DEDUP_WINDOW_SECONDS - self._seen_messages = { - key: ts for key, ts in self._seen_messages.items() if ts > cutoff - } - if self._reply_req_ids: - self._reply_req_ids = { - key: value for key, value in self._reply_req_ids.items() if key in self._seen_messages - } - - if msg_id in self._seen_messages: - return True - - self._seen_messages[msg_id] = now - return False - def _remember_reply_req_id(self, message_id: str, req_id: str) -> None: normalized_message_id = str(message_id or "").strip() normalized_req_id = str(req_id or "").strip() diff --git a/gateway/platforms/weixin.py b/gateway/platforms/weixin.py index 42b0b7fffe..5821d922f8 100644 --- a/gateway/platforms/weixin.py +++ b/gateway/platforms/weixin.py @@ -53,6 +53,7 @@ except ImportError: # pragma: no cover - dependency gate CRYPTO_AVAILABLE = False from gateway.config import Platform, PlatformConfig +from gateway.platforms.helpers import MessageDeduplicator from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -63,6 +64,7 @@ from gateway.platforms.base import ( cache_image_from_bytes, ) from hermes_constants import get_hermes_home +from utils import 
atomic_json_write ILINK_BASE_URL = "https://ilinkai.weixin.qq.com" WEIXIN_CDN_BASE_URL = "https://novac2c.cdn.weixin.qq.com/c2c" @@ -206,7 +208,7 @@ def save_weixin_account( "saved_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), } path = _account_file(hermes_home, account_id) - path.write_text(json.dumps(payload, indent=2), encoding="utf-8") + atomic_json_write(path, payload) try: path.chmod(0o600) except OSError: @@ -269,7 +271,7 @@ class ContextTokenStore: if key.startswith(prefix) } try: - self._path(account_id).write_text(json.dumps(payload), encoding="utf-8") + atomic_json_write(self._path(account_id), payload) except Exception as exc: logger.warning("weixin: failed to persist context tokens for %s: %s", _safe_id(account_id), exc) @@ -755,23 +757,58 @@ def _pack_markdown_blocks_for_weixin(content: str, max_length: int) -> List[str] return packed -def _split_text_for_weixin_delivery(content: str, max_length: int) -> List[str]: +def _split_text_for_weixin_delivery( + content: str, max_length: int, split_per_line: bool = False, +) -> List[str]: """Split content into sequential Weixin messages. - Prefer one message per top-level line/markdown unit when the author used - explicit line breaks. Oversized units fall back to block-aware packing so - long code fences still split safely. - """ - if len(content) <= max_length and "\n" not in content: - return [content] + *compact* (default): Keep everything in a single message whenever it fits + within the platform limit, even when the author used explicit line breaks. + Only fall back to block-aware packing when the payload exceeds + ``max_length``. 
- chunks: List[str] = [] - for unit in _split_delivery_units_for_weixin(content): - if len(unit) <= max_length: - chunks.append(unit) - continue - chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length)) - return chunks or [content] + *per_line* (``split_per_line=True``): Legacy behavior — top-level line + breaks become separate chat messages; oversized units still use + block-aware packing. + + The active mode is controlled via ``config.yaml`` -> + ``platforms.weixin.extra.split_multiline_messages`` (``true`` / ``false``) + or the env var ``WEIXIN_SPLIT_MULTILINE_MESSAGES``. + """ + if split_per_line: + # Legacy: one message per top-level delivery unit. + if len(content) <= max_length and "\n" not in content: + return [content] + chunks: List[str] = [] + for unit in _split_delivery_units_for_weixin(content): + if len(unit) <= max_length: + chunks.append(unit) + continue + chunks.extend(_pack_markdown_blocks_for_weixin(unit, max_length)) + return chunks or [content] + + # Compact (default): single message when under the limit. 
+ if len(content) <= max_length: + return [content] + return _pack_markdown_blocks_for_weixin(content, max_length) or [content] + + +def _coerce_bool(value: Any, default: bool = True) -> bool: + """Coerce a config value to bool, tolerating strings like ``"true"``.""" + if value is None: + return default + if isinstance(value, bool): + return value + if isinstance(value, (int, float)): + return bool(value) + text = str(value).strip().lower() + if not text: + return default + if text in {"1", "true", "yes", "on"}: + return True + if text in {"0", "false", "no", "off"}: + return False + return default def _extract_text(item_list: List[Dict[str, Any]]) -> str: @@ -833,7 +870,7 @@ def _load_sync_buf(hermes_home: str, account_id: str) -> str: def _save_sync_buf(hermes_home: str, account_id: str, sync_buf: str) -> None: path = _sync_buf_path(hermes_home, account_id) - path.write_text(json.dumps({"get_updates_buf": sync_buf}), encoding="utf-8") + atomic_json_write(path, {"get_updates_buf": sync_buf}) async def qr_login( @@ -972,8 +1009,7 @@ class WeixinAdapter(BasePlatformAdapter): self._typing_cache = TypingTicketCache() self._session: Optional[aiohttp.ClientSession] = None self._poll_task: Optional[asyncio.Task] = None - self._seen_messages: Dict[str, float] = {} - self._token_lock_identity: Optional[str] = None + self._dedup = MessageDeduplicator(ttl_seconds=MESSAGE_DEDUP_TTL_SECONDS) self._account_id = str(extra.get("account_id") or os.getenv("WEIXIN_ACCOUNT_ID", "")).strip() self._token = str(config.token or extra.get("token") or os.getenv("WEIXIN_TOKEN", "")).strip() @@ -981,6 +1017,16 @@ class WeixinAdapter(BasePlatformAdapter): self._cdn_base_url = str( extra.get("cdn_base_url") or os.getenv("WEIXIN_CDN_BASE_URL", WEIXIN_CDN_BASE_URL) ).strip().rstrip("/") + self._send_chunk_delay_seconds = float( + extra.get("send_chunk_delay_seconds") or os.getenv("WEIXIN_SEND_CHUNK_DELAY_SECONDS", "0.35") + ) + self._send_chunk_retries = int( + extra.get("send_chunk_retries") or 
os.getenv("WEIXIN_SEND_CHUNK_RETRIES", "2") + ) + self._send_chunk_retry_delay_seconds = float( + extra.get("send_chunk_retry_delay_seconds") + or os.getenv("WEIXIN_SEND_CHUNK_RETRY_DELAY_SECONDS", "1.0") + ) self._dm_policy = str(extra.get("dm_policy") or os.getenv("WEIXIN_DM_POLICY", "open")).strip().lower() self._group_policy = str(extra.get("group_policy") or os.getenv("WEIXIN_GROUP_POLICY", "disabled")).strip().lower() allow_from = extra.get("allow_from") @@ -991,6 +1037,11 @@ class WeixinAdapter(BasePlatformAdapter): group_allow_from = os.getenv("WEIXIN_GROUP_ALLOWED_USERS", "") self._allow_from = self._coerce_list(allow_from) self._group_allow_from = self._coerce_list(group_allow_from) + self._split_multiline_messages = _coerce_bool( + extra.get("split_multiline_messages") + or os.getenv("WEIXIN_SPLIT_MULTILINE_MESSAGES"), + default=False, + ) if self._account_id and not self._token: persisted = load_weixin_account(hermes_home, self._account_id) @@ -1026,23 +1077,7 @@ class WeixinAdapter(BasePlatformAdapter): return False try: - from gateway.status import acquire_scoped_lock - - self._token_lock_identity = self._token - acquired, existing = acquire_scoped_lock( - "weixin-bot-token", - self._token_lock_identity, - metadata={"platform": self.platform.value}, - ) - if not acquired: - owner_pid = existing.get("pid") if isinstance(existing, dict) else None - message = ( - "Another local Hermes gateway is already using this Weixin token" - + (f" (PID {owner_pid})." if owner_pid else ".") - + " Stop the other gateway before starting a second Weixin poller." 
- ) - logger.error("[%s] %s", self.name, message) - self._set_fatal_error("weixin_token_lock", message, retryable=False) + if not self._acquire_platform_lock('weixin-bot-token', self._token, 'Weixin bot token'): return False except Exception as exc: logger.debug("[%s] Token lock unavailable (non-fatal): %s", self.name, exc) @@ -1066,12 +1101,7 @@ class WeixinAdapter(BasePlatformAdapter): if self._session and not self._session.closed: await self._session.close() self._session = None - if self._token_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("weixin-bot-token", self._token_lock_identity) - except Exception as exc: - logger.warning("[%s] Error releasing Weixin token lock: %s", self.name, exc, exc_info=True) + self._release_platform_lock() self._mark_disconnected() logger.info("[%s] Disconnected", self.name) @@ -1149,16 +1179,8 @@ class WeixinAdapter(BasePlatformAdapter): return message_id = str(message.get("message_id") or "").strip() - if message_id: - now = time.time() - self._seen_messages = { - key: value - for key, value in self._seen_messages.items() - if now - value < MESSAGE_DEDUP_TTL_SECONDS - } - if message_id in self._seen_messages: - return - self._seen_messages[message_id] = now + if message_id and self._dedup.is_duplicate(message_id): + return chat_type, effective_chat_id = _guess_chat_type(message, self._account_id) if chat_type == "group": @@ -1330,7 +1352,50 @@ class WeixinAdapter(BasePlatformAdapter): logger.debug("[%s] getConfig failed for %s: %s", self.name, _safe_id(user_id), exc) def _split_text(self, content: str) -> List[str]: - return _split_text_for_weixin_delivery(content, self.MAX_MESSAGE_LENGTH) + return _split_text_for_weixin_delivery( + content, self.MAX_MESSAGE_LENGTH, self._split_multiline_messages, + ) + + async def _send_text_chunk( + self, + *, + chat_id: str, + chunk: str, + context_token: Optional[str], + client_id: str, + ) -> None: + """Send a single text chunk with per-chunk 
retry and backoff.""" + last_error: Optional[Exception] = None + for attempt in range(self._send_chunk_retries + 1): + try: + await _send_message( + self._session, + base_url=self._base_url, + token=self._token, + to=chat_id, + text=chunk, + context_token=context_token, + client_id=client_id, + ) + return + except Exception as exc: + last_error = exc + if attempt >= self._send_chunk_retries: + break + wait = self._send_chunk_retry_delay_seconds * (attempt + 1) + logger.warning( + "[%s] send chunk failed to=%s attempt=%d/%d, retrying in %.2fs: %s", + self.name, + _safe_id(chat_id), + attempt + 1, + self._send_chunk_retries + 1, + wait, + exc, + ) + if wait > 0: + await asyncio.sleep(wait) + assert last_error is not None + raise last_error async def send( self, @@ -1344,18 +1409,18 @@ class WeixinAdapter(BasePlatformAdapter): context_token = self._token_store.get(self._account_id, chat_id) last_message_id: Optional[str] = None try: - for chunk in self._split_text(self.format_message(content)): + chunks = self._split_text(self.format_message(content)) + for idx, chunk in enumerate(chunks): client_id = f"hermes-weixin-{uuid.uuid4().hex}" - await _send_message( - self._session, - base_url=self._base_url, - token=self._token, - to=chat_id, - text=chunk, + await self._send_text_chunk( + chat_id=chat_id, + chunk=chunk, context_token=context_token, client_id=client_id, ) last_message_id = client_id + if idx < len(chunks) - 1 and self._send_chunk_delay_seconds > 0: + await asyncio.sleep(self._send_chunk_delay_seconds) return SendResult(success=True, message_id=last_message_id) except Exception as exc: logger.error("[%s] send failed to=%s: %s", self.name, _safe_id(chat_id), exc) diff --git a/gateway/platforms/whatsapp.py b/gateway/platforms/whatsapp.py index a6475dcb80..c616f72448 100644 --- a/gateway/platforms/whatsapp.py +++ b/gateway/platforms/whatsapp.py @@ -145,7 +145,6 @@ class WhatsAppAdapter(BasePlatformAdapter): self._bridge_log: Optional[Path] = None 
self._poll_task: Optional[asyncio.Task] = None self._http_session: Optional["aiohttp.ClientSession"] = None - self._session_lock_identity: Optional[str] = None def _whatsapp_require_mention(self) -> bool: configured = self.config.extra.get("require_mention") @@ -290,23 +289,7 @@ class WhatsAppAdapter(BasePlatformAdapter): # Acquire scoped lock to prevent duplicate sessions try: - from gateway.status import acquire_scoped_lock - - self._session_lock_identity = str(self._session_path) - acquired, existing = acquire_scoped_lock( - "whatsapp-session", - self._session_lock_identity, - metadata={"platform": self.platform.value}, - ) - if not acquired: - owner_pid = existing.get("pid") if isinstance(existing, dict) else None - message = ( - "Another local Hermes gateway is already using this WhatsApp session" - + (f" (PID {owner_pid})." if owner_pid else ".") - + " Stop the other gateway before starting a second WhatsApp bridge." - ) - logger.error("[%s] %s", self.name, message) - self._set_fatal_error("whatsapp_session_lock", message, retryable=False) + if not self._acquire_platform_lock('whatsapp-session', str(self._session_path), 'WhatsApp session'): return False except Exception as e: logger.warning("[%s] Could not acquire session lock (non-fatal): %s", self.name, e) @@ -468,12 +451,7 @@ class WhatsAppAdapter(BasePlatformAdapter): return True except Exception as e: - if self._session_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("whatsapp-session", self._session_lock_identity) - except Exception: - pass + self._release_platform_lock() logger.error("[%s] Failed to start bridge: %s", self.name, e, exc_info=True) self._close_bridge_log() return False @@ -546,17 +524,11 @@ class WhatsAppAdapter(BasePlatformAdapter): await self._http_session.close() self._http_session = None - if self._session_lock_identity: - try: - from gateway.status import release_scoped_lock - release_scoped_lock("whatsapp-session", 
self._session_lock_identity) - except Exception as e: - logger.warning("[%s] Error releasing WhatsApp session lock: %s", self.name, e, exc_info=True) + self._release_platform_lock() self._mark_disconnected() self._bridge_process = None self._close_bridge_log() - self._session_lock_identity = None print(f"[{self.name}] Disconnected") async def send( diff --git a/gateway/run.py b/gateway/run.py index 362b8650b6..d577fd34e1 100644 --- a/gateway/run.py +++ b/gateway/run.py @@ -352,19 +352,14 @@ def _build_media_placeholder(event) -> str: return "\n".join(parts) -def _dequeue_pending_text(adapter, session_key: str) -> str | None: - """Consume and return the text of a pending queued message. +def _dequeue_pending_event(adapter, session_key: str) -> MessageEvent | None: + """Consume and return the full pending event for a session. - Preserves media context for captionless photo/document events by - building a placeholder so the message isn't silently dropped. + Queued follow-ups must preserve their media metadata so they can re-enter + the normal image/STT/document preprocessing path instead of being reduced + to a placeholder string. """ - event = adapter.get_pending_message(session_key) - if not event: - return None - text = event.text - if not text and getattr(event, "media_urls", None): - text = _build_media_placeholder(event) - return text + return adapter.get_pending_message(session_key) def _check_unavailable_skill(command_name: str) -> str | None: @@ -1465,7 +1460,18 @@ class GatewayRunner: logger.info("Recovered %s background process(es) from previous run", recovered) except Exception as e: logger.warning("Process checkpoint recovery: %s", e) - + + # Suspend sessions that were active when the gateway last exited. + # This prevents stuck sessions from being blindly resumed on restart, + # which can create an unrecoverable loop (#7536). Suspended sessions + # auto-reset on the next incoming message, giving the user a clean start. 
+ try: + suspended = self.session_store.suspend_recently_active() + if suspended: + logger.info("Suspended %d in-flight session(s) from previous run", suspended) + except Exception as e: + logger.warning("Session suspension on startup failed: %s", e) + connected_count = 0 enabled_platform_count = 0 startup_nonretryable_errors: list[str] = [] @@ -2221,6 +2227,13 @@ class GatewayRunner: # are system-generated and must skip user authorization. if getattr(event, "internal", False): pass + elif source.user_id is None: + # Messages with no user identity (Telegram service messages, + # channel forwards, anonymous admin actions) cannot be + # authorized — drop silently instead of triggering the pairing + # flow with a None user_id. + logger.debug("Ignoring message with no user_id from %s", source.platform.value) + return None elif not self._is_user_authorized(source): logger.warning("Unauthorized user: %s (%s) on %s", source.user_id, source.user_name, source.platform.value) # In DMs: offer pairing code. In groups: silently ignore. @@ -2370,8 +2383,11 @@ class GatewayRunner: self._pending_messages.pop(_quick_key, None) if _quick_key in self._running_agents: del self._running_agents[_quick_key] - logger.info("HARD STOP for session %s — session lock released", _quick_key[:20]) - return "⚡ Force-stopped. The session is unlocked — you can send a new message." + # Mark session suspended so the next message starts fresh + # instead of resuming the stuck context (#7536). + self.session_store.suspend_session(_quick_key) + logger.info("HARD STOP for session %s — suspended, session lock released", _quick_key[:20]) + return "⚡ Force-stopped. The session is suspended — your next message will start fresh." 
# /reset and /new must bypass the running-agent guard so they # actually dispatch as commands instead of being queued as user @@ -2761,6 +2777,162 @@ class GatewayRunner: del self._running_agents[_quick_key] self._running_agents_ts.pop(_quick_key, None) + async def _prepare_inbound_message_text( + self, + *, + event: MessageEvent, + source: SessionSource, + history: List[Dict[str, Any]], + ) -> Optional[str]: + """Prepare inbound event text for the agent. + + Keep the normal inbound path and the queued follow-up path on the same + preprocessing pipeline so sender attribution, image enrichment, STT, + document notes, reply context, and @ references all behave the same. + """ + history = history or [] + message_text = event.text or "" + + _is_shared_thread = ( + source.chat_type != "dm" + and source.thread_id + and not getattr(self.config, "thread_sessions_per_user", False) + ) + if _is_shared_thread and source.user_name: + message_text = f"[{source.user_name}] {message_text}" + + if event.media_urls: + image_paths = [] + audio_paths = [] + for i, path in enumerate(event.media_urls): + mtype = event.media_types[i] if i < len(event.media_types) else "" + if mtype.startswith("image/") or event.message_type == MessageType.PHOTO: + image_paths.append(path) + if mtype.startswith("audio/") or event.message_type in (MessageType.VOICE, MessageType.AUDIO): + audio_paths.append(path) + + if image_paths: + message_text = await self._enrich_message_with_vision( + message_text, + image_paths, + ) + + if audio_paths: + message_text = await self._enrich_message_with_transcription( + message_text, + audio_paths, + ) + _stt_fail_markers = ( + "No STT provider", + "STT is disabled", + "can't listen", + "VOICE_TOOLS_OPENAI_KEY", + ) + if any(marker in message_text for marker in _stt_fail_markers): + _stt_adapter = self.adapters.get(source.platform) + _stt_meta = {"thread_id": source.thread_id} if source.thread_id else None + if _stt_adapter: + try: + _stt_msg = ( + "🎤 I received your 
voice message but can't transcribe it — " + "no speech-to-text provider is configured.\n\n" + "To enable voice: install faster-whisper " + "(`pip install faster-whisper` in the Hermes venv) " + "and set `stt.enabled: true` in config.yaml, " + "then /restart the gateway." + ) + if self._has_setup_skill(): + _stt_msg += "\n\nFor full setup instructions, type: `/skill hermes-agent-setup`" + await _stt_adapter.send( + source.chat_id, + _stt_msg, + metadata=_stt_meta, + ) + except Exception: + pass + + if event.media_urls and event.message_type == MessageType.DOCUMENT: + import mimetypes as _mimetypes + + _TEXT_EXTENSIONS = {".txt", ".md", ".csv", ".log", ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg"} + for i, path in enumerate(event.media_urls): + mtype = event.media_types[i] if i < len(event.media_types) else "" + if mtype in ("", "application/octet-stream"): + import os as _os2 + + _ext = _os2.path.splitext(path)[1].lower() + if _ext in _TEXT_EXTENSIONS: + mtype = "text/plain" + else: + guessed, _ = _mimetypes.guess_type(path) + if guessed: + mtype = guessed + if not mtype.startswith(("application/", "text/")): + continue + + import os as _os + import re as _re + + basename = _os.path.basename(path) + parts = basename.split("_", 2) + display_name = parts[2] if len(parts) >= 3 else basename + display_name = _re.sub(r'[^\w.\- ]', '_', display_name) + + if mtype.startswith("text/"): + context_note = ( + f"[The user sent a text document: '{display_name}'. " + f"Its content has been included below. " + f"The file is also saved at: {path}]" + ) + else: + context_note = ( + f"[The user sent a document: '{display_name}'. " + f"The file is saved at: {path}. 
" + f"Ask the user what they'd like you to do with it.]" + ) + message_text = f"{context_note}\n\n{message_text}" + + if getattr(event, "reply_to_text", None) and event.reply_to_message_id: + reply_snippet = event.reply_to_text[:500] + found_in_history = any( + reply_snippet[:200] in (msg.get("content") or "") + for msg in history + if msg.get("role") in ("assistant", "user", "tool") + ) + if not found_in_history: + message_text = f'[Replying to: "{reply_snippet}"]\n\n{message_text}' + + if "@" in message_text: + try: + from agent.context_references import preprocess_context_references_async + from agent.model_metadata import get_model_context_length + + _msg_cwd = os.environ.get("MESSAGING_CWD", os.path.expanduser("~")) + _msg_ctx_len = get_model_context_length( + self._model, + base_url=self._base_url or "", + ) + _ctx_result = await preprocess_context_references_async( + message_text, + cwd=_msg_cwd, + context_length=_msg_ctx_len, + allowed_root=_msg_cwd, + ) + if _ctx_result.blocked: + _adapter = self.adapters.get(source.platform) + if _adapter: + await _adapter.send( + source.chat_id, + "\n".join(_ctx_result.warnings) or "Context injection refused.", + ) + return None + if _ctx_result.expanded: + message_text = _ctx_result.message + except Exception as exc: + logger.debug("@ context reference expansion failed: %s", exc) + + return message_text + async def _handle_message_with_agent(self, event, source, _quick_key: str): """Inner handler that runs under the _running_agents sentinel guard.""" _msg_start_time = time.time() @@ -2812,7 +2984,9 @@ class GatewayRunner: # so the agent knows this is a fresh conversation (not an intentional /reset). if getattr(session_entry, 'was_auto_reset', False): reset_reason = getattr(session_entry, 'auto_reset_reason', None) or 'idle' - if reset_reason == "daily": + if reset_reason == "suspended": + context_note = "[System note: The user's previous session was stopped and suspended. 
This is a fresh conversation with no prior context.]" + elif reset_reason == "daily": context_note = "[System note: The user's session was automatically reset by the daily schedule. This is a fresh conversation with no prior context.]" else: context_note = "[System note: The user's previous session expired due to inactivity. This is a fresh conversation with no prior context.]" @@ -2829,7 +3003,9 @@ class GatewayRunner: ) platform_name = source.platform.value if source.platform else "" had_activity = getattr(session_entry, 'reset_had_activity', False) - should_notify = ( + # Suspended sessions always notify (they were explicitly stopped + # or crashed mid-operation) — skip the policy check. + should_notify = reset_reason == "suspended" or ( policy.notify and had_activity and platform_name not in policy.notify_exclude_platforms @@ -2837,7 +3013,9 @@ class GatewayRunner: if should_notify: adapter = self.adapters.get(source.platform) if adapter: - if reset_reason == "daily": + if reset_reason == "suspended": + reason_text = "previous session was stopped or interrupted" + elif reset_reason == "daily": reason_text = f"daily schedule at {policy.at_hour}:00" else: hours = policy.idle_minutes // 60 @@ -3195,149 +3373,13 @@ class GatewayRunner: # attachments (documents, audio, etc.) are not sent to the vision # tool even when they appear in the same message. # ----------------------------------------------------------------- - message_text = event.text or "" - - # ----------------------------------------------------------------- - # Sender attribution for shared thread sessions. - # - # When multiple users share a single thread session (the default for - # threads), prefix each message with [sender name] so the agent can - # tell participants apart. Skip for DMs (single-user by nature) and - # when per-user thread isolation is explicitly enabled. 
- # ----------------------------------------------------------------- - _is_shared_thread = ( - source.chat_type != "dm" - and source.thread_id - and not getattr(self.config, "thread_sessions_per_user", False) + message_text = await self._prepare_inbound_message_text( + event=event, + source=source, + history=history, ) - if _is_shared_thread and source.user_name: - message_text = f"[{source.user_name}] {message_text}" - - if event.media_urls: - image_paths = [] - for i, path in enumerate(event.media_urls): - # Check media_types if available; otherwise infer from message type - mtype = event.media_types[i] if i < len(event.media_types) else "" - is_image = ( - mtype.startswith("image/") - or event.message_type == MessageType.PHOTO - ) - if is_image: - image_paths.append(path) - if image_paths: - message_text = await self._enrich_message_with_vision( - message_text, image_paths - ) - - # ----------------------------------------------------------------- - # Auto-transcribe voice/audio messages sent by the user - # ----------------------------------------------------------------- - if event.media_urls: - audio_paths = [] - for i, path in enumerate(event.media_urls): - mtype = event.media_types[i] if i < len(event.media_types) else "" - is_audio = ( - mtype.startswith("audio/") - or event.message_type in (MessageType.VOICE, MessageType.AUDIO) - ) - if is_audio: - audio_paths.append(path) - if audio_paths: - message_text = await self._enrich_message_with_transcription( - message_text, audio_paths - ) - # If STT failed, send a direct message to the user so they - # know voice isn't configured — don't rely on the agent to - # relay the error clearly. 
- _stt_fail_markers = ( - "No STT provider", - "STT is disabled", - "can't listen", - "VOICE_TOOLS_OPENAI_KEY", - ) - if any(m in message_text for m in _stt_fail_markers): - _stt_adapter = self.adapters.get(source.platform) - _stt_meta = {"thread_id": source.thread_id} if source.thread_id else None - if _stt_adapter: - try: - _stt_msg = ( - "🎤 I received your voice message but can't transcribe it — " - "no speech-to-text provider is configured.\n\n" - "To enable voice: install faster-whisper " - "(`pip install faster-whisper` in the Hermes venv) " - "and set `stt.enabled: true` in config.yaml, " - "then /restart the gateway." - ) - # Point to setup skill if it's installed - if self._has_setup_skill(): - _stt_msg += "\n\nFor full setup instructions, type: `/skill hermes-agent-setup`" - await _stt_adapter.send( - source.chat_id, _stt_msg, - metadata=_stt_meta, - ) - except Exception: - pass - - # ----------------------------------------------------------------- - # Enrich document messages with context notes for the agent - # ----------------------------------------------------------------- - if event.media_urls and event.message_type == MessageType.DOCUMENT: - import mimetypes as _mimetypes - _TEXT_EXTENSIONS = {".txt", ".md", ".csv", ".log", ".json", ".xml", ".yaml", ".yml", ".toml", ".ini", ".cfg"} - for i, path in enumerate(event.media_urls): - mtype = event.media_types[i] if i < len(event.media_types) else "" - # Fall back to extension-based detection when MIME type is unreliable. 
- if mtype in ("", "application/octet-stream"): - import os as _os2 - _ext = _os2.path.splitext(path)[1].lower() - if _ext in _TEXT_EXTENSIONS: - mtype = "text/plain" - else: - guessed, _ = _mimetypes.guess_type(path) - if guessed: - mtype = guessed - if not mtype.startswith(("application/", "text/")): - continue - # Extract display filename by stripping the doc_{uuid12}_ prefix - import os as _os - basename = _os.path.basename(path) - # Format: doc_<12hex>_ - parts = basename.split("_", 2) - display_name = parts[2] if len(parts) >= 3 else basename - # Sanitize to prevent prompt injection via filenames - import re as _re - display_name = _re.sub(r'[^\w.\- ]', '_', display_name) - - if mtype.startswith("text/"): - context_note = ( - f"[The user sent a text document: '{display_name}'. " - f"Its content has been included below. " - f"The file is also saved at: {path}]" - ) - else: - context_note = ( - f"[The user sent a document: '{display_name}'. " - f"The file is saved at: {path}. " - f"Ask the user what they'd like you to do with it.]" - ) - message_text = f"{context_note}\n\n{message_text}" - - # ----------------------------------------------------------------- - # Inject reply context when user replies to a message not in history. - # Telegram (and other platforms) let users reply to specific messages, - # but if the quoted message is from a previous session, cron delivery, - # or background task, the agent has no context about what's being - # referenced. Prepend the quoted text so the agent understands. 
(#1594) - # ----------------------------------------------------------------- - if getattr(event, 'reply_to_text', None) and event.reply_to_message_id: - reply_snippet = event.reply_to_text[:500] - found_in_history = any( - reply_snippet[:200] in (msg.get("content") or "") - for msg in history - if msg.get("role") in ("assistant", "user", "tool") - ) - if not found_in_history: - message_text = f'[Replying to: "{reply_snippet}"]\n\n{message_text}' + if message_text is None: + return try: # Emit agent:start hook @@ -3349,30 +3391,6 @@ class GatewayRunner: } await self.hooks.emit("agent:start", hook_ctx) - # Expand @ context references (@file:, @folder:, @diff, etc.) - if "@" in message_text: - try: - from agent.context_references import preprocess_context_references_async - from agent.model_metadata import get_model_context_length - _msg_cwd = os.environ.get("MESSAGING_CWD", os.path.expanduser("~")) - _msg_ctx_len = get_model_context_length( - self._model, base_url=self._base_url or "") - _ctx_result = await preprocess_context_references_async( - message_text, cwd=_msg_cwd, - context_length=_msg_ctx_len, allowed_root=_msg_cwd) - if _ctx_result.blocked: - _adapter = self.adapters.get(source.platform) - if _adapter: - await _adapter.send( - source.chat_id, - "\n".join(_ctx_result.warnings) or "Context injection refused.", - ) - return - if _ctx_result.expanded: - message_text = _ctx_result.message - except Exception as exc: - logger.debug("@ context reference expansion failed: %s", exc) - # Run the agent agent_result = await self._run_agent( message=message_text, @@ -4010,25 +4028,31 @@ class GatewayRunner: handles /stop before this method is reached. This handler fires only through normal command dispatch (no running agent) or as a fallback. Force-clean the session lock in all cases for safety. 
+ + When there IS a running/pending agent, the session is also marked + as *suspended* so the next message starts a fresh session instead + of resuming the stuck context (#7536). """ source = event.source session_entry = self.session_store.get_or_create_session(source) session_key = session_entry.session_key - + agent = self._running_agents.get(session_key) if agent is _AGENT_PENDING_SENTINEL: # Force-clean the sentinel so the session is unlocked. if session_key in self._running_agents: del self._running_agents[session_key] - logger.info("HARD STOP (pending) for session %s — sentinel cleared", session_key[:20]) - return "⚡ Force-stopped. The agent was still starting — session unlocked." + self.session_store.suspend_session(session_key) + logger.info("HARD STOP (pending) for session %s — suspended, sentinel cleared", session_key[:20]) + return "⚡ Force-stopped. The agent was still starting — your next message will start fresh." if agent: agent.interrupt("Stop requested") # Force-clean the session lock so a truly hung agent doesn't # keep it locked forever. if session_key in self._running_agents: del self._running_agents[session_key] - return "⚡ Force-stopped. The session is unlocked — you can send a new message." + self.session_store.suspend_session(session_key) + return "⚡ Force-stopped. Your next message will start a fresh session." else: return "No active task to stop." 
@@ -6694,6 +6718,8 @@ class GatewayRunner: chat_id=context.source.chat_id, chat_name=context.source.chat_name or "", thread_id=str(context.source.thread_id) if context.source.thread_id else "", + user_id=str(context.source.user_id) if context.source.user_id else "", + user_name=str(context.source.user_name) if context.source.user_name else "", ) def _clear_session_env(self, tokens: list) -> None: @@ -6906,6 +6932,8 @@ class GatewayRunner: platform_name = watcher.get("platform", "") chat_id = watcher.get("chat_id", "") thread_id = watcher.get("thread_id", "") + user_id = watcher.get("user_id", "") + user_name = watcher.get("user_name", "") agent_notify = watcher.get("notify_on_complete", False) notify_mode = self._load_background_notifications_mode() @@ -6961,6 +6989,8 @@ class GatewayRunner: platform=_platform_enum, chat_id=chat_id, thread_id=thread_id or None, + user_id=user_id or None, + user_name=user_name or None, ) synth_event = MessageEvent( text=synth_text, @@ -8115,17 +8145,16 @@ class GatewayRunner: # Get pending message from adapter. # Use session_key (not source.chat_id) to match adapter's storage keys. 
+ pending_event = None pending = None if result and adapter and session_key: - if result.get("interrupted"): - pending = _dequeue_pending_text(adapter, session_key) - if not pending and result.get("interrupt_message"): - pending = result.get("interrupt_message") - else: - pending = _dequeue_pending_text(adapter, session_key) - if pending: - logger.debug("Processing queued message after agent completion: '%s...'", pending[:40]) - + pending_event = _dequeue_pending_event(adapter, session_key) + if result.get("interrupted") and not pending_event and result.get("interrupt_message"): + pending = result.get("interrupt_message") + elif pending_event: + pending = pending_event.text or _build_media_placeholder(pending_event) + logger.debug("Processing queued message after agent completion: '%s...'", pending[:40]) + # Safety net: if the pending text is a slash command (e.g. "/stop", # "/new"), discard it — commands should never be passed to the agent # as user input. The primary fix is in base.py (commands bypass the @@ -8143,27 +8172,29 @@ class GatewayRunner: "commands must not be passed as agent input", _pending_cmd_word, ) + pending_event = None pending = None except Exception: pass - if self._draining and pending: + if self._draining and (pending_event or pending): logger.info( "Discarding pending follow-up for session %s during gateway %s", session_key[:20] if session_key else "?", self._status_action_label(), ) + pending_event = None pending = None - if pending: + if pending_event or pending: logger.debug("Processing pending message: '%s...'", pending[:40]) - + # Clear the adapter's interrupt event so the next _run_agent call # doesn't immediately re-trigger the interrupt before the new agent # even makes its first API call (this was causing an infinite loop). 
if adapter and hasattr(adapter, '_active_sessions') and session_key and session_key in adapter._active_sessions: adapter._active_sessions[session_key].clear() - + # Cap recursion depth to prevent resource exhaustion when the # user sends multiple messages while the agent keeps failing. (#816) if _interrupt_depth >= self._MAX_INTERRUPT_DEPTH: @@ -8172,9 +8203,10 @@ class GatewayRunner: "queueing message instead of recursing.", _interrupt_depth, session_key, ) - # Queue the pending message for normal processing on next turn adapter = self.adapters.get(source.platform) - if adapter and hasattr(adapter, 'queue_message'): + if adapter and pending_event: + merge_pending_message_event(adapter._pending_messages, session_key, pending_event) + elif adapter and hasattr(adapter, 'queue_message'): adapter.queue_message(session_key, pending) return result_holder[0] or {"final_response": response, "messages": history} @@ -8189,23 +8221,37 @@ class GatewayRunner: if first_response and not _already_streamed: try: await adapter.send(source.chat_id, first_response, - metadata=getattr(event, "metadata", None)) + metadata={"thread_id": source.thread_id} if source.thread_id else None) except Exception as e: logger.warning("Failed to send first response before queued message: %s", e) # else: interrupted — discard the interrupted response ("Operation # interrupted." is just noise; the user already knows they sent a # new message). 
- # Process the pending message with updated history updated_history = result.get("messages", history) + next_source = source + next_message = pending + next_message_id = None + if pending_event is not None: + next_source = getattr(pending_event, "source", None) or source + next_message = await self._prepare_inbound_message_text( + event=pending_event, + source=next_source, + history=updated_history, + ) + if next_message is None: + return result + next_message_id = getattr(pending_event, "message_id", None) + return await self._run_agent( - message=pending, + message=next_message, context_prompt=context_prompt, history=updated_history, - source=source, + source=next_source, session_id=session_id, session_key=session_key, _interrupt_depth=_interrupt_depth + 1, + event_message_id=next_message_id, ) finally: # Stop progress sender, interrupt monitor, and notification task diff --git a/gateway/session.py b/gateway/session.py index 2b32c18895..96013df513 100644 --- a/gateway/session.py +++ b/gateway/session.py @@ -368,6 +368,11 @@ class SessionEntry: # survives gateway restarts (the old in-memory _pre_flushed_sessions # set was lost on restart, causing redundant re-flushes). memory_flushed: bool = False + + # When True the next call to get_or_create_session() will auto-reset + # this session (create a new session_id) so the user starts fresh. + # Set by /stop to break stuck-resume loops (#7536). 
+ suspended: bool = False def to_dict(self) -> Dict[str, Any]: result = { @@ -387,6 +392,7 @@ class SessionEntry: "estimated_cost_usd": self.estimated_cost_usd, "cost_status": self.cost_status, "memory_flushed": self.memory_flushed, + "suspended": self.suspended, } if self.origin: result["origin"] = self.origin.to_dict() @@ -423,6 +429,7 @@ class SessionEntry: estimated_cost_usd=data.get("estimated_cost_usd", 0.0), cost_status=data.get("cost_status", "unknown"), memory_flushed=data.get("memory_flushed", False), + suspended=data.get("suspended", False), ) @@ -698,7 +705,12 @@ class SessionStore: if session_key in self._entries and not force_new: entry = self._entries[session_key] - reset_reason = self._should_reset(entry, source) + # Auto-reset sessions marked as suspended (e.g. after /stop + # broke a stuck loop — #7536). + if entry.suspended: + reset_reason = "suspended" + else: + reset_reason = self._should_reset(entry, source) if not reset_reason: entry.updated_at = now self._save() @@ -771,6 +783,44 @@ class SessionStore: entry.last_prompt_tokens = last_prompt_tokens self._save() + def suspend_session(self, session_key: str) -> bool: + """Mark a session as suspended so it auto-resets on next access. + + Used by ``/stop`` to prevent stuck sessions from being resumed + after a gateway restart (#7536). Returns True if the session + existed and was marked. + """ + with self._lock: + self._ensure_loaded_locked() + if session_key in self._entries: + self._entries[session_key].suspended = True + self._save() + return True + return False + + def suspend_recently_active(self, max_age_seconds: int = 120) -> int: + """Mark recently-active sessions as suspended. + + Called on gateway startup to prevent sessions that were likely + in-flight when the gateway last exited from being blindly resumed + (#7536). Only suspends sessions updated within *max_age_seconds* + to avoid resetting long-idle sessions that are harmless to resume. 
+ Returns the number of sessions that were suspended. + """ + import time as _time + + cutoff = _time.time() - max_age_seconds + count = 0 + with self._lock: + self._ensure_loaded_locked() + for entry in self._entries.values(): + if not entry.suspended and entry.updated_at >= cutoff: + entry.suspended = True + count += 1 + if count: + self._save() + return count + def reset_session(self, session_key: str) -> Optional[SessionEntry]: """Force reset a session, creating a new session ID.""" db_end_session_id = None diff --git a/gateway/session_context.py b/gateway/session_context.py index 775cd8698b..6d676dc1ec 100644 --- a/gateway/session_context.py +++ b/gateway/session_context.py @@ -46,12 +46,16 @@ _SESSION_PLATFORM: ContextVar[str] = ContextVar("HERMES_SESSION_PLATFORM", defau _SESSION_CHAT_ID: ContextVar[str] = ContextVar("HERMES_SESSION_CHAT_ID", default="") _SESSION_CHAT_NAME: ContextVar[str] = ContextVar("HERMES_SESSION_CHAT_NAME", default="") _SESSION_THREAD_ID: ContextVar[str] = ContextVar("HERMES_SESSION_THREAD_ID", default="") +_SESSION_USER_ID: ContextVar[str] = ContextVar("HERMES_SESSION_USER_ID", default="") +_SESSION_USER_NAME: ContextVar[str] = ContextVar("HERMES_SESSION_USER_NAME", default="") _VAR_MAP = { "HERMES_SESSION_PLATFORM": _SESSION_PLATFORM, "HERMES_SESSION_CHAT_ID": _SESSION_CHAT_ID, "HERMES_SESSION_CHAT_NAME": _SESSION_CHAT_NAME, "HERMES_SESSION_THREAD_ID": _SESSION_THREAD_ID, + "HERMES_SESSION_USER_ID": _SESSION_USER_ID, + "HERMES_SESSION_USER_NAME": _SESSION_USER_NAME, } @@ -60,6 +64,8 @@ def set_session_vars( chat_id: str = "", chat_name: str = "", thread_id: str = "", + user_id: str = "", + user_name: str = "", ) -> list: """Set all session context variables and return reset tokens. 
@@ -74,6 +80,8 @@ def set_session_vars( _SESSION_CHAT_ID.set(chat_id), _SESSION_CHAT_NAME.set(chat_name), _SESSION_THREAD_ID.set(thread_id), + _SESSION_USER_ID.set(user_id), + _SESSION_USER_NAME.set(user_name), ] return tokens @@ -87,6 +95,8 @@ def clear_session_vars(tokens: list) -> None: _SESSION_CHAT_ID, _SESSION_CHAT_NAME, _SESSION_THREAD_ID, + _SESSION_USER_ID, + _SESSION_USER_NAME, ] for var, token in zip(vars_in_order, tokens): var.reset(token) diff --git a/hermes_cli/auth.py b/hermes_cli/auth.py index c209a8b47e..56b9fb63c2 100644 --- a/hermes_cli/auth.py +++ b/hermes_cli/auth.py @@ -250,9 +250,39 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = { api_key_env_vars=("HF_TOKEN",), base_url_env_var="HF_BASE_URL", ), + "xiaomi": ProviderConfig( + id="xiaomi", + name="Xiaomi MiMo", + auth_type="api_key", + inference_base_url="https://api.xiaomimimo.com/v1", + api_key_env_vars=("XIAOMI_API_KEY",), + base_url_env_var="XIAOMI_BASE_URL", + ), } +# ============================================================================= +# Anthropic Key Helper +# ============================================================================= + +def get_anthropic_key() -> str: + """Return the first usable Anthropic credential, or ``""``. + + Checks both the ``.env`` file (via ``get_env_value``) and the process + environment (``os.getenv``). 
The fallback order mirrors the + ``PROVIDER_REGISTRY["anthropic"].api_key_env_vars`` tuple: + + ANTHROPIC_API_KEY -> ANTHROPIC_TOKEN -> CLAUDE_CODE_OAUTH_TOKEN + """ + from hermes_cli.config import get_env_value + + for var in PROVIDER_REGISTRY["anthropic"].api_key_env_vars: + value = get_env_value(var) or os.getenv(var, "") + if value: + return value + return "" + + # ============================================================================= # Kimi Code Endpoint Detection # ============================================================================= @@ -908,6 +938,7 @@ def resolve_provider( "opencode": "opencode-zen", "zen": "opencode-zen", "qwen-portal": "qwen-oauth", "qwen-cli": "qwen-oauth", "qwen-oauth": "qwen-oauth", "hf": "huggingface", "hugging-face": "huggingface", "huggingface-hub": "huggingface", + "mimo": "xiaomi", "xiaomi-mimo": "xiaomi", "go": "opencode-go", "opencode-go-sub": "opencode-go", "kilo": "kilocode", "kilo-code": "kilocode", "kilo-gateway": "kilocode", # Local server aliases — route through the generic custom provider diff --git a/hermes_cli/claw.py b/hermes_cli/claw.py index 281ca37f56..d0bfd73d23 100644 --- a/hermes_cli/claw.py +++ b/hermes_cli/claw.py @@ -1,8 +1,9 @@ """hermes claw — OpenClaw migration commands. 
Usage: - hermes claw migrate # Interactive migration from ~/.openclaw - hermes claw migrate --dry-run # Preview what would be migrated + hermes claw migrate # Preview then migrate (always shows preview first) + hermes claw migrate --dry-run # Preview only, no changes + hermes claw migrate --yes # Skip confirmation prompt hermes claw migrate --preset full --overwrite # Full migration, overwrite conflicts hermes claw cleanup # Archive leftover OpenClaw directories hermes claw cleanup --dry-run # Preview what would be archived @@ -51,6 +52,41 @@ _OPENCLAW_SCRIPT_INSTALLED = ( # Known OpenClaw directory names (current + legacy) _OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moldbot") +def _warn_if_gateway_running(auto_yes: bool) -> None: + """Check if a Hermes gateway is running with connected platforms. + + Migrating bot tokens while the gateway is polling will cause conflicts + (e.g. Telegram 409 "terminated by other getUpdates request"). Warn the + user and let them decide whether to continue. + """ + from gateway.status import get_running_pid, read_runtime_status + + if not get_running_pid(): + return + + data = read_runtime_status() or {} + platforms = data.get("platforms") or {} + connected = [name for name, info in platforms.items() + if isinstance(info, dict) and info.get("state") == "connected"] + if not connected: + return + + print() + print_error( + "Hermes gateway is running with active connections: " + + ", ".join(connected) + ) + print_info( + "Migrating bot tokens while the gateway is active will cause " + "conflicts (Telegram, Discord, and Slack only allow one active " + "session per token)." + ) + print_info("Recommendation: stop the gateway first with 'hermes stop'.") + print() + if not auto_yes and not prompt_yes_no("Continue anyway?", default=False): + print_info("Migration cancelled. 
Stop the gateway and try again.") + sys.exit(0) + # State files commonly found in OpenClaw workspace directories that cause # confusion after migration (the agent discovers them and writes to them) _WORKSPACE_STATE_GLOBS = ( @@ -237,12 +273,12 @@ def _cmd_migrate(args): # Show what we're doing hermes_home = get_hermes_home() + auto_yes = getattr(args, "yes", False) print() print_header("Migration Settings") print_info(f"Source: {source_dir}") print_info(f"Target: {hermes_home}") print_info(f"Preset: {preset}") - print_info(f"Mode: {'dry run (preview only)' if dry_run else 'execute'}") print_info(f"Overwrite: {'yes' if overwrite else 'no (skip conflicts)'}") print_info(f"Secrets: {'yes (allowlisted only)' if migrate_secrets else 'no'}") if skill_conflict != "skip": @@ -251,31 +287,85 @@ def _cmd_migrate(args): print_info(f"Workspace: {workspace_target}") print() - # For execute mode (non-dry-run), confirm unless --yes was passed - if not dry_run and not getattr(args, "yes", False): - if not prompt_yes_no("Proceed with migration?", default=True): - print_info("Migration cancelled.") - return + # Check if a gateway is running with connected platforms — migrating tokens + # while the gateway is active will cause conflicts (e.g. Telegram 409). 
+ _warn_if_gateway_running(auto_yes) # Ensure config.yaml exists before migration tries to read it config_path = get_config_path() if not config_path.exists(): save_config(load_config()) - # Load and run the migration + # Load the migration module try: mod = _load_migration_module(script_path) if mod is None: print_error("Could not load migration script.") return + except Exception as e: + print() + print_error(f"Could not load migration script: {e}") + logger.debug("OpenClaw migration error", exc_info=True) + return - selected = mod.resolve_selected_options(None, None, preset=preset) - ws_target = Path(workspace_target).resolve() if workspace_target else None + selected = mod.resolve_selected_options(None, None, preset=preset) + ws_target = Path(workspace_target).resolve() if workspace_target else None + # ── Phase 1: Always preview first ────────────────────────── + try: + preview = mod.Migrator( + source_root=source_dir.resolve(), + target_root=hermes_home.resolve(), + execute=False, + workspace_target=ws_target, + overwrite=overwrite, + migrate_secrets=migrate_secrets, + output_dir=None, + selected_options=selected, + preset_name=preset, + skill_conflict_mode=skill_conflict, + ) + preview_report = preview.migrate() + except Exception as e: + print() + print_error(f"Migration preview failed: {e}") + logger.debug("OpenClaw migration preview error", exc_info=True) + return + + preview_summary = preview_report.get("summary", {}) + preview_count = preview_summary.get("migrated", 0) + + if preview_count == 0: + print() + print_info("Nothing to migrate from OpenClaw.") + _print_migration_report(preview_report, dry_run=True) + return + + print() + print_header(f"Migration Preview — {preview_count} item(s) would be imported") + print_info("No changes have been made yet. 
Review the list below:") + _print_migration_report(preview_report, dry_run=True) + + # If --dry-run, stop here + if dry_run: + return + + # ── Phase 2: Confirm and execute ─────────────────────────── + print() + if not auto_yes: + if not sys.stdin.isatty(): + print_info("Non-interactive session — preview only.") + print_info("To execute, re-run with: hermes claw migrate --yes") + return + if not prompt_yes_no("Proceed with migration?", default=True): + print_info("Migration cancelled.") + return + + try: migrator = mod.Migrator( source_root=source_dir.resolve(), target_root=hermes_home.resolve(), - execute=not dry_run, + execute=True, workspace_target=ws_target, overwrite=overwrite, migrate_secrets=migrate_secrets, @@ -292,11 +382,11 @@ def _cmd_migrate(args): return # Print results - _print_migration_report(report, dry_run) + _print_migration_report(report, dry_run=False) - # After successful non-dry-run migration, offer to archive the source directory - if not dry_run and report.get("summary", {}).get("migrated", 0) > 0: - _offer_source_archival(source_dir, getattr(args, "yes", False)) + # After successful migration, offer to archive the source directory + if report.get("summary", {}).get("migrated", 0) > 0: + _offer_source_archival(source_dir, auto_yes) def _offer_source_archival(source_dir: Path, auto_yes: bool = False): @@ -330,6 +420,11 @@ def _offer_source_archival(source_dir: Path, auto_yes: bool = False): print_info("You can always rename it back if needed.") print() + if not auto_yes and not sys.stdin.isatty(): + print_info("Non-interactive session — skipping archival.") + print_info("Run later with: hermes claw cleanup") + return + if auto_yes or prompt_yes_no(f"Archive {source_dir} now?", default=True): try: archive_path = _archive_directory(source_dir) @@ -433,6 +528,9 @@ def _cmd_cleanup(args): if dry_run: archive_path = _archive_directory(source_dir, dry_run=True) print_info(f"Would archive: {source_dir} → {archive_path}") + elif not auto_yes and not 
sys.stdin.isatty(): + print_info(f"Non-interactive session — would archive: {source_dir}") + print_info("To execute, re-run with: hermes claw cleanup --yes") else: if auto_yes or prompt_yes_no(f"Archive {source_dir}?", default=True): try: diff --git a/hermes_cli/cli_output.py b/hermes_cli/cli_output.py new file mode 100644 index 0000000000..3d454eb308 --- /dev/null +++ b/hermes_cli/cli_output.py @@ -0,0 +1,79 @@ +"""Shared CLI output helpers for Hermes CLI modules. + +Extracts the identical ``print_info/success/warning/error`` and ``prompt()`` +functions previously duplicated across setup.py, tools_config.py, +mcp_config.py, and memory_setup.py. +""" + +import getpass +import sys + +from hermes_cli.colors import Colors, color + + +# ─── Print Helpers ──────────────────────────────────────────────────────────── + + +def print_info(text: str) -> None: + """Print a dim informational message.""" + print(color(f" {text}", Colors.DIM)) + + +def print_success(text: str) -> None: + """Print a green success message with ✓ prefix.""" + print(color(f"✓ {text}", Colors.GREEN)) + + +def print_warning(text: str) -> None: + """Print a yellow warning message with ⚠ prefix.""" + print(color(f"⚠ {text}", Colors.YELLOW)) + + +def print_error(text: str) -> None: + """Print a red error message with ✗ prefix.""" + print(color(f"✗ {text}", Colors.RED)) + + +def print_header(text: str) -> None: + """Print a bold yellow header.""" + print(color(f"\n {text}", Colors.YELLOW)) + + +# ─── Input Prompts ──────────────────────────────────────────────────────────── + + +def prompt( + question: str, + default: str | None = None, + password: bool = False, +) -> str: + """Prompt the user for input with optional default and password masking. + + Replaces the four independent ``_prompt()`` / ``prompt()`` implementations + in setup.py, tools_config.py, mcp_config.py, and memory_setup.py. + + Returns the user's input (stripped), or *default* if the user presses Enter. 
+ Returns empty string on Ctrl-C or EOF. + """ + suffix = f" [{default}]" if default else "" + display = color(f" {question}{suffix}: ", Colors.YELLOW) + + try: + if password: + value = getpass.getpass(display) + else: + value = input(display) + value = value.strip() + return value if value else (default or "") + except (KeyboardInterrupt, EOFError): + print() + return "" + + +def prompt_yes_no(question: str, default: bool = True) -> bool: + """Prompt for a yes/no answer. Returns bool.""" + hint = "Y/n" if default else "y/N" + answer = prompt(f"{question} ({hint})") + if not answer: + return default + return answer.lower().startswith("y") diff --git a/hermes_cli/config.py b/hermes_cli/config.py index e088bdfdf9..1545d15aad 100644 --- a/hermes_cli/config.py +++ b/hermes_cli/config.py @@ -32,7 +32,6 @@ _ENV_VAR_NAME_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$") _EXTRA_ENV_KEYS = frozenset({ "OPENAI_API_KEY", "OPENAI_BASE_URL", "ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", - "AUXILIARY_VISION_MODEL", "DISCORD_HOME_CHANNEL", "TELEGRAM_HOME_CHANNEL", "SIGNAL_ACCOUNT", "SIGNAL_HTTP_URL", "SIGNAL_ALLOWED_USERS", "SIGNAL_GROUP_ALLOWED_USERS", @@ -868,6 +867,21 @@ OPTIONAL_ENV_VARS = { "category": "provider", "advanced": True, }, + "XIAOMI_API_KEY": { + "description": "Xiaomi MiMo API key for MiMo models (mimo-v2-pro, mimo-v2-omni, mimo-v2-flash)", + "prompt": "Xiaomi MiMo API Key", + "url": "https://platform.xiaomimimo.com", + "password": True, + "category": "provider", + }, + "XIAOMI_BASE_URL": { + "description": "Xiaomi MiMo base URL override (default: https://api.xiaomimimo.com/v1)", + "prompt": "Xiaomi base URL (leave empty for default)", + "url": None, + "password": False, + "category": "provider", + "advanced": True, + }, # ── Tool API keys ── "EXA_API_KEY": { @@ -1483,7 +1497,7 @@ _KNOWN_ROOT_KEYS = { # Valid fields inside a custom_providers list entry _VALID_CUSTOM_PROVIDER_FIELDS = { - "name", "base_url", "api_key", "api_mode", "models", + "name", "base_url", "api_key", 
"api_mode", "model", "models", "context_length", "rate_limit_delay", } @@ -2568,7 +2582,8 @@ def show_config(): for env_key, name in keys: value = get_env_value(env_key) print(f" {name:<14} {redact_key(value)}") - anthropic_value = get_env_value("ANTHROPIC_TOKEN") or get_env_value("ANTHROPIC_API_KEY") + from hermes_cli.auth import get_anthropic_key + anthropic_value = get_anthropic_key() print(f" {'Anthropic':<14} {redact_key(anthropic_value)}") # Model settings @@ -2784,8 +2799,8 @@ def set_config_value(key: str, value: str): # Write only user config back (not the full merged defaults) ensure_hermes_home() - with open(config_path, 'w', encoding="utf-8") as f: - yaml.dump(user_config, f, default_flow_style=False, sort_keys=False) + from utils import atomic_yaml_write + atomic_yaml_write(config_path, user_config, sort_keys=False) # Keep .env in sync for keys that terminal_tool reads directly from env vars. # config.yaml is authoritative, but terminal_tool only reads TERMINAL_ENV etc. diff --git a/hermes_cli/doctor.py b/hermes_cli/doctor.py index 46242b68cc..13c904692c 100644 --- a/hermes_cli/doctor.py +++ b/hermes_cli/doctor.py @@ -51,6 +51,7 @@ _PROVIDER_ENV_HINTS = ( "AI_GATEWAY_API_KEY", "OPENCODE_ZEN_API_KEY", "OPENCODE_GO_API_KEY", + "XIAOMI_API_KEY", ) @@ -335,8 +336,8 @@ def run_doctor(args): model_section[k] = raw_config.pop(k) else: raw_config.pop(k) - with open(config_path, "w") as f: - yaml.dump(raw_config, f, default_flow_style=False) + from utils import atomic_yaml_write + atomic_yaml_write(config_path, raw_config) check_ok("Migrated stale root-level keys into model section") fixed_count += 1 else: @@ -685,7 +686,8 @@ def run_doctor(args): else: check_warn("OpenRouter API", "(not configured)") - anthropic_key = os.getenv("ANTHROPIC_TOKEN") or os.getenv("ANTHROPIC_API_KEY") + from hermes_cli.auth import get_anthropic_key + anthropic_key = get_anthropic_key() if anthropic_key: print(" Checking Anthropic API...", end="", flush=True) try: diff --git 
a/hermes_cli/gateway.py b/hermes_cli/gateway.py index b29511dd59..505bad0b51 100644 --- a/hermes_cli/gateway.py +++ b/hermes_cli/gateway.py @@ -157,30 +157,54 @@ def _request_gateway_self_restart(pid: int) -> bool: return True -def find_gateway_pids(exclude_pids: set | None = None) -> list: +def find_gateway_pids(exclude_pids: set | None = None, all_profiles: bool = False) -> list: """Find PIDs of running gateway processes. Args: exclude_pids: PIDs to exclude from the result (e.g. service-managed PIDs that should not be killed during a stale-process sweep). + all_profiles: When ``True``, return gateway PIDs across **all** + profiles (the pre-7923 global behaviour). ``hermes update`` + needs this because a code update affects every profile. + When ``False`` (default), only PIDs belonging to the current + Hermes profile are returned. """ - pids = [] _exclude = exclude_pids or set() + pids = [pid for pid in _get_service_pids() if pid not in _exclude] patterns = [ "hermes_cli.main gateway", + "hermes_cli.main --profile", + "hermes_cli.main -p", "hermes_cli/main.py gateway", + "hermes_cli/main.py --profile", + "hermes_cli/main.py -p", "hermes gateway", "gateway/run.py", ] + current_home = str(get_hermes_home().resolve()) + current_profile_arg = _profile_arg(current_home) + current_profile_name = current_profile_arg.split()[-1] if current_profile_arg else "" + + def _matches_current_profile(command: str) -> bool: + if current_profile_name: + return ( + f"--profile {current_profile_name}" in command + or f"-p {current_profile_name}" in command + or f"HERMES_HOME={current_home}" in command + ) + + if "--profile " in command or " -p " in command: + return False + if "HERMES_HOME=" in command and f"HERMES_HOME={current_home}" not in command: + return False + return True try: if is_windows(): - # Windows: use wmic to search command lines result = subprocess.run( ["wmic", "process", "get", "ProcessId,CommandLine", "/FORMAT:LIST"], capture_output=True, text=True, timeout=10 ) - 
# Parse WMIC LIST output: blocks of "CommandLine=...\nProcessId=...\n" current_cmd = "" for line in result.stdout.split('\n'): line = line.strip() @@ -188,7 +212,7 @@ def find_gateway_pids(exclude_pids: set | None = None) -> list: current_cmd = line[len("CommandLine="):] elif line.startswith("ProcessId="): pid_str = line[len("ProcessId="):] - if any(p in current_cmd for p in patterns): + if any(p in current_cmd for p in patterns) and (all_profiles or _matches_current_profile(current_cmd)): try: pid = int(pid_str) if pid != os.getpid() and pid not in pids and pid not in _exclude: @@ -198,41 +222,57 @@ def find_gateway_pids(exclude_pids: set | None = None) -> list: current_cmd = "" else: result = subprocess.run( - ["ps", "aux"], + ["ps", "eww", "-ax", "-o", "pid=,command="], capture_output=True, text=True, timeout=10, ) for line in result.stdout.split('\n'): - # Skip grep and current process - if 'grep' in line or str(os.getpid()) in line: + stripped = line.strip() + if not stripped or 'grep' in stripped: continue - for pattern in patterns: - if pattern in line: - parts = line.split() - if len(parts) > 1: - try: - pid = int(parts[1]) - if pid not in pids and pid not in _exclude: - pids.append(pid) - except ValueError: - continue - break - except Exception: + + pid = None + command = "" + + parts = stripped.split(None, 1) + if len(parts) == 2: + try: + pid = int(parts[0]) + command = parts[1] + except ValueError: + pid = None + + if pid is None: + aux_parts = stripped.split() + if len(aux_parts) > 10 and aux_parts[1].isdigit(): + pid = int(aux_parts[1]) + command = " ".join(aux_parts[10:]) + + if pid is None: + continue + if pid == os.getpid() or pid in pids or pid in _exclude: + continue + if any(pattern in command for pattern in patterns) and (all_profiles or _matches_current_profile(command)): + pids.append(pid) + except (OSError, subprocess.TimeoutExpired): pass return pids -def kill_gateway_processes(force: bool = False, exclude_pids: set | None = None) -> int: 
+def kill_gateway_processes(force: bool = False, exclude_pids: set | None = None, + all_profiles: bool = False) -> int: """Kill any running gateway processes. Returns count killed. Args: force: Use the platform's force-kill mechanism instead of graceful terminate. exclude_pids: PIDs to skip (e.g. service-managed PIDs that were just restarted and should not be killed). + all_profiles: When ``True``, kill across all profiles. Passed + through to :func:`find_gateway_pids`. """ - pids = find_gateway_pids(exclude_pids=exclude_pids) + pids = find_gateway_pids(exclude_pids=exclude_pids, all_profiles=all_profiles) killed = 0 for pid in pids: @@ -633,6 +673,17 @@ def print_systemd_linger_guidance() -> None: print(" If you want the gateway user service to survive logout, run:") print(" sudo loginctl enable-linger $USER") +def _launchd_user_home() -> Path: + """Return the real macOS user home for launchd artifacts. + + Profile-mode Hermes often sets ``HOME`` to a profile-scoped directory, but + launchd user agents still live under the actual account home. + """ + import pwd + + return Path(pwd.getpwuid(os.getuid()).pw_dir) + + def get_launchd_plist_path() -> Path: """Return the launchd plist path, scoped per profile. @@ -641,7 +692,7 @@ def get_launchd_plist_path() -> Path: """ suffix = _profile_suffix() name = f"ai.hermes.gateway-{suffix}" if suffix else "ai.hermes.gateway" - return Path.home() / "Library" / "LaunchAgents" / f"{name}.plist" + return _launchd_user_home() / "Library" / "LaunchAgents" / f"{name}.plist" def _detect_venv_dir() -> Path | None: """Detect the active virtualenv directory. @@ -839,6 +890,25 @@ def _normalize_service_definition(text: str) -> str: return "\n".join(line.rstrip() for line in text.strip().splitlines()) +def _normalize_launchd_plist_for_comparison(text: str) -> str: + """Normalize launchd plist text for staleness checks. 
+
+    The generated plist intentionally captures a broad PATH assembled from the
+    invoking shell so user-installed tools remain reachable under launchd.
+    That makes raw text comparison unstable across shells, so ignore the PATH
+    payload when deciding whether the installed plist is stale.
+    """
+    import re
+
+    normalized = _normalize_service_definition(text)
+    return re.sub(
+        r'(<key>PATH</key>\s*<string>)(.*?)(</string>)',
+        r'\1__HERMES_PATH__\3',
+        normalized,
+        flags=re.S,
+    )
+
+
 def systemd_unit_is_current(system: bool = False) -> bool:
     unit_path = get_systemd_unit_path(system=system)
     if not unit_path.exists():
@@ -1220,7 +1290,7 @@ def launchd_plist_is_current() -> bool:
     installed = plist_path.read_text(encoding="utf-8")
     expected = generate_launchd_plist()
 
-    return _normalize_service_definition(installed) == _normalize_service_definition(expected)
+    return _normalize_launchd_plist_for_comparison(installed) == _normalize_launchd_plist_for_comparison(expected)
 
 
 def refresh_launchd_plist_if_needed() -> bool:
@@ -1981,6 +2051,36 @@ def _setup_whatsapp():
     cmd_whatsapp(argparse.Namespace())
 
 
+def _setup_email():
+    """Configure Email via the standard platform setup."""
+    email_platform = next(p for p in _PLATFORMS if p["key"] == "email")
+    _setup_standard_platform(email_platform)
+
+
+def _setup_sms():
+    """Configure SMS (Twilio) via the standard platform setup."""
+    sms_platform = next(p for p in _PLATFORMS if p["key"] == "sms")
+    _setup_standard_platform(sms_platform)
+
+
+def _setup_dingtalk():
+    """Configure DingTalk via the standard platform setup."""
+    dingtalk_platform = next(p for p in _PLATFORMS if p["key"] == "dingtalk")
+    _setup_standard_platform(dingtalk_platform)
+
+
+def _setup_feishu():
+    """Configure Feishu / Lark via the standard platform setup."""
+    feishu_platform = next(p for p in _PLATFORMS if p["key"] == "feishu")
+    _setup_standard_platform(feishu_platform)
+
+
+def _setup_wecom():
+    """Configure WeCom (Enterprise WeChat) via the standard platform setup."""
+    
wecom_platform = next(p for p in _PLATFORMS if p["key"] == "wecom") + _setup_standard_platform(wecom_platform) + + def _is_service_installed() -> bool: """Check if the gateway is installed as a system service.""" if supports_systemd_services(): @@ -2540,7 +2640,7 @@ def gateway_command(args): service_available = True except subprocess.CalledProcessError: pass - killed = kill_gateway_processes() + killed = kill_gateway_processes(all_profiles=True) total = killed + (1 if service_available else 0) if total: print(f"✓ Stopped {total} gateway process(es) across all profiles") diff --git a/hermes_cli/main.py b/hermes_cli/main.py index 577aa67a74..c9c2471dd9 100644 --- a/hermes_cli/main.py +++ b/hermes_cli/main.py @@ -606,18 +606,58 @@ def _print_tui_exit_summary(session_id: Optional[str]) -> None: ) -def _find_bundled_tui() -> Optional[Path]: - """Find a bundled copy of the TUI. - Does *not* read from the `npm run build` dist dir, - as this would be a footgun when developing - """ - bundled_tui_dir = os.environ.get("HERMES_TUI_DIR") - if bundled_tui_dir and (Path(bundled_tui_dir) / "dist" / "entry.js").exists(): - return Path(bundled_tui_dir) +def _find_bundled_tui(tui_dir: Path) -> Optional[Path]: + """Directory whose dist/entry.js we should run: HERMES_TUI_DIR first, else repo ui-tui.""" + env = os.environ.get("HERMES_TUI_DIR") + if env: + p = Path(env) + if (p / "dist" / "entry.js").exists(): + return p + if (tui_dir / "dist" / "entry.js").exists(): + return tui_dir return None -def _make_tui_argv(tui_dir: Path) -> tuple[list[str], Path]: - """Gets argv to run tui + the working directory. 
Will npm install deps in dev mode.""" + +def _tui_build_needed(tui_dir: Path) -> bool: + entry = tui_dir / "dist" / "entry.js" + if not entry.exists(): + return True + dist_m = entry.stat().st_mtime + skip = frozenset({"node_modules", "dist"}) + for dirpath, dirnames, filenames in os.walk(tui_dir, topdown=True): + dirnames[:] = [d for d in dirnames if d not in skip] + for fn in filenames: + if fn.endswith((".ts", ".tsx")): + if os.path.getmtime(os.path.join(dirpath, fn)) > dist_m: + return True + for meta in ("package.json", "package-lock.json", "tsconfig.json", "tsconfig.build.json"): + mp = tui_dir / meta + if mp.exists() and mp.stat().st_mtime > dist_m: + return True + return False + + +def _hermes_ink_bundle_stale(tui_dir: Path) -> bool: + ink_root = tui_dir / "packages" / "hermes-ink" + bundle = ink_root / "dist" / "ink-bundle.js" + if not bundle.exists(): + return True + bm = bundle.stat().st_mtime + skip = frozenset({"node_modules", "dist"}) + for dirpath, dirnames, filenames in os.walk(ink_root, topdown=True): + dirnames[:] = [d for d in dirnames if d not in skip] + for fn in filenames: + if fn.endswith((".ts", ".tsx")): + if os.path.getmtime(os.path.join(dirpath, fn)) > bm: + return True + mp = ink_root / "package.json" + if mp.exists() and mp.stat().st_mtime > bm: + return True + return False + + +def _make_tui_argv(tui_dir: Path, tui_dev: bool) -> tuple[list[str], Path]: + """Ink TUI: --dev → tsx src; else node dist (HERMES_TUI_DIR or ui-tui, build when stale).""" def _node_bin(bin: str)-> str: path = shutil.which(bin) if not path: @@ -625,17 +665,15 @@ def _make_tui_argv(tui_dir: Path) -> tuple[list[str], Path]: sys.exit(1) return path - # use prebuilt TUI if it exists - bundled = _find_bundled_tui() - if bundled: - node = _node_bin("node") - return [node, str(bundled / "dist" / "entry.js")], bundled + # pre-built dist (nix / HERMES_TUI_DIR) needs no npm at all. 
+ if not tui_dev: + bundled = _find_bundled_tui(tui_dir) + if bundled: + node = _node_bin("node") + return [node, str(bundled / "dist" / "entry.js")], bundled - # dev mode - run via tsx - - # install deps if needed + npm = _node_bin("npm") if not (tui_dir / "node_modules").exists(): - npm = _node_bin("npm") print("Installing TUI dependencies…") result = subprocess.run( [npm, "install", "--silent", "--no-fund", "--no-audit", "--progress=false"], @@ -652,23 +690,60 @@ def _make_tui_argv(tui_dir: Path) -> tuple[list[str], Path]: print(preview) sys.exit(1) - tsx = tui_dir / "node_modules" / ".bin" / "tsx" - if tsx.exists(): - return [str(tsx), "src/entry.tsx"], tui_dir + if tui_dev: + if _hermes_ink_bundle_stale(tui_dir): + result = subprocess.run( + [npm, "run", "build", "--prefix", "packages/hermes-ink"], + cwd=str(tui_dir), + capture_output=True, + text=True, + ) + if result.returncode != 0: + combined = f"{result.stdout or ''}{result.stderr or ''}".strip() + preview = "\n".join(combined.splitlines()[-30:]) + print("@hermes/ink build failed.") + if preview: + print(preview) + sys.exit(1) + tsx = tui_dir / "node_modules" / ".bin" / "tsx" + if tsx.exists(): + return [str(tsx), "src/entry.tsx"], tui_dir + return [npm, "start"], tui_dir - npm = _node_bin("npm") - return [npm, "start"], tui_dir + if _tui_build_needed(tui_dir): + result = subprocess.run( + [npm, "run", "build"], + cwd=str(tui_dir), + capture_output=True, + text=True, + ) + if result.returncode != 0: + combined = f"{result.stdout or ''}{result.stderr or ''}".strip() + preview = "\n".join(combined.splitlines()[-30:]) + print("TUI build failed.") + if preview: + print(preview) + sys.exit(1) -def _launch_tui(resume_session_id: Optional[str] = None): + root = _find_bundled_tui(tui_dir) + if not root: + print("TUI build did not produce dist/entry.js") + sys.exit(1) + + node = _node_bin("node") + return [node, str(root / "dist" / "entry.js")], root + +def _launch_tui(resume_session_id: Optional[str] = None, 
tui_dev: bool = False): """Replace current process with the Ink TUI.""" tui_dir = PROJECT_ROOT / "ui-tui" env = os.environ.copy() - env["HERMES_ROOT"] = os.environ.get("HERMES_ROOT", str(PROJECT_ROOT)) + env["HERMES_PYTHON_SRC_ROOT"] = os.environ.get("HERMES_PYTHON_SRC_ROOT", str(PROJECT_ROOT)) + env.setdefault("HERMES_CWD", os.getcwd()) if resume_session_id: env["HERMES_TUI_RESUME"] = resume_session_id - argv, cwd = _make_tui_argv(tui_dir) + argv, cwd = _make_tui_argv(tui_dir, tui_dev) try: code = subprocess.call(argv, cwd=str(cwd), env=env) except KeyboardInterrupt: @@ -718,9 +793,6 @@ def cmd_chat(args): # If resolution fails, keep the original value — _init_agent will # report "Session not found" with the original input - if use_tui: - _launch_tui(getattr(args, "resume", None)) - # First-run guard: check if any provider is configured before launching if not _has_any_provider_configured(): print() @@ -770,6 +842,13 @@ def cmd_chat(args): if getattr(args, "source", None): os.environ["HERMES_SESSION_SOURCE"] = args.source + + if use_tui: + _launch_tui( + getattr(args, "resume", None), + tui_dev=getattr(args, "tui_dev", False), + ) + # Import and run the CLI from cli import main as cli_main @@ -1069,6 +1148,7 @@ def select_provider_and_model(args=None): "kilocode": "Kilo Code", "alibaba": "Alibaba Cloud (DashScope)", "huggingface": "Hugging Face", + "xiaomi": "Xiaomi MiMo", "custom": "Custom endpoint", } active_label = provider_labels.get(active, active) if active else "none" @@ -1101,6 +1181,7 @@ def select_provider_and_model(args=None): ("opencode-go", "OpenCode Go (open models, $10/month subscription)"), ("ai-gateway", "AI Gateway (Vercel — 200+ models, pay-per-use)"), ("alibaba", "Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"), + ("xiaomi", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"), ] def _named_custom_provider_map(cfg) -> dict[str, dict[str, str]]: @@ -1212,7 +1293,7 @@ def select_provider_and_model(args=None): 
_model_flow_anthropic(config, current_model) elif selected_provider == "kimi-coding": _model_flow_kimi(config, current_model) - elif selected_provider in ("gemini", "zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface"): + elif selected_provider in ("gemini", "zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface", "xiaomi"): _model_flow_api_key_provider(config, selected_provider, current_model) # ── Post-switch cleanup: clear stale OPENAI_BASE_URL ────────────── @@ -2682,13 +2763,8 @@ def _model_flow_anthropic(config, current_model=""): from hermes_cli.models import _PROVIDER_MODELS # Check ALL credential sources - existing_key = ( - get_env_value("ANTHROPIC_TOKEN") - or os.getenv("ANTHROPIC_TOKEN", "") - or get_env_value("ANTHROPIC_API_KEY") - or os.getenv("ANTHROPIC_API_KEY", "") - or os.getenv("CLAUDE_CODE_OAUTH_TOKEN", "") - ) + from hermes_cli.auth import get_anthropic_key + existing_key = get_anthropic_key() cc_available = False try: from agent.anthropic_adapter import read_claude_code_credentials, is_claude_code_token_valid @@ -3062,6 +3138,8 @@ def _update_via_zip(args): ) _install_python_dependencies_with_optional_fallback(pip_cmd) + _update_node_dependencies() + # Sync skills try: from tools.skills_sync import sync_skills @@ -3581,9 +3659,42 @@ def _install_python_dependencies_with_optional_fallback( print(f" ⚠ Skipped optional extras that still failed: {', '.join(failed_extras)}") +def _update_node_dependencies() -> None: + npm = shutil.which("npm") + if not npm: + return + + paths = ( + ("repo root", PROJECT_ROOT), + ("ui-tui", PROJECT_ROOT / "ui-tui"), + ) + if not any((path / "package.json").exists() for _, path in paths): + return + + print("→ Updating Node.js dependencies...") + for label, path in paths: + if not (path / "package.json").exists(): + continue + + result = subprocess.run( + [npm, "install", "--silent", "--no-fund", 
"--no-audit", "--progress=false"], + cwd=path, + capture_output=True, + text=True, + check=False, + ) + if result.returncode == 0: + print(f" ✓ {label}") + continue + + print(f" ⚠ npm install failed in {label}") + stderr = (result.stderr or "").strip() + if stderr: + print(f" {stderr.splitlines()[-1]}") + + def cmd_update(args): """Update Hermes Agent to the latest version.""" - import shutil from hermes_cli.config import is_managed, managed_error if is_managed(): @@ -3802,13 +3913,8 @@ def cmd_update(args): ) _install_python_dependencies_with_optional_fallback(pip_cmd) - # Check for Node.js deps - if (PROJECT_ROOT / "package.json").exists(): - import shutil - if shutil.which("npm"): - print("→ Updating Node.js dependencies...") - subprocess.run(["npm", "install", "--silent"], cwd=PROJECT_ROOT, check=False) - + _update_node_dependencies() + print() print("✓ Code updated!") @@ -4014,7 +4120,7 @@ def cmd_update(args): # Exclude PIDs that belong to just-restarted services so we don't # immediately kill the process that systemd/launchd just spawned. 
service_pids = _get_service_pids() - manual_pids = find_gateway_pids(exclude_pids=service_pids) + manual_pids = find_gateway_pids(exclude_pids=service_pids, all_profiles=True) for pid in manual_pids: try: os.kill(pid, _signal.SIGTERM) @@ -4463,7 +4569,14 @@ For more help on a command: default=False, help="Launch the Ink-based terminal UI instead of the classic REPL" ) - + parser.add_argument( + "--dev", + dest="tui_dev", + action="store_true", + default=False, + help="With --tui: run TypeScript sources via tsx (skip dist build)", + ) + subparsers = parser.add_subparsers(dest="command", help="Command to run") # ========================================================================= @@ -4498,7 +4611,7 @@ For more help on a command: ) chat_parser.add_argument( "--provider", - choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode"], + choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "xiaomi"], default=None, help="Inference provider (default: auto)" ) @@ -4569,6 +4682,13 @@ For more help on a command: default=False, help="Launch the Ink-based terminal UI instead of the classic REPL" ) + chat_parser.add_argument( + "--dev", + dest="tui_dev", + action="store_true", + default=False, + help="With --tui: run TypeScript sources via tsx (skip dist build)", + ) chat_parser.set_defaults(func=cmd_chat) # ========================================================================= @@ -5558,7 +5678,8 @@ For more help on a command: claw_migrate = claw_subparsers.add_parser( "migrate", help="Migrate from OpenClaw to Hermes", - description="Import settings, memories, skills, and API keys from an OpenClaw installation" + description="Import settings, memories, skills, and API keys from an OpenClaw installation. 
" + "Always shows a preview before making changes." ) claw_migrate.add_argument( "--source", @@ -5567,7 +5688,7 @@ For more help on a command: claw_migrate.add_argument( "--dry-run", action="store_true", - help="Preview what would be migrated without making changes" + help="Preview only — stop after showing what would be migrated" ) claw_migrate.add_argument( "--preset", diff --git a/hermes_cli/mcp_config.py b/hermes_cli/mcp_config.py index 9154ed50a3..cf2dde0892 100644 --- a/hermes_cli/mcp_config.py +++ b/hermes_cli/mcp_config.py @@ -57,19 +57,8 @@ def _confirm(question: str, default: bool = True) -> bool: def _prompt(question: str, *, password: bool = False, default: str = "") -> str: - display = f" {question}" - if default: - display += f" [{default}]" - display += ": " - try: - if password: - value = getpass.getpass(color(display, Colors.YELLOW)) - else: - value = input(color(display, Colors.YELLOW)) - return value.strip() or default - except (KeyboardInterrupt, EOFError): - print() - return default + from hermes_cli.cli_output import prompt as _shared_prompt + return _shared_prompt(question, default=default, password=password) # ─── Config Helpers ─────────────────────────────────────────────────────────── diff --git a/hermes_cli/memory_setup.py b/hermes_cli/memory_setup.py index 2843f4f444..1aa4313676 100644 --- a/hermes_cli/memory_setup.py +++ b/hermes_cli/memory_setup.py @@ -25,85 +25,13 @@ def _curses_select(title: str, items: list[tuple[str, str]], default: int = 0) - items: list of (label, description) tuples. Returns selected index, or default on escape/quit. 
""" - try: - import curses - result = [default] - - def _menu(stdscr): - curses.curs_set(0) - if curses.has_colors(): - curses.start_color() - curses.use_default_colors() - curses.init_pair(1, curses.COLOR_GREEN, -1) - curses.init_pair(2, curses.COLOR_YELLOW, -1) - curses.init_pair(3, curses.COLOR_CYAN, -1) - cursor = default - - while True: - stdscr.clear() - max_y, max_x = stdscr.getmaxyx() - - # Title - try: - stdscr.addnstr(0, 0, title, max_x - 1, - curses.A_BOLD | (curses.color_pair(2) if curses.has_colors() else 0)) - stdscr.addnstr(1, 0, " ↑↓ navigate ⏎ select q quit", max_x - 1, - curses.color_pair(3) if curses.has_colors() else curses.A_DIM) - except curses.error: - pass - - for i, (label, desc) in enumerate(items): - y = i + 3 - if y >= max_y - 1: - break - arrow = "→" if i == cursor else " " - line = f" {arrow} {label}" - if desc: - line += f" {desc}" - - attr = curses.A_NORMAL - if i == cursor: - attr = curses.A_BOLD - if curses.has_colors(): - attr |= curses.color_pair(1) - try: - stdscr.addnstr(y, 0, line[:max_x - 1], max_x - 1, attr) - except curses.error: - pass - - stdscr.refresh() - key = stdscr.getch() - - if key in (curses.KEY_UP, ord('k')): - cursor = (cursor - 1) % len(items) - elif key in (curses.KEY_DOWN, ord('j')): - cursor = (cursor + 1) % len(items) - elif key in (curses.KEY_ENTER, 10, 13): - result[0] = cursor - return - elif key in (27, ord('q')): - return - - curses.wrapper(_menu) - return result[0] - - except Exception: - # Fallback: numbered input - print(f"\n {title}\n") - for i, (label, desc) in enumerate(items): - marker = "→" if i == default else " " - d = f" {desc}" if desc else "" - print(f" {marker} {i + 1}. 
{label}{d}") - while True: - try: - val = input(f"\n Select [1-{len(items)}] ({default + 1}): ") - if not val: - return default - idx = int(val) - 1 - if 0 <= idx < len(items): - return idx - except (ValueError, EOFError): - return default + from hermes_cli.curses_ui import curses_radiolist + # Format (label, desc) tuples into display strings + display_items = [ + f"{label} {desc}" if desc else label + for label, desc in items + ] + return curses_radiolist(title, display_items, selected=default, cancel_returns=default) def _prompt(label: str, default: str | None = None, secret: bool = False) -> str: diff --git a/hermes_cli/model_normalize.py b/hermes_cli/model_normalize.py index 780c638f50..8c0c30fbfa 100644 --- a/hermes_cli/model_normalize.py +++ b/hermes_cli/model_normalize.py @@ -92,6 +92,7 @@ _MATCHING_PREFIX_STRIP_PROVIDERS: frozenset[str] = frozenset({ "minimax-cn", "alibaba", "qwen-oauth", + "xiaomi", "custom", }) diff --git a/hermes_cli/models.py b/hermes_cli/models.py index 5da9824f30..17c1072dbe 100644 --- a/hermes_cli/models.py +++ b/hermes_cli/models.py @@ -188,6 +188,11 @@ _PROVIDER_MODELS: dict[str, list[str]] = { "deepseek-chat", "deepseek-reasoner", ], + "xiaomi": [ + "mimo-v2-pro", + "mimo-v2-omni", + "mimo-v2-flash", + ], "opencode-zen": [ "gpt-5.4-pro", "gpt-5.4", @@ -493,6 +498,7 @@ _PROVIDER_LABELS = { "alibaba": "Alibaba Cloud (DashScope)", "qwen-oauth": "Qwen OAuth (Portal)", "huggingface": "Hugging Face", + "xiaomi": "Xiaomi MiMo", "custom": "Custom endpoint", } @@ -535,6 +541,8 @@ _PROVIDER_ALIASES = { "hf": "huggingface", "hugging-face": "huggingface", "huggingface-hub": "huggingface", + "mimo": "xiaomi", + "xiaomi-mimo": "xiaomi", } @@ -819,7 +827,7 @@ def list_available_providers() -> list[dict[str, str]]: "openrouter", "nous", "openai-codex", "copilot", "copilot-acp", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba", - "qwen-oauth", + "qwen-oauth", "xiaomi", "opencode-zen", 
"opencode-go", "ai-gateway", "deepseek", "custom", ] diff --git a/hermes_cli/platforms.py b/hermes_cli/platforms.py new file mode 100644 index 0000000000..18307912b1 --- /dev/null +++ b/hermes_cli/platforms.py @@ -0,0 +1,45 @@ +""" +Shared platform registry for Hermes Agent. + +Single source of truth for platform metadata consumed by both +skills_config (label display) and tools_config (default toolset +resolution). Import ``PLATFORMS`` from here instead of maintaining +duplicate dicts in each module. +""" + +from collections import OrderedDict +from typing import NamedTuple + + +class PlatformInfo(NamedTuple): + """Metadata for a single platform entry.""" + label: str + default_toolset: str + + +# Ordered so that TUI menus are deterministic. +PLATFORMS: OrderedDict[str, PlatformInfo] = OrderedDict([ + ("cli", PlatformInfo(label="🖥️ CLI", default_toolset="hermes-cli")), + ("telegram", PlatformInfo(label="📱 Telegram", default_toolset="hermes-telegram")), + ("discord", PlatformInfo(label="💬 Discord", default_toolset="hermes-discord")), + ("slack", PlatformInfo(label="💼 Slack", default_toolset="hermes-slack")), + ("whatsapp", PlatformInfo(label="📱 WhatsApp", default_toolset="hermes-whatsapp")), + ("signal", PlatformInfo(label="📡 Signal", default_toolset="hermes-signal")), + ("bluebubbles", PlatformInfo(label="💙 BlueBubbles", default_toolset="hermes-bluebubbles")), + ("email", PlatformInfo(label="📧 Email", default_toolset="hermes-email")), + ("homeassistant", PlatformInfo(label="🏠 Home Assistant", default_toolset="hermes-homeassistant")), + ("mattermost", PlatformInfo(label="💬 Mattermost", default_toolset="hermes-mattermost")), + ("matrix", PlatformInfo(label="💬 Matrix", default_toolset="hermes-matrix")), + ("dingtalk", PlatformInfo(label="💬 DingTalk", default_toolset="hermes-dingtalk")), + ("feishu", PlatformInfo(label="🪽 Feishu", default_toolset="hermes-feishu")), + ("wecom", PlatformInfo(label="💬 WeCom", default_toolset="hermes-wecom")), + ("weixin", 
PlatformInfo(label="💬 Weixin", default_toolset="hermes-weixin")), + ("webhook", PlatformInfo(label="🔗 Webhook", default_toolset="hermes-webhook")), + ("api_server", PlatformInfo(label="🌐 API Server", default_toolset="hermes-api-server")), +]) + + +def platform_label(key: str, default: str = "") -> str: + """Return the display label for a platform key, or *default*.""" + info = PLATFORMS.get(key) + return info.label if info is not None else default diff --git a/hermes_cli/providers.py b/hermes_cli/providers.py index 78be527db7..a997634983 100644 --- a/hermes_cli/providers.py +++ b/hermes_cli/providers.py @@ -132,6 +132,10 @@ HERMES_OVERLAYS: Dict[str, HermesOverlay] = { base_url_override="https://api.x.ai/v1", base_url_env_var="XAI_BASE_URL", ), + "xiaomi": HermesOverlay( + transport="openai_chat", + base_url_env_var="XIAOMI_BASE_URL", + ), } @@ -222,6 +226,10 @@ ALIASES: Dict[str, str] = { "hugging-face": "huggingface", "huggingface-hub": "huggingface", + # xiaomi + "mimo": "xiaomi", + "xiaomi-mimo": "xiaomi", + # Local server aliases → virtual "local" concept (resolved via user config) "lmstudio": "lmstudio", "lm-studio": "lmstudio", @@ -242,6 +250,7 @@ _LABEL_OVERRIDES: Dict[str, str] = { "nous": "Nous Portal", "openai-codex": "OpenAI Codex", "copilot-acp": "GitHub Copilot ACP", + "xiaomi": "Xiaomi MiMo", "local": "Local endpoint", } diff --git a/hermes_cli/runtime_provider.py b/hermes_cli/runtime_provider.py index 3d1333c26f..cd0b667225 100644 --- a/hermes_cli/runtime_provider.py +++ b/hermes_cli/runtime_provider.py @@ -304,6 +304,9 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An api_mode = _parse_api_mode(entry.get("api_mode")) if api_mode: result["api_mode"] = api_mode + model_name = str(entry.get("model", "") or "").strip() + if model_name: + result["model"] = model_name return result return None @@ -329,6 +332,11 @@ def _resolve_named_custom_runtime( # Check if a credential pool exists for this custom endpoint pool_result 
= _try_resolve_from_custom_pool(base_url, "custom", custom_provider.get("api_mode")) if pool_result: + # Propagate the model name even when using pooled credentials — + # the pool doesn't know about the custom_providers model field. + model_name = custom_provider.get("model") + if model_name: + pool_result["model"] = model_name return pool_result api_key_candidates = [ @@ -339,7 +347,7 @@ def _resolve_named_custom_runtime( ] api_key = next((candidate for candidate in api_key_candidates if has_usable_secret(candidate)), "") - return { + result = { "provider": "custom", "api_mode": custom_provider.get("api_mode") or _detect_api_mode_for_url(base_url) @@ -348,6 +356,11 @@ def _resolve_named_custom_runtime( "api_key": api_key or "no-key-required", "source": f"custom_provider:{custom_provider.get('name', requested_provider)}", } + # Propagate the model name so callers can override self.model when the + # provider name differs from the actual model string the API expects. + if custom_provider.get("model"): + result["model"] = custom_provider["model"] + return result def _resolve_openrouter_runtime( diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py index ca877606fd..a25ce84914 100644 --- a/hermes_cli/setup.py +++ b/hermes_cli/setup.py @@ -197,24 +197,12 @@ def print_header(title: str): print(color(f"◆ {title}", Colors.CYAN, Colors.BOLD)) -def print_info(text: str): - """Print info text.""" - print(color(f" {text}", Colors.DIM)) - - -def print_success(text: str): - """Print success message.""" - print(color(f"✓ {text}", Colors.GREEN)) - - -def print_warning(text: str): - """Print warning message.""" - print(color(f"⚠ {text}", Colors.YELLOW)) - - -def print_error(text: str): - """Print error message.""" - print(color(f"✗ {text}", Colors.RED)) +from hermes_cli.cli_output import ( # noqa: E402 + print_error, + print_info, + print_success, + print_warning, +) def is_interactive_stdin() -> bool: @@ -269,80 +257,9 @@ def prompt(question: str, default: str = None, password: 
bool = False) -> str: def _curses_prompt_choice(question: str, choices: list, default: int = 0) -> int: - """Single-select menu using curses to avoid simple_term_menu rendering bugs.""" - try: - import curses - result_holder = [default] - - def _curses_menu(stdscr): - curses.curs_set(0) - if curses.has_colors(): - curses.start_color() - curses.use_default_colors() - curses.init_pair(1, curses.COLOR_GREEN, -1) - curses.init_pair(2, curses.COLOR_YELLOW, -1) - cursor = default - scroll_offset = 0 - - while True: - stdscr.clear() - max_y, max_x = stdscr.getmaxyx() - - # Rows available for list items: rows 2..(max_y-2) inclusive. - visible = max(1, max_y - 3) - - # Scroll the viewport so the cursor is always visible. - if cursor < scroll_offset: - scroll_offset = cursor - elif cursor >= scroll_offset + visible: - scroll_offset = cursor - visible + 1 - scroll_offset = max(0, min(scroll_offset, max(0, len(choices) - visible))) - - try: - stdscr.addnstr( - 0, - 0, - question, - max_x - 1, - curses.A_BOLD | (curses.color_pair(2) if curses.has_colors() else 0), - ) - except curses.error: - pass - - for row, i in enumerate(range(scroll_offset, min(scroll_offset + visible, len(choices)))): - y = row + 2 - if y >= max_y - 1: - break - arrow = "→" if i == cursor else " " - line = f" {arrow} {choices[i]}" - attr = curses.A_NORMAL - if i == cursor: - attr = curses.A_BOLD - if curses.has_colors(): - attr |= curses.color_pair(1) - try: - stdscr.addnstr(y, 0, line, max_x - 1, attr) - except curses.error: - pass - - stdscr.refresh() - key = stdscr.getch() - if key in (curses.KEY_UP, ord("k")): - cursor = (cursor - 1) % len(choices) - elif key in (curses.KEY_DOWN, ord("j")): - cursor = (cursor + 1) % len(choices) - elif key in (curses.KEY_ENTER, 10, 13): - result_holder[0] = cursor - return - elif key in (27, ord("q")): - return - - curses.wrapper(_curses_menu) - from hermes_cli.curses_ui import flush_stdin - flush_stdin() - return result_holder[0] - except Exception: - return -1 + 
"""Single-select menu using curses. Delegates to curses_radiolist.""" + from hermes_cli.curses_ui import curses_radiolist + return curses_radiolist(question, choices, selected=default, cancel_returns=-1) @@ -2052,6 +1969,42 @@ def _setup_weixin(): _gateway_setup_weixin() +def _setup_signal(): + """Configure Signal via gateway setup.""" + from hermes_cli.gateway import _setup_signal as _gateway_setup_signal + _gateway_setup_signal() + + +def _setup_email(): + """Configure Email via gateway setup.""" + from hermes_cli.gateway import _setup_email as _gateway_setup_email + _gateway_setup_email() + + +def _setup_sms(): + """Configure SMS (Twilio) via gateway setup.""" + from hermes_cli.gateway import _setup_sms as _gateway_setup_sms + _gateway_setup_sms() + + +def _setup_dingtalk(): + """Configure DingTalk via gateway setup.""" + from hermes_cli.gateway import _setup_dingtalk as _gateway_setup_dingtalk + _gateway_setup_dingtalk() + + +def _setup_feishu(): + """Configure Feishu / Lark via gateway setup.""" + from hermes_cli.gateway import _setup_feishu as _gateway_setup_feishu + _gateway_setup_feishu() + + +def _setup_wecom(): + """Configure WeCom (Enterprise WeChat) via gateway setup.""" + from hermes_cli.gateway import _setup_wecom as _gateway_setup_wecom + _gateway_setup_wecom() + + def _setup_bluebubbles(): """Configure BlueBubbles iMessage gateway.""" print_header("BlueBubbles (iMessage)") @@ -2168,9 +2121,15 @@ _GATEWAY_PLATFORMS = [ ("Telegram", "TELEGRAM_BOT_TOKEN", _setup_telegram), ("Discord", "DISCORD_BOT_TOKEN", _setup_discord), ("Slack", "SLACK_BOT_TOKEN", _setup_slack), + ("Signal", "SIGNAL_HTTP_URL", _setup_signal), + ("Email", "EMAIL_ADDRESS", _setup_email), + ("SMS (Twilio)", "TWILIO_ACCOUNT_SID", _setup_sms), ("Matrix", "MATRIX_ACCESS_TOKEN", _setup_matrix), ("Mattermost", "MATTERMOST_TOKEN", _setup_mattermost), ("WhatsApp", "WHATSAPP_ENABLED", _setup_whatsapp), + ("DingTalk", "DINGTALK_CLIENT_ID", _setup_dingtalk), + ("Feishu / Lark", "FEISHU_APP_ID", 
_setup_feishu), + ("WeCom (Enterprise WeChat)", "WECOM_BOT_ID", _setup_wecom), ("Weixin (WeChat)", "WEIXIN_ACCOUNT_ID", _setup_weixin), ("BlueBubbles (iMessage)", "BLUEBUBBLES_SERVER_URL", _setup_bluebubbles), ("Webhooks (GitHub, GitLab, etc.)", "WEBHOOK_ENABLED", _setup_webhooks), @@ -2212,10 +2171,17 @@ def setup_gateway(config: dict): get_env_value("TELEGRAM_BOT_TOKEN") or get_env_value("DISCORD_BOT_TOKEN") or get_env_value("SLACK_BOT_TOKEN") + or get_env_value("SIGNAL_HTTP_URL") + or get_env_value("EMAIL_ADDRESS") + or get_env_value("TWILIO_ACCOUNT_SID") or get_env_value("MATTERMOST_TOKEN") or get_env_value("MATRIX_ACCESS_TOKEN") or get_env_value("MATRIX_PASSWORD") or get_env_value("WHATSAPP_ENABLED") + or get_env_value("DINGTALK_CLIENT_ID") + or get_env_value("FEISHU_APP_ID") + or get_env_value("WECOM_BOT_ID") + or get_env_value("WEIXIN_ACCOUNT_ID") or get_env_value("BLUEBUBBLES_SERVER_URL") or get_env_value("WEBHOOK_ENABLED") ) @@ -2404,12 +2370,30 @@ def _get_section_config_summary(config: dict, section_key: str) -> Optional[str] platforms.append("Discord") if get_env_value("SLACK_BOT_TOKEN"): platforms.append("Slack") - if get_env_value("WHATSAPP_PHONE_NUMBER_ID"): - platforms.append("WhatsApp") if get_env_value("SIGNAL_ACCOUNT"): platforms.append("Signal") + if get_env_value("EMAIL_ADDRESS"): + platforms.append("Email") + if get_env_value("TWILIO_ACCOUNT_SID"): + platforms.append("SMS") + if get_env_value("MATRIX_ACCESS_TOKEN") or get_env_value("MATRIX_PASSWORD"): + platforms.append("Matrix") + if get_env_value("MATTERMOST_TOKEN"): + platforms.append("Mattermost") + if get_env_value("WHATSAPP_PHONE_NUMBER_ID"): + platforms.append("WhatsApp") + if get_env_value("DINGTALK_CLIENT_ID"): + platforms.append("DingTalk") + if get_env_value("FEISHU_APP_ID"): + platforms.append("Feishu") + if get_env_value("WECOM_BOT_ID"): + platforms.append("WeCom") + if get_env_value("WEIXIN_ACCOUNT_ID"): + platforms.append("Weixin") if get_env_value("BLUEBUBBLES_SERVER_URL"): 
platforms.append("BlueBubbles") + if get_env_value("WEBHOOK_ENABLED"): + platforms.append("Webhooks") if platforms: return ", ".join(platforms) return None # No platforms configured — section must run diff --git a/hermes_cli/skills_config.py b/hermes_cli/skills_config.py index b017361fee..92424a0ca3 100644 --- a/hermes_cli/skills_config.py +++ b/hermes_cli/skills_config.py @@ -15,25 +15,12 @@ from typing import List, Optional, Set from hermes_cli.config import load_config, save_config from hermes_cli.colors import Colors, color +from hermes_cli.platforms import PLATFORMS as _PLATFORMS, platform_label -PLATFORMS = { - "cli": "🖥️ CLI", - "telegram": "📱 Telegram", - "discord": "💬 Discord", - "slack": "💼 Slack", - "whatsapp": "📱 WhatsApp", - "signal": "📡 Signal", - "bluebubbles": "💬 BlueBubbles", - "email": "📧 Email", - "homeassistant": "🏠 Home Assistant", - "mattermost": "💬 Mattermost", - "matrix": "💬 Matrix", - "dingtalk": "💬 DingTalk", - "feishu": "🪽 Feishu", - "wecom": "💬 WeCom", - "weixin": "💬 Weixin", - "webhook": "🔗 Webhook", -} +# Backward-compatible view: {key: label_string} so existing code that +# iterates ``PLATFORMS.items()`` or calls ``PLATFORMS.get(key)`` keeps +# working without changes to every call site. 
+PLATFORMS = {k: info.label for k, info in _PLATFORMS.items() if k != "api_server"} # ─── Config Helpers ─────────────────────────────────────────────────────────── diff --git a/hermes_cli/status.py b/hermes_cli/status.py index baba4f359d..7a7a9c645d 100644 --- a/hermes_cli/status.py +++ b/hermes_cli/status.py @@ -141,11 +141,8 @@ def show_status(args): display = redact_key(value) if not show_all else value print(f" {name:<12} {check_mark(has_key)} {display}") - anthropic_value = ( - get_env_value("ANTHROPIC_TOKEN") - or get_env_value("ANTHROPIC_API_KEY") - or "" - ) + from hermes_cli.auth import get_anthropic_key + anthropic_value = get_anthropic_key() anthropic_display = redact_key(anthropic_value) if not show_all else anthropic_value print(f" {'Anthropic':<12} {check_mark(bool(anthropic_value))} {anthropic_display}") diff --git a/hermes_cli/tools_config.py b/hermes_cli/tools_config.py index 91c41dce5d..343007cabc 100644 --- a/hermes_cli/tools_config.py +++ b/hermes_cli/tools_config.py @@ -33,33 +33,13 @@ PROJECT_ROOT = Path(__file__).parent.parent.resolve() # ─── UI Helpers (shared with setup.py) ──────────────────────────────────────── -def _print_info(text: str): - print(color(f" {text}", Colors.DIM)) - -def _print_success(text: str): - print(color(f"✓ {text}", Colors.GREEN)) - -def _print_warning(text: str): - print(color(f"⚠ {text}", Colors.YELLOW)) - -def _print_error(text: str): - print(color(f"✗ {text}", Colors.RED)) - -def _prompt(question: str, default: str = None, password: bool = False) -> str: - if default: - display = f"{question} [{default}]: " - else: - display = f"{question}: " - try: - if password: - import getpass - value = getpass.getpass(color(display, Colors.YELLOW)) - else: - value = input(color(display, Colors.YELLOW)) - return value.strip() or default or "" - except (KeyboardInterrupt, EOFError): - print() - return default or "" +from hermes_cli.cli_output import ( # noqa: E402 — late import block + print_error as _print_error, + 
print_info as _print_info, + print_success as _print_success, + print_warning as _print_warning, + prompt as _prompt, +) # ─── Toolset Registry ───────────────────────────────────────────────────────── @@ -118,25 +98,14 @@ def _get_plugin_toolset_keys() -> set: except Exception: return set() -# Platform display config +# Platform display config — derived from the canonical registry so every +# module shares the same data. Kept as dict-of-dicts for backward +# compatibility with existing ``PLATFORMS[key]["label"]`` access patterns. +from hermes_cli.platforms import PLATFORMS as _PLATFORMS_REGISTRY + PLATFORMS = { - "cli": {"label": "🖥️ CLI", "default_toolset": "hermes-cli"}, - "telegram": {"label": "📱 Telegram", "default_toolset": "hermes-telegram"}, - "discord": {"label": "💬 Discord", "default_toolset": "hermes-discord"}, - "slack": {"label": "💼 Slack", "default_toolset": "hermes-slack"}, - "whatsapp": {"label": "📱 WhatsApp", "default_toolset": "hermes-whatsapp"}, - "signal": {"label": "📡 Signal", "default_toolset": "hermes-signal"}, - "bluebubbles": {"label": "💙 BlueBubbles", "default_toolset": "hermes-bluebubbles"}, - "homeassistant": {"label": "🏠 Home Assistant", "default_toolset": "hermes-homeassistant"}, - "email": {"label": "📧 Email", "default_toolset": "hermes-email"}, - "matrix": {"label": "💬 Matrix", "default_toolset": "hermes-matrix"}, - "dingtalk": {"label": "💬 DingTalk", "default_toolset": "hermes-dingtalk"}, - "feishu": {"label": "🪽 Feishu", "default_toolset": "hermes-feishu"}, - "wecom": {"label": "💬 WeCom", "default_toolset": "hermes-wecom"}, - "weixin": {"label": "💬 Weixin", "default_toolset": "hermes-weixin"}, - "api_server": {"label": "🌐 API Server", "default_toolset": "hermes-api-server"}, - "mattermost": {"label": "💬 Mattermost", "default_toolset": "hermes-mattermost"}, - "webhook": {"label": "🔗 Webhook", "default_toolset": "hermes-webhook"}, + k: {"label": info.label, "default_toolset": info.default_toolset} + for k, info in 
_PLATFORMS_REGISTRY.items() } @@ -677,86 +646,9 @@ def _toolset_has_keys(ts_key: str, config: dict = None) -> bool: # ─── Menu Helpers ───────────────────────────────────────────────────────────── def _prompt_choice(question: str, choices: list, default: int = 0) -> int: - """Single-select menu (arrow keys). Uses curses to avoid simple_term_menu - rendering bugs in tmux, iTerm, and other non-standard terminals.""" - - # Curses-based single-select — works in tmux, iTerm, and standard terminals - try: - import curses - result_holder = [default] - - def _curses_menu(stdscr): - curses.curs_set(0) - if curses.has_colors(): - curses.start_color() - curses.use_default_colors() - curses.init_pair(1, curses.COLOR_GREEN, -1) - curses.init_pair(2, curses.COLOR_YELLOW, -1) - cursor = default - - while True: - stdscr.clear() - max_y, max_x = stdscr.getmaxyx() - try: - stdscr.addnstr(0, 0, question, max_x - 1, - curses.A_BOLD | (curses.color_pair(2) if curses.has_colors() else 0)) - except curses.error: - pass - - for i, c in enumerate(choices): - y = i + 2 - if y >= max_y - 1: - break - arrow = "→" if i == cursor else " " - line = f" {arrow} {c}" - attr = curses.A_NORMAL - if i == cursor: - attr = curses.A_BOLD - if curses.has_colors(): - attr |= curses.color_pair(1) - try: - stdscr.addnstr(y, 0, line, max_x - 1, attr) - except curses.error: - pass - - stdscr.refresh() - key = stdscr.getch() - - if key in (curses.KEY_UP, ord('k')): - cursor = (cursor - 1) % len(choices) - elif key in (curses.KEY_DOWN, ord('j')): - cursor = (cursor + 1) % len(choices) - elif key in (curses.KEY_ENTER, 10, 13): - result_holder[0] = cursor - return - elif key in (27, ord('q')): - return - - curses.wrapper(_curses_menu) - from hermes_cli.curses_ui import flush_stdin - flush_stdin() - return result_holder[0] - - except Exception: - pass - - # Fallback: numbered input (Windows without curses, etc.) 
- print(color(question, Colors.YELLOW)) - for i, c in enumerate(choices): - marker = "●" if i == default else "○" - style = Colors.GREEN if i == default else "" - print(color(f" {marker} {i+1}. {c}", style) if style else f" {marker} {i+1}. {c}") - while True: - try: - val = input(color(f" Select [1-{len(choices)}] ({default + 1}): ", Colors.DIM)) - if not val: - return default - idx = int(val) - 1 - if 0 <= idx < len(choices): - return idx - except (ValueError, KeyboardInterrupt, EOFError): - print() - return default + """Single-select menu (arrow keys). Delegates to curses_radiolist.""" + from hermes_cli.curses_ui import curses_radiolist + return curses_radiolist(question, choices, selected=default, cancel_returns=default) # ─── Token Estimation ──────────────────────────────────────────────────────── diff --git a/hermes_constants.py b/hermes_constants.py index 7d149f404e..85955d5482 100644 --- a/hermes_constants.py +++ b/hermes_constants.py @@ -189,6 +189,33 @@ def is_wsl() -> bool: return _wsl_detected +# ─── Well-Known Paths ───────────────────────────────────────────────────────── + + +def get_config_path() -> Path: + """Return the path to ``config.yaml`` under HERMES_HOME. + + Replaces the ``get_hermes_home() / "config.yaml"`` pattern repeated + in 7+ files (skill_utils.py, hermes_logging.py, hermes_time.py, etc.). 
+ """ + return get_hermes_home() / "config.yaml" + + +def get_skills_dir() -> Path: + """Return the path to the skills directory under HERMES_HOME.""" + return get_hermes_home() / "skills" + + +def get_logs_dir() -> Path: + """Return the path to the logs directory under HERMES_HOME.""" + return get_hermes_home() / "logs" + + +def get_env_path() -> Path: + """Return the path to the ``.env`` file under HERMES_HOME.""" + return get_hermes_home() / ".env" + + OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1" OPENROUTER_MODELS_URL = f"{OPENROUTER_BASE_URL}/models" diff --git a/hermes_logging.py b/hermes_logging.py index 5d71590c3f..b765e94640 100644 --- a/hermes_logging.py +++ b/hermes_logging.py @@ -18,7 +18,7 @@ from logging.handlers import RotatingFileHandler from pathlib import Path from typing import Optional -from hermes_constants import get_hermes_home +from hermes_constants import get_config_path, get_hermes_home # Sentinel to track whether setup_logging() has already run. The function # is idempotent — calling it twice is safe but the second call is a no-op @@ -246,7 +246,7 @@ def _read_logging_config(): """ try: import yaml - config_path = get_hermes_home() / "config.yaml" + config_path = get_config_path() if config_path.exists(): with open(config_path, "r", encoding="utf-8") as f: cfg = yaml.safe_load(f) or {} diff --git a/hermes_time.py b/hermes_time.py index f7d085544b..9f172d28ff 100644 --- a/hermes_time.py +++ b/hermes_time.py @@ -16,7 +16,7 @@ crashes due to a bad timezone string. import logging import os from datetime import datetime -from hermes_constants import get_hermes_home +from hermes_constants import get_config_path from typing import Optional logger = logging.getLogger(__name__) @@ -48,8 +48,7 @@ def _resolve_timezone_name() -> str: # 2. 
config.yaml ``timezone`` key try: import yaml - hermes_home = get_hermes_home() - config_path = hermes_home / "config.yaml" + config_path = get_config_path() if config_path.exists(): with open(config_path) as f: cfg = yaml.safe_load(f) or {} diff --git a/nix/tui.nix b/nix/tui.nix index a077dc2d43..93973019f5 100644 --- a/nix/tui.nix +++ b/nix/tui.nix @@ -4,7 +4,7 @@ let src = ../ui-tui; npmDeps = pkgs.fetchNpmDeps { inherit src; - hash = "sha256-QQixyLmsn5+Y1daHifzDaNQbaoZjm+ezGrGoLXcc95U="; + hash = "sha256-+EhRRuvXi5hJupseHblF+MGxs84ijRMIH4qt5+2yYi8="; }; packageJson = builtins.fromJSON (builtins.readFile (src + "/package.json")); @@ -28,6 +28,10 @@ pkgs.buildNpmPackage { # runtime node_modules cp -r node_modules $out/lib/hermes-tui/node_modules + # @hermes/ink is a file: dependency, we need to copy it in fr + rm -f $out/lib/hermes-tui/node_modules/@hermes/ink + cp -r packages/hermes-ink $out/lib/hermes-tui/node_modules/@hermes/ink + # package.json needed for "type": "module" resolution cp package.json $out/lib/hermes-tui/ @@ -36,7 +40,7 @@ pkgs.buildNpmPackage { nativeBuildInputs = [ (pkgs.writeShellScriptBin "update_tui_lockfile" '' - set -euo pipefail + set -euox pipefail # get root of repo REPO_ROOT=$(git rev-parse --show-toplevel) @@ -45,7 +49,7 @@ pkgs.buildNpmPackage { cd "$REPO_ROOT/ui-tui" rm -rf node_modules/ npm cache clean --force - npm install + CI=true npm install # ci env var to suppress annoying unicode install banner lag ${pkgs.lib.getExe npm-lockfile-fix} ./package-lock.json NIX_FILE="$REPO_ROOT/nix/tui.nix" @@ -65,7 +69,7 @@ pkgs.buildNpmPackage { STAMP_VALUE="${npmLockHash}" if [ ! -f "$STAMP" ] || [ "$(cat "$STAMP")" != "$STAMP_VALUE" ]; then echo "hermes-tui: installing npm dependencies..." - cd ui-tui && npm install --silent --no-fund --no-audit 2>/dev/null && cd .. + cd ui-tui && CI=true npm install --silent --no-fund --no-audit 2>/dev/null && cd .. 
mkdir -p .nix-stamps echo "$STAMP_VALUE" > "$STAMP" fi diff --git a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py index 5e0f76db28..759b798a56 100644 --- a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py +++ b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py @@ -617,6 +617,19 @@ class Migrator: candidate = self.source_root / rel if candidate.exists(): return candidate + # OpenClaw renamed workspace/ to workspace-main/ (and workspace-{agentId} + # for multi-agent). Try the new path as a fallback. + if rel.startswith("workspace/"): + suffix = rel[len("workspace/"):] + for variant in ("workspace-main", "workspace-assistant"): + alt = self.source_root / variant / suffix + if alt.exists(): + return alt + elif rel.startswith("workspace.default/"): + suffix = rel[len("workspace.default/"):] + alt = self.source_root / "workspace-main" / suffix + if alt.exists(): + return alt return None def resolve_skill_destination(self, destination: Path) -> Path: @@ -1033,11 +1046,8 @@ class Migrator: def migrate_secret_settings(self, config: Dict[str, Any]) -> None: secret_additions: Dict[str, str] = {} - telegram_token = ( - config.get("channels", {}) - .get("telegram", {}) - .get("botToken") - ) + tg_cfg = config.get("channels", {}).get("telegram", {}) + telegram_token = self._get_channel_field(tg_cfg, "botToken") if isinstance(tg_cfg, dict) else None if isinstance(telegram_token, str) and telegram_token.strip(): secret_additions["TELEGRAM_BOT_TOKEN"] = telegram_token.strip() @@ -1057,15 +1067,28 @@ class Migrator: """Resolve a channel config value that may be a SecretRef.""" return resolve_secret_input(value, self.load_openclaw_env()) + @staticmethod + def _get_channel_field(ch_cfg: Dict[str, Any], field: str) -> Any: + """Get a field from channel config, checking both flat and accounts.default layout.""" + val = 
ch_cfg.get(field) + if val is not None: + return val + accounts = ch_cfg.get("accounts") + if isinstance(accounts, dict): + default = accounts.get("default") + if isinstance(default, dict): + return default.get(field) + return None + def migrate_discord_settings(self, config: Optional[Dict[str, Any]] = None) -> None: config = config or self.load_openclaw_config() additions: Dict[str, str] = {} discord = config.get("channels", {}).get("discord", {}) if isinstance(discord, dict): - token = discord.get("token") + token = self._get_channel_field(discord, "token") if isinstance(token, str) and token.strip(): additions["DISCORD_BOT_TOKEN"] = token.strip() - allow_from = discord.get("allowFrom", []) + allow_from = self._get_channel_field(discord, "allowFrom") or [] if isinstance(allow_from, list): users = [str(u).strip() for u in allow_from if str(u).strip()] if users: @@ -1080,13 +1103,13 @@ class Migrator: additions: Dict[str, str] = {} slack = config.get("channels", {}).get("slack", {}) if isinstance(slack, dict): - bot_token = slack.get("botToken") + bot_token = self._get_channel_field(slack, "botToken") if isinstance(bot_token, str) and bot_token.strip(): additions["SLACK_BOT_TOKEN"] = bot_token.strip() - app_token = slack.get("appToken") + app_token = self._get_channel_field(slack, "appToken") if isinstance(app_token, str) and app_token.strip(): additions["SLACK_APP_TOKEN"] = app_token.strip() - allow_from = slack.get("allowFrom", []) + allow_from = self._get_channel_field(slack, "allowFrom") or [] if isinstance(allow_from, list): users = [str(u).strip() for u in allow_from if str(u).strip()] if users: @@ -1101,7 +1124,7 @@ class Migrator: additions: Dict[str, str] = {} whatsapp = config.get("channels", {}).get("whatsapp", {}) if isinstance(whatsapp, dict): - allow_from = whatsapp.get("allowFrom", []) + allow_from = self._get_channel_field(whatsapp, "allowFrom") or [] if isinstance(allow_from, list): users = [str(u).strip() for u in allow_from if str(u).strip()] if 
users: @@ -1116,13 +1139,13 @@ class Migrator: additions: Dict[str, str] = {} signal = config.get("channels", {}).get("signal", {}) if isinstance(signal, dict): - account = signal.get("account") + account = self._get_channel_field(signal, "account") if isinstance(account, str) and account.strip(): additions["SIGNAL_ACCOUNT"] = account.strip() - http_url = signal.get("httpUrl") + http_url = self._get_channel_field(signal, "httpUrl") if isinstance(http_url, str) and http_url.strip(): additions["SIGNAL_HTTP_URL"] = http_url.strip() - allow_from = signal.get("allowFrom", []) + allow_from = self._get_channel_field(signal, "allowFrom") or [] if isinstance(allow_from, list): users = [str(u).strip() for u in allow_from if str(u).strip()] if users: @@ -1161,6 +1184,16 @@ class Migrator: raw_key = provider_cfg.get("apiKey") api_key = resolve_secret_input(raw_key, openclaw_env) if not api_key: + # Warn if a SecretRef with file/exec source was silently unresolvable + if isinstance(raw_key, dict) and raw_key.get("source") in ("file", "exec"): + self.record( + "provider-keys", + self.source_root / "openclaw.json", + None, + "skipped", + f"Provider '{provider_name}' uses a {raw_key['source']}-backed SecretRef " + f"that cannot be auto-migrated. Add this key manually via: hermes config set", + ) continue base_url = provider_cfg.get("baseUrl", "") @@ -1224,6 +1257,21 @@ class Migrator: if val and hermes_key not in secret_additions: secret_additions[hermes_key] = val + # Check the openclaw.json "env" sub-object — some OpenClaw setups + # store API keys here instead of in a separate .env file. + # Keys can be at env. or env.vars.. 
+ json_env = config.get("env") + if isinstance(json_env, dict): + env_vars = json_env.get("vars") + sources = [json_env] + if isinstance(env_vars, dict): + sources.append(env_vars) + for src in sources: + for oc_key, hermes_key in env_key_mapping.items(): + val = src.get(oc_key) + if isinstance(val, str) and val.strip() and hermes_key not in secret_additions: + secret_additions[hermes_key] = val.strip() + # Check per-agent auth-profiles.json for additional credentials auth_profiles_path = self.source_root / "agents" / "main" / "agent" / "auth-profiles.json" if auth_profiles_path.exists(): @@ -1324,8 +1372,9 @@ class Migrator: tts_data: Dict[str, Any] = {} provider = tts.get("provider") - if isinstance(provider, str) and provider in ("elevenlabs", "openai", "edge"): - tts_data["provider"] = provider + if isinstance(provider, str) and provider in ("elevenlabs", "openai", "edge", "microsoft"): + # OpenClaw renamed "edge" to "microsoft"; Hermes still uses "edge" + tts_data["provider"] = "edge" if provider == "microsoft" else provider # TTS provider settings live under messages.tts.providers.{provider} # in OpenClaw (not messages.tts.elevenlabs directly) @@ -1374,9 +1423,9 @@ class Migrator: tts_data["openai"] = oai_settings edge_tts = ( - (providers.get("edge") or {}) - if isinstance(providers.get("edge"), dict) else - (tts.get("edge") or {}) + (providers.get("edge") or providers.get("microsoft") or {}) + if isinstance(providers.get("edge"), dict) or isinstance(providers.get("microsoft"), dict) else + (tts.get("edge") or tts.get("microsoft") or {}) ) if isinstance(edge_tts, dict): edge_voice = edge_tts.get("voice") @@ -1890,11 +1939,11 @@ class Migrator: if defaults.get("thinkingDefault"): # Map OpenClaw thinking -> Hermes reasoning_effort thinking = defaults["thinkingDefault"] - if thinking in ("always", "high"): + if thinking in ("always", "high", "xhigh"): agent_cfg["reasoning_effort"] = "high" - elif thinking in ("auto", "medium"): + elif thinking in ("auto", 
"medium", "adaptive"): agent_cfg["reasoning_effort"] = "medium" - elif thinking in ("off", "low", "none"): + elif thinking in ("off", "low", "none", "minimal"): agent_cfg["reasoning_effort"] = "low" changes = True @@ -2099,10 +2148,14 @@ class Migrator: f"Provider '{prov_name}' already exists") continue - api_type = prov_cfg.get("apiType") or prov_cfg.get("type") or "openai" + api_type = prov_cfg.get("apiType") or prov_cfg.get("api") or prov_cfg.get("type") or "openai" api_mode_map = { "openai": "chat_completions", + "openai-completions": "chat_completions", + "openai-responses": "chat_completions", "anthropic": "anthropic_messages", + "anthropic-messages": "anthropic_messages", + "google-generative-ai": "chat_completions", "cohere": "chat_completions", } entry = { @@ -2142,7 +2195,7 @@ class Migrator: # Extended channel token/allowlist mapping CHANNEL_ENV_MAP = { - "matrix": {"token": "MATRIX_ACCESS_TOKEN", "allowFrom": "MATRIX_ALLOWED_USERS", + "matrix": {"token": "MATRIX_ACCESS_TOKEN", "tokenField": "accessToken", "allowFrom": "MATRIX_ALLOWED_USERS", "extras": {"homeserverUrl": "MATRIX_HOMESERVER_URL", "userId": "MATRIX_USER_ID"}}, "mattermost": {"token": "MATTERMOST_BOT_TOKEN", "allowFrom": "MATTERMOST_ALLOWED_USERS", "extras": {"url": "MATTERMOST_URL", "teamId": "MATTERMOST_TEAM_ID"}}, @@ -2160,19 +2213,21 @@ class Migrator: if not ch_cfg: continue - # Extract tokens - if ch_mapping.get("token") and ch_cfg.get("botToken") and self.migrate_secrets: - self._set_env_var(ch_mapping["token"], ch_cfg["botToken"], - f"channels.{ch_name}.botToken") - if ch_mapping.get("allowFrom") and ch_cfg.get("allowFrom"): - allow_val = ch_cfg["allowFrom"] + # Extract tokens (check flat path, then accounts.default) + token_field = ch_mapping.get("tokenField", "botToken") + bot_token = self._get_channel_field(ch_cfg, token_field) + if ch_mapping.get("token") and bot_token and self.migrate_secrets: + self._set_env_var(ch_mapping["token"], str(bot_token),
f"channels.{ch_name}.{token_field}") + allow_val = self._get_channel_field(ch_cfg, "allowFrom") + if ch_mapping.get("allowFrom") and allow_val: if isinstance(allow_val, list): allow_val = ",".join(str(x) for x in allow_val) self._set_env_var(ch_mapping["allowFrom"], str(allow_val), f"channels.{ch_name}.allowFrom") # Extra fields for oc_key, env_key in (ch_mapping.get("extras") or {}).items(): - val = ch_cfg.get(oc_key) + val = self._get_channel_field(ch_cfg, oc_key) if val: if isinstance(val, list): val = ",".join(str(x) for x in val) @@ -2495,6 +2550,33 @@ class Migrator: elif has_cron_store_archive: notes.append("- Run `hermes cron` to recreate scheduled tasks (see archived cron-store)") + # Check if skills were imported + has_skills = any(i.kind == "skills" and i.status == "migrated" for i in self.items) + if has_skills: + notes.extend([ + "", + "## Imported Skills", + "", + "Imported skills require a new session to take effect. After migration,", + "restart your agent or start a new chat session, then run `/skills`", + "to verify they loaded correctly.", + "", + ]) + + # Check if WhatsApp was detected + has_whatsapp = any(i.kind == "whatsapp-settings" and i.status == "migrated" for i in self.items) + if has_whatsapp: + notes.extend([ + "", + "## WhatsApp Requires Re-Pairing", + "", + "WhatsApp uses QR-code pairing, not token-based auth. 
Your allowlist", + "was migrated, but you must re-pair the device by running:", + "", + " hermes whatsapp", + "", + ]) + notes.extend([ "- Run `hermes gateway install` if you need the gateway service", "- Review `~/.hermes/config.yaml` for any adjustments", diff --git a/run_agent.py b/run_agent.py index 36ddaa0a6f..d9066fa6fb 100644 --- a/run_agent.py +++ b/run_agent.py @@ -739,6 +739,7 @@ class AIAgent: # Interrupt mechanism for breaking out of tool loops self._interrupt_requested = False self._interrupt_message = None # Optional message that triggered interrupt + self._execution_thread_id: int | None = None # Set at run_conversation() start self._client_lock = threading.RLock() # Subagent delegation state @@ -1406,6 +1407,12 @@ class AIAgent: else: print(f"📊 Context limit: {self.context_compressor.context_length:,} tokens (auto-compression disabled)") + # Check immediately so CLI users see the warning at startup. + # Gateway status_callback is not yet wired, so any warning is stored + # in _compression_warning and replayed in the first run_conversation(). + self._compression_warning = None + self._check_compression_model_feasibility() + # Snapshot primary runtime for per-turn restoration. When fallback # activates during a turn, the next turn restores these values so the # preferred model gets a fresh attempt each time. Uses a single dict @@ -1697,6 +1704,104 @@ class AIAgent: except Exception: logger.debug("status_callback error in _emit_status", exc_info=True) + def _check_compression_model_feasibility(self) -> None: + """Warn at session start if the auxiliary compression model's context + window is smaller than the main model's compression threshold. + + When the auxiliary model cannot fit the content that needs summarising, + compression will either fail outright (the LLM call errors) or produce + a severely truncated summary. + + Called during ``__init__`` so CLI users see the warning immediately + (via ``_vprint``). 
The gateway sets ``status_callback`` *after* + construction, so ``_replay_compression_warning()`` re-sends the + stored warning through the callback on the first + ``run_conversation()`` call. + """ + if not self.compression_enabled: + return + try: + from agent.auxiliary_client import get_text_auxiliary_client + from agent.model_metadata import get_model_context_length + + client, aux_model = get_text_auxiliary_client("compression") + if client is None or not aux_model: + msg = ( + "⚠ No auxiliary LLM provider configured — context " + "compression will drop middle turns without a summary. " + "Run `hermes setup` or set OPENROUTER_API_KEY." + ) + self._compression_warning = msg + self._emit_status(msg) + logger.warning( + "No auxiliary LLM provider for compression — " + "summaries will be unavailable." + ) + return + + aux_base_url = str(getattr(client, "base_url", "")) + aux_api_key = str(getattr(client, "api_key", "")) + aux_context = get_model_context_length( + aux_model, + base_url=aux_base_url, + api_key=aux_api_key, + ) + + threshold = self.context_compressor.threshold_tokens + if aux_context < threshold: + # Suggest a threshold that would fit the aux model, + # rounded down to a clean percentage. + safe_pct = int((aux_context / self.context_compressor.context_length) * 100) + msg = ( + f"⚠ Compression model ({aux_model}) context " + f"is {aux_context:,} tokens, but the main model's " + f"compression threshold is {threshold:,} tokens. " + f"Context compression will not be possible — the " + f"content to summarise will exceed the auxiliary " + f"model's context window.\n" + f" Fix options (config.yaml):\n" + f" 1. Use a larger compression model:\n" + f" auxiliary:\n" + f" compression:\n" + f" model: \n" + f" 2. 
Lower the compression threshold to fit " + f"the current model:\n" + f" compression:\n" + f" threshold: 0.{safe_pct:02d}" + ) + self._compression_warning = msg + self._emit_status(msg) + logger.warning( + "Auxiliary compression model %s has %d token context, " + "below the main model's compression threshold of %d " + "tokens — compression summaries will fail or be " + "severely truncated.", + aux_model, + aux_context, + threshold, + ) + except Exception as exc: + logger.debug( + "Compression feasibility check failed (non-fatal): %s", exc + ) + + def _replay_compression_warning(self) -> None: + """Re-send the compression warning through ``status_callback``. + + During ``__init__`` the gateway's ``status_callback`` is not yet + wired, so ``_emit_status`` only reaches ``_vprint`` (CLI). This + method is called once at the start of the first + ``run_conversation()`` — by then the gateway has set the callback, + so every platform (Telegram, Discord, Slack, etc.) receives the + warning. + """ + msg = getattr(self, "_compression_warning", None) + if msg and self.status_callback: + try: + self.status_callback("lifecycle", msg) + except Exception: + pass + def _is_direct_openai_url(self, base_url: str = None) -> bool: """Return True when a base URL targets OpenAI's native API.""" url = (base_url or self._base_url_lower).lower() @@ -2728,8 +2833,10 @@ class AIAgent: """ self._interrupt_requested = True self._interrupt_message = message - # Signal all tools to abort any in-flight operations immediately - _set_interrupt(True) + # Signal all tools to abort any in-flight operations immediately. + # Scope the interrupt to this agent's execution thread so other + # agents running in the same process (gateway) are not affected. 
+ _set_interrupt(True, self._execution_thread_id) # Propagate interrupt to any running child agents (subagent delegation) with self._active_children_lock: children_copy = list(self._active_children) @@ -2742,10 +2849,10 @@ class AIAgent: print("\n⚡ Interrupt requested" + (f": '{message[:40]}...'" if message and len(message) > 40 else f": '{message}'" if message else "")) def clear_interrupt(self) -> None: - """Clear any pending interrupt request and the global tool interrupt signal.""" + """Clear any pending interrupt request and the per-thread tool interrupt signal.""" self._interrupt_requested = False self._interrupt_message = None - _set_interrupt(False) + _set_interrupt(False, self._execution_thread_id) def _touch_activity(self, desc: str) -> None: """Update the last-activity timestamp and description (thread-safe).""" @@ -3339,6 +3446,7 @@ class AIAgent: def _chat_messages_to_responses_input(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """Convert internal chat-style messages to Responses input items.""" items: List[Dict[str, Any]] = [] + seen_item_ids: set = set() for msg in messages: if not isinstance(msg, dict): @@ -3359,7 +3467,12 @@ class AIAgent: if isinstance(codex_reasoning, list): for ri in codex_reasoning: if isinstance(ri, dict) and ri.get("encrypted_content"): + item_id = ri.get("id") + if item_id and item_id in seen_item_ids: + continue items.append(ri) + if item_id: + seen_item_ids.add(item_id) has_codex_reasoning = True if content_text.strip(): @@ -3439,6 +3552,7 @@ class AIAgent: raise ValueError("Codex Responses input must be a list of input items.") normalized: List[Dict[str, Any]] = [] + seen_ids: set = set() for idx, item in enumerate(raw_items): if not isinstance(item, dict): raise ValueError(f"Codex Responses input[{idx}] must be an object.") @@ -3491,8 +3605,12 @@ class AIAgent: if item_type == "reasoning": encrypted = item.get("encrypted_content") if isinstance(encrypted, str) and encrypted: - reasoning_item = {"type": 
"reasoning", "encrypted_content": encrypted} item_id = item.get("id") + if isinstance(item_id, str) and item_id: + if item_id in seen_ids: + continue + seen_ids.add(item_id) + reasoning_item = {"type": "reasoning", "encrypted_content": encrypted} if isinstance(item_id, str) and item_id: reasoning_item["id"] = item_id summary = item.get("summary") @@ -7469,6 +7587,12 @@ class AIAgent: ) except Exception: pass + # Replay compression warning through status_callback for gateway + # platforms (the callback was not wired during __init__). + if self._compression_warning: + self._replay_compression_warning() + self._compression_warning = None # send once + # NOTE: _turns_since_memory and _iters_since_skill are NOT reset here. # They are initialized in __init__ and must persist across run_conversation # calls so that nudge logic accumulates correctly in CLI mode. @@ -7690,6 +7814,11 @@ class AIAgent: compression_attempts = 0 _turn_exit_reason = "unknown" # Diagnostic: why the loop ended + # Record the execution thread so interrupt()/clear_interrupt() can + # scope the tool-level interrupt signal to THIS agent's thread only. + # Must be set before clear_interrupt() which uses it. + self._execution_thread_id = threading.current_thread().ident + # Clear any stale interrupt state at start self.clear_interrupt() @@ -8168,8 +8297,24 @@ class AIAgent: _text_parts.append(getattr(_blk, "text", "")) _trunc_content = "\n".join(_text_parts) if _text_parts else None + # A response is "thinking exhausted" only when the model + # actually produced reasoning blocks but no visible text after + # them. Models that do not use tags (e.g. GLM-4.7 on + # NVIDIA Build, minimax) may return content=None or an empty + # string for unrelated reasons — treat those as normal + # truncations that deserve continuation retries, not as + # thinking-budget exhaustion. 
+ _has_think_tags = bool( + _trunc_content and re.search( + r'<(?:think|thinking|reasoning|REASONING_SCRATCHPAD)[^>]*>', + _trunc_content, + re.IGNORECASE, + ) + ) _thinking_exhausted = ( - not _trunc_has_tool_calls and ( + not _trunc_has_tool_calls + and _has_think_tags + and ( (_trunc_content is not None and not self._has_content_after_think_block(_trunc_content)) or _trunc_content is None ) @@ -9397,12 +9542,41 @@ class AIAgent: invalid_json_args.append((tc.function.name, str(e))) if invalid_json_args: + # Check if the invalid JSON is due to truncation rather + # than a model formatting mistake. Routers sometimes + # rewrite finish_reason from "length" to "tool_calls", + # hiding the truncation from the length handler above. + # Detect truncation: args that don't end with } or ] + # (after stripping whitespace) are cut off mid-stream. + _truncated = any( + not (tc.function.arguments or "").rstrip().endswith(("}", "]")) + for tc in assistant_message.tool_calls + if tc.function.name in {n for n, _ in invalid_json_args} + ) + if _truncated: + self._vprint( + f"{self.log_prefix}⚠️ Truncated tool call arguments detected " + f"(finish_reason={finish_reason!r}) — refusing to execute.", + force=True, + ) + self._invalid_json_retries = 0 + self._cleanup_task_resources(effective_task_id) + self._persist_session(messages, conversation_history) + return { + "final_response": None, + "messages": messages, + "api_calls": api_call_count, + "completed": False, + "partial": True, + "error": "Response truncated due to output length limit", + } + # Track retries for invalid JSON arguments self._invalid_json_retries += 1 - + tool_name, error_msg = invalid_json_args[0] self._vprint(f"{self.log_prefix}⚠️ Invalid JSON in tool call arguments for '{tool_name}': {error_msg}") - + if self._invalid_json_retries < 3: self._vprint(f"{self.log_prefix}🔄 Retrying API call ({self._invalid_json_retries}/3)...") # Don't add anything to messages, just retry the API call diff --git 
a/scripts/whatsapp-bridge/package-lock.json b/scripts/whatsapp-bridge/package-lock.json index 01af1c15a0..23ea30a092 100644 --- a/scripts/whatsapp-bridge/package-lock.json +++ b/scripts/whatsapp-bridge/package-lock.json @@ -8,7 +8,7 @@ "name": "hermes-whatsapp-bridge", "version": "1.0.0", "dependencies": { - "@whiskeysockets/baileys": "7.0.0-rc.9", + "@whiskeysockets/baileys": "WhiskeySockets/Baileys#fix/abprops-abt-fetch", "express": "^4.21.0", "pino": "^9.0.0", "qrcode-terminal": "^0.12.0" @@ -730,21 +730,22 @@ } }, "node_modules/@whiskeysockets/baileys": { + "name": "baileys", "version": "7.0.0-rc.9", - "resolved": "https://registry.npmjs.org/@whiskeysockets/baileys/-/baileys-7.0.0-rc.9.tgz", - "integrity": "sha512-YFm5gKXfDP9byCXCW3OPHKXLzrAKzolzgVUlRosHHgwbnf2YOO3XknkMm6J7+F0ns8OA0uuSBhgkRHTDtqkacw==", + "resolved": "git+ssh://git@github.com/WhiskeySockets/Baileys.git#01047debd81beb20da7b7779b08edcb06aa03770", "hasInstallScript": true, "license": "MIT", "dependencies": { "@cacheable/node-cache": "^1.4.0", "@hapi/boom": "^9.1.3", "async-mutex": "^0.5.0", - "libsignal": "git+https://github.com/whiskeysockets/libsignal-node.git", + "libsignal": "git+https://github.com/whiskeysockets/libsignal-node", "lru-cache": "^11.1.0", "music-metadata": "^11.7.0", "p-queue": "^9.0.0", "pino": "^9.6", "protobufjs": "^7.2.4", + "whatsapp-rust-bridge": "0.5.2", "ws": "^8.13.0" }, "engines": { @@ -2125,6 +2126,12 @@ "node": ">= 0.8" } }, + "node_modules/whatsapp-rust-bridge": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/whatsapp-rust-bridge/-/whatsapp-rust-bridge-0.5.2.tgz", + "integrity": "sha512-6KBRNvxg6WMIwZ/euA8qVzj16qxMBzLllfmaJIP1JGAAfSvwn6nr8JDOMXeqpXPEOl71UfOG+79JwKEoT2b1Fw==", + "license": "MIT" + }, "node_modules/win-guid": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/win-guid/-/win-guid-0.2.1.tgz", diff --git a/scripts/whatsapp-bridge/package.json b/scripts/whatsapp-bridge/package.json index 7db81f699e..2d32560f44 100644 --- 
a/scripts/whatsapp-bridge/package.json +++ b/scripts/whatsapp-bridge/package.json @@ -8,7 +8,7 @@ "start": "node bridge.js" }, "dependencies": { - "@whiskeysockets/baileys": "7.0.0-rc.9", + "@whiskeysockets/baileys": "WhiskeySockets/Baileys#fix/abprops-abt-fetch", "express": "^4.21.0", "qrcode-terminal": "^0.12.0", "pino": "^9.0.0" diff --git a/tests/agent/test_local_stream_timeout.py b/tests/agent/test_local_stream_timeout.py index 929f2e3c84..8184dd2d49 100644 --- a/tests/agent/test_local_stream_timeout.py +++ b/tests/agent/test_local_stream_timeout.py @@ -22,6 +22,9 @@ class TestLocalStreamReadTimeout: "http://0.0.0.0:5000", "http://192.168.1.100:8000", "http://10.0.0.5:1234", + "http://host.docker.internal:11434", + "http://host.containers.internal:11434", + "http://host.lima.internal:11434", ]) def test_local_endpoint_bumps_read_timeout(self, base_url): """Local endpoint + default timeout -> bumps to base_timeout.""" @@ -68,3 +71,38 @@ class TestLocalStreamReadTimeout: if _stream_read_timeout == 120.0 and base_url and is_local_endpoint(base_url): _stream_read_timeout = _base_timeout assert _stream_read_timeout == 120.0 + + +class TestIsLocalEndpoint: + """Direct unit tests for is_local_endpoint.""" + + @pytest.mark.parametrize("url", [ + "http://localhost:11434", + "http://127.0.0.1:8080", + "http://0.0.0.0:5000", + "http://[::1]:11434", + "http://192.168.1.100:8000", + "http://10.0.0.5:1234", + "http://172.17.0.1:11434", + ]) + def test_classic_local_addresses(self, url): + assert is_local_endpoint(url) is True + + @pytest.mark.parametrize("url", [ + "http://host.docker.internal:11434", + "http://host.docker.internal:8080/v1", + "http://gateway.docker.internal:11434", + "http://host.containers.internal:11434", + "http://host.lima.internal:11434", + ]) + def test_container_dns_names(self, url): + assert is_local_endpoint(url) is True + + @pytest.mark.parametrize("url", [ + "https://api.openai.com", + "https://openrouter.ai/api", + "https://api.anthropic.com", 
+ "https://evil.docker.internal.example.com", + ]) + def test_remote_endpoints(self, url): + assert is_local_endpoint(url) is False diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index ef17af10bc..d9ca627c4f 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -211,7 +211,8 @@ def make_adapter(platform: Platform, runner=None): config = PlatformConfig(enabled=True, token="e2e-test-token") if platform == Platform.DISCORD: - with patch.object(DiscordAdapter, "_load_participated_threads", return_value=set()): + from gateway.platforms.helpers import ThreadParticipationTracker + with patch.object(ThreadParticipationTracker, "_load", return_value=set()): adapter = DiscordAdapter(config) platform_key = Platform.DISCORD elif platform == Platform.SLACK: diff --git a/tests/gateway/test_api_server.py b/tests/gateway/test_api_server.py index afc3ce9ce9..2be01fc2d1 100644 --- a/tests/gateway/test_api_server.py +++ b/tests/gateway/test_api_server.py @@ -409,11 +409,50 @@ class TestChatCompletionsEndpoint: ) assert resp.status == 200 assert "text/event-stream" in resp.headers.get("Content-Type", "") + assert resp.headers.get("X-Accel-Buffering") == "no" body = await resp.text() assert "data: " in body assert "[DONE]" in body assert "Hello!" 
in body + @pytest.mark.asyncio + async def test_stream_sends_keepalive_during_quiet_tool_gap(self, adapter): + """Idle SSE streams should send keepalive comments while tools run silently.""" + import asyncio + import gateway.platforms.api_server as api_server_mod + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + async def _mock_run_agent(**kwargs): + cb = kwargs.get("stream_delta_callback") + if cb: + cb("Working") + await asyncio.sleep(0.65) + cb("...done") + return ( + {"final_response": "Working...done", "messages": [], "api_calls": 1}, + {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}, + ) + + with ( + patch.object(api_server_mod, "CHAT_COMPLETIONS_SSE_KEEPALIVE_SECONDS", 0.01), + patch.object(adapter, "_run_agent", side_effect=_mock_run_agent), + ): + resp = await cli.post( + "/v1/chat/completions", + json={ + "model": "test", + "messages": [{"role": "user", "content": "do the thing"}], + "stream": True, + }, + ) + assert resp.status == 200 + body = await resp.text() + assert ": keepalive" in body + assert "Working" in body + assert "...done" in body + assert "[DONE]" in body + @pytest.mark.asyncio async def test_stream_survives_tool_call_none_sentinel(self, adapter): """stream_delta_callback(None) mid-stream (tool calls) must NOT kill the SSE stream. 
diff --git a/tests/gateway/test_dingtalk.py b/tests/gateway/test_dingtalk.py index 5c73253fbf..5271136502 100644 --- a/tests/gateway/test_dingtalk.py +++ b/tests/gateway/test_dingtalk.py @@ -119,28 +119,29 @@ class TestDeduplication: def test_first_message_not_duplicate(self): from gateway.platforms.dingtalk import DingTalkAdapter adapter = DingTalkAdapter(PlatformConfig(enabled=True)) - assert adapter._is_duplicate("msg-1") is False + assert adapter._dedup.is_duplicate("msg-1") is False def test_second_same_message_is_duplicate(self): from gateway.platforms.dingtalk import DingTalkAdapter adapter = DingTalkAdapter(PlatformConfig(enabled=True)) - adapter._is_duplicate("msg-1") - assert adapter._is_duplicate("msg-1") is True + adapter._dedup.is_duplicate("msg-1") + assert adapter._dedup.is_duplicate("msg-1") is True def test_different_messages_not_duplicate(self): from gateway.platforms.dingtalk import DingTalkAdapter adapter = DingTalkAdapter(PlatformConfig(enabled=True)) - adapter._is_duplicate("msg-1") - assert adapter._is_duplicate("msg-2") is False + adapter._dedup.is_duplicate("msg-1") + assert adapter._dedup.is_duplicate("msg-2") is False def test_cache_cleanup_on_overflow(self): - from gateway.platforms.dingtalk import DingTalkAdapter, DEDUP_MAX_SIZE + from gateway.platforms.dingtalk import DingTalkAdapter adapter = DingTalkAdapter(PlatformConfig(enabled=True)) + max_size = adapter._dedup._max_size # Fill beyond max - for i in range(DEDUP_MAX_SIZE + 10): - adapter._is_duplicate(f"msg-{i}") + for i in range(max_size + 10): + adapter._dedup.is_duplicate(f"msg-{i}") # Cache should have been pruned - assert len(adapter._seen_messages) <= DEDUP_MAX_SIZE + 10 + assert len(adapter._dedup._seen) <= max_size + 10 # --------------------------------------------------------------------------- @@ -253,13 +254,13 @@ class TestConnect: from gateway.platforms.dingtalk import DingTalkAdapter adapter = DingTalkAdapter(PlatformConfig(enabled=True)) 
adapter._session_webhooks["a"] = "http://x" - adapter._seen_messages["b"] = 1.0 + adapter._dedup._seen["b"] = 1.0 adapter._http_client = AsyncMock() adapter._stream_task = None await adapter.disconnect() assert len(adapter._session_webhooks) == 0 - assert len(adapter._seen_messages) == 0 + assert len(adapter._dedup._seen) == 0 assert adapter._http_client is None diff --git a/tests/gateway/test_discord_connect.py b/tests/gateway/test_discord_connect.py index dd594cf7ed..9f094dd0dd 100644 --- a/tests/gateway/test_discord_connect.py +++ b/tests/gateway/test_discord_connect.py @@ -137,4 +137,4 @@ async def test_connect_releases_token_lock_on_timeout(monkeypatch): assert ok is False assert released == [("discord-bot-token", "test-token")] - assert adapter._token_lock_identity is None + assert adapter._platform_lock_identity is None diff --git a/tests/gateway/test_discord_free_response.py b/tests/gateway/test_discord_free_response.py index bc63c14f5a..29f65efc67 100644 --- a/tests/gateway/test_discord_free_response.py +++ b/tests/gateway/test_discord_free_response.py @@ -302,7 +302,7 @@ async def test_discord_bot_thread_skips_mention_requirement(adapter, monkeypatch monkeypatch.setenv("DISCORD_AUTO_THREAD", "false") # Simulate bot having previously participated in thread 456 - adapter._bot_participated_threads.add("456") + adapter._threads.mark("456") thread = FakeThread(channel_id=456, name="existing thread") message = make_message(channel=thread, content="follow-up without mention") @@ -344,7 +344,7 @@ async def test_discord_auto_thread_tracks_participation(adapter, monkeypatch): await adapter._handle_message(message) - assert "555" in adapter._bot_participated_threads + assert "555" in adapter._threads @pytest.mark.asyncio @@ -358,4 +358,4 @@ async def test_discord_thread_participation_tracked_on_dispatch(adapter, monkeyp await adapter._handle_message(message) - assert "777" in adapter._bot_participated_threads + assert "777" in adapter._threads diff --git 
a/tests/gateway/test_discord_thread_persistence.py b/tests/gateway/test_discord_thread_persistence.py index 0288b620d2..083f61ac7c 100644 --- a/tests/gateway/test_discord_thread_persistence.py +++ b/tests/gateway/test_discord_thread_persistence.py @@ -1,6 +1,6 @@ """Tests for Discord thread participation persistence. -Verifies that _bot_participated_threads survives adapter restarts by +Verifies that _threads (ThreadParticipationTracker) survives adapter restarts by being persisted to ~/.hermes/discord_threads.json. """ @@ -25,13 +25,13 @@ class TestDiscordThreadPersistence: def test_starts_empty_when_no_state_file(self, tmp_path): adapter = self._make_adapter(tmp_path) - assert adapter._bot_participated_threads == set() + assert "$nonexistent" not in adapter._threads def test_track_thread_persists_to_disk(self, tmp_path): adapter = self._make_adapter(tmp_path) with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}): - adapter._track_thread("111") - adapter._track_thread("222") + adapter._threads.mark("111") + adapter._threads.mark("222") state_file = tmp_path / "discord_threads.json" assert state_file.exists() @@ -42,42 +42,43 @@ class TestDiscordThreadPersistence: """Threads tracked by one adapter instance are visible to the next.""" adapter1 = self._make_adapter(tmp_path) with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}): - adapter1._track_thread("aaa") - adapter1._track_thread("bbb") + adapter1._threads.mark("aaa") + adapter1._threads.mark("bbb") adapter2 = self._make_adapter(tmp_path) - assert "aaa" in adapter2._bot_participated_threads - assert "bbb" in adapter2._bot_participated_threads + assert "aaa" in adapter2._threads + assert "bbb" in adapter2._threads def test_duplicate_track_does_not_double_save(self, tmp_path): adapter = self._make_adapter(tmp_path) with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}): - adapter._track_thread("111") - adapter._track_thread("111") # no-op + adapter._threads.mark("111") + 
adapter._threads.mark("111") # no-op saved = json.loads((tmp_path / "discord_threads.json").read_text()) assert saved.count("111") == 1 def test_caps_at_max_tracked_threads(self, tmp_path): adapter = self._make_adapter(tmp_path) - adapter._MAX_TRACKED_THREADS = 5 + adapter._threads._max_tracked = 5 with patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}): for i in range(10): - adapter._track_thread(str(i)) + adapter._threads.mark(str(i)) - assert len(adapter._bot_participated_threads) == 5 + saved = json.loads((tmp_path / "discord_threads.json").read_text()) + assert len(saved) == 5 def test_corrupted_state_file_falls_back_to_empty(self, tmp_path): state_file = tmp_path / "discord_threads.json" state_file.write_text("not valid json{{{") adapter = self._make_adapter(tmp_path) - assert adapter._bot_participated_threads == set() + assert "$nonexistent" not in adapter._threads def test_missing_hermes_home_does_not_crash(self, tmp_path): """Load/save tolerate missing directories.""" fake_home = tmp_path / "nonexistent" / "deep" with patch.dict(os.environ, {"HERMES_HOME": str(fake_home)}): - from gateway.platforms.discord import DiscordAdapter - # _load should return empty set, not crash - threads = DiscordAdapter._load_participated_threads() - assert threads == set() + from gateway.platforms.helpers import ThreadParticipationTracker + # ThreadParticipationTracker should return empty set, not crash + tracker = ThreadParticipationTracker("discord") + assert "$test" not in tracker diff --git a/tests/gateway/test_internal_event_bypass_pairing.py b/tests/gateway/test_internal_event_bypass_pairing.py index 05b093b04a..46a96e5aa2 100644 --- a/tests/gateway/test_internal_event_bypass_pairing.py +++ b/tests/gateway/test_internal_event_bypass_pairing.py @@ -195,6 +195,105 @@ async def test_internal_event_does_not_trigger_pairing(monkeypatch, tmp_path): ) +@pytest.mark.asyncio +async def test_notify_on_complete_preserves_user_identity(monkeypatch, tmp_path): + """Synthetic 
completion event should carry user_id and user_name from the watcher.""" + import tools.process_registry as pr_module + + sessions = [ + SimpleNamespace( + output_buffer="done\n", exited=True, exit_code=0, command="echo test" + ), + ] + monkeypatch.setattr(pr_module, "process_registry", _FakeRegistry(sessions)) + + async def _instant_sleep(*_a, **_kw): + pass + monkeypatch.setattr(asyncio, "sleep", _instant_sleep) + + runner = _build_runner(monkeypatch, tmp_path) + adapter = runner.adapters[Platform.DISCORD] + + watcher = _watcher_dict_with_notify() + watcher["user_id"] = "user-42" + watcher["user_name"] = "alice" + + await runner._run_process_watcher(watcher) + + assert adapter.handle_message.await_count == 1 + event = adapter.handle_message.await_args.args[0] + assert event.source.user_id == "user-42" + assert event.source.user_name == "alice" + + +@pytest.mark.asyncio +async def test_none_user_id_skips_pairing(monkeypatch, tmp_path): + """A non-internal event with user_id=None should be silently dropped.""" + import gateway.run as gateway_run + + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + (tmp_path / "config.yaml").write_text("", encoding="utf-8") + + runner = GatewayRunner(GatewayConfig()) + adapter = SimpleNamespace(send=AsyncMock()) + runner.adapters[Platform.TELEGRAM] = adapter + + source = SessionSource( + platform=Platform.TELEGRAM, + chat_id="123", + chat_type="dm", + user_id=None, + ) + event = MessageEvent( + text="service message", + source=source, + internal=False, + ) + + result = await runner._handle_message(event) + + # Should return None (dropped) and NOT send any pairing message + assert result is None + assert adapter.send.await_count == 0 + + +@pytest.mark.asyncio +async def test_none_user_id_does_not_generate_pairing_code(monkeypatch, tmp_path): + """A message with user_id=None must never call generate_code.""" + import gateway.run as gateway_run + + monkeypatch.setattr(gateway_run, "_hermes_home", tmp_path) + (tmp_path / 
"config.yaml").write_text("", encoding="utf-8") + + runner = GatewayRunner(GatewayConfig()) + adapter = SimpleNamespace(send=AsyncMock()) + runner.adapters[Platform.DISCORD] = adapter + + generate_called = False + original_generate = runner.pairing_store.generate_code + + def tracking_generate(*args, **kwargs): + nonlocal generate_called + generate_called = True + return original_generate(*args, **kwargs) + + runner.pairing_store.generate_code = tracking_generate + + source = SessionSource( + platform=Platform.DISCORD, + chat_id="456", + chat_type="dm", + user_id=None, + ) + event = MessageEvent(text="anonymous", source=source, internal=False) + + await runner._handle_message(event) + + assert not generate_called, ( + "Pairing code should NOT be generated for messages with user_id=None" + ) + + @pytest.mark.asyncio async def test_non_internal_event_without_user_triggers_pairing(monkeypatch, tmp_path): """Verify the normal (non-internal) path still triggers pairing for unknown users.""" diff --git a/tests/gateway/test_matrix_mention.py b/tests/gateway/test_matrix_mention.py index d36c2b7657..873b873c23 100644 --- a/tests/gateway/test_matrix_mention.py +++ b/tests/gateway/test_matrix_mention.py @@ -247,7 +247,7 @@ async def test_require_mention_bot_participated_thread(monkeypatch): monkeypatch.setenv("MATRIX_AUTO_THREAD", "false") adapter = _make_adapter() - adapter._bot_participated_threads.add("$thread1") + adapter._threads.mark("$thread1") event = _make_event("hello without mention", thread_id="$thread1") @@ -298,7 +298,7 @@ async def test_auto_thread_preserves_existing_thread(monkeypatch): monkeypatch.delenv("MATRIX_AUTO_THREAD", raising=False) adapter = _make_adapter() - adapter._bot_participated_threads.add("$thread_root") + adapter._threads.mark("$thread_root") event = _make_event("reply in thread", thread_id="$thread_root") await adapter._on_room_message(event) @@ -340,17 +340,17 @@ async def test_auto_thread_disabled(monkeypatch): @pytest.mark.asyncio async 
def test_auto_thread_tracks_participation(monkeypatch): - """Auto-created threads are tracked in _bot_participated_threads.""" + """Auto-created threads are tracked in _threads.""" monkeypatch.setenv("MATRIX_REQUIRE_MENTION", "false") monkeypatch.delenv("MATRIX_AUTO_THREAD", raising=False) adapter = _make_adapter() event = _make_event("hello", event_id="$msg1") - with patch.object(adapter, "_save_participated_threads"): + with patch.object(adapter._threads, "_save"): await adapter._on_room_message(event) - assert "$msg1" in adapter._bot_participated_threads + assert "$msg1" in adapter._threads # --------------------------------------------------------------------------- @@ -361,56 +361,54 @@ async def test_auto_thread_tracks_participation(monkeypatch): class TestThreadPersistence: def test_empty_state_file(self, tmp_path, monkeypatch): """No state file → empty set.""" - from gateway.platforms.matrix import MatrixAdapter + from gateway.platforms.helpers import ThreadParticipationTracker monkeypatch.setattr( - MatrixAdapter, "_thread_state_path", - staticmethod(lambda: tmp_path / "matrix_threads.json"), + ThreadParticipationTracker, "_state_path", + lambda self: tmp_path / "matrix_threads.json", ) adapter = _make_adapter() - loaded = adapter._load_participated_threads() - assert loaded == set() + assert "$nonexistent" not in adapter._threads def test_track_thread_persists(self, tmp_path, monkeypatch): - """_track_thread writes to disk.""" - from gateway.platforms.matrix import MatrixAdapter + """mark() writes to disk.""" + from gateway.platforms.helpers import ThreadParticipationTracker state_path = tmp_path / "matrix_threads.json" monkeypatch.setattr( - MatrixAdapter, "_thread_state_path", - staticmethod(lambda: state_path), + ThreadParticipationTracker, "_state_path", + lambda self: state_path, ) adapter = _make_adapter() - adapter._track_thread("$thread_abc") + adapter._threads.mark("$thread_abc") data = json.loads(state_path.read_text()) assert "$thread_abc" in 
data def test_threads_survive_reload(self, tmp_path, monkeypatch): """Persisted threads are loaded by a new adapter instance.""" - from gateway.platforms.matrix import MatrixAdapter + from gateway.platforms.helpers import ThreadParticipationTracker state_path = tmp_path / "matrix_threads.json" state_path.write_text(json.dumps(["$t1", "$t2"])) monkeypatch.setattr( - MatrixAdapter, "_thread_state_path", - staticmethod(lambda: state_path), + ThreadParticipationTracker, "_state_path", + lambda self: state_path, ) adapter = _make_adapter() - assert "$t1" in adapter._bot_participated_threads - assert "$t2" in adapter._bot_participated_threads + assert "$t1" in adapter._threads + assert "$t2" in adapter._threads def test_cap_max_tracked_threads(self, tmp_path, monkeypatch): - """Thread set is trimmed to _MAX_TRACKED_THREADS.""" - from gateway.platforms.matrix import MatrixAdapter + """Thread set is trimmed to max_tracked.""" + from gateway.platforms.helpers import ThreadParticipationTracker state_path = tmp_path / "matrix_threads.json" monkeypatch.setattr( - MatrixAdapter, "_thread_state_path", - staticmethod(lambda: state_path), + ThreadParticipationTracker, "_state_path", + lambda self: state_path, ) adapter = _make_adapter() - adapter._MAX_TRACKED_THREADS = 5 + adapter._threads._max_tracked = 5 for i in range(10): - adapter._bot_participated_threads.add(f"$t{i}") - adapter._save_participated_threads() + adapter._threads.mark(f"$t{i}") data = json.loads(state_path.read_text()) assert len(data) == 5 @@ -447,7 +445,7 @@ async def test_dm_mention_thread_creates_thread(monkeypatch): _set_dm(adapter) event = _make_event("@hermes:example.org help me", event_id="$dm1") - with patch.object(adapter, "_save_participated_threads"): + with patch.object(adapter._threads, "_save"): await adapter._on_room_message(event) adapter.handle_message.assert_awaited_once() @@ -480,7 +478,7 @@ async def test_dm_mention_thread_preserves_existing_thread(monkeypatch): adapter = _make_adapter() 
_set_dm(adapter) - adapter._bot_participated_threads.add("$existing_thread") + adapter._threads.mark("$existing_thread") event = _make_event("@hermes:example.org help me", thread_id="$existing_thread") await adapter._on_room_message(event) @@ -491,7 +489,7 @@ async def test_dm_mention_thread_preserves_existing_thread(monkeypatch): @pytest.mark.asyncio async def test_dm_mention_thread_tracks_participation(monkeypatch): - """DM mention-thread tracks the thread in _bot_participated_threads.""" + """DM mention-thread tracks the thread in _threads.""" monkeypatch.setenv("MATRIX_DM_MENTION_THREADS", "true") monkeypatch.setenv("MATRIX_AUTO_THREAD", "false") @@ -499,10 +497,10 @@ async def test_dm_mention_thread_tracks_participation(monkeypatch): _set_dm(adapter) event = _make_event("@hermes:example.org help", event_id="$dm1") - with patch.object(adapter, "_save_participated_threads"): + with patch.object(adapter._threads, "_save"): await adapter._on_room_message(event) - assert "$dm1" in adapter._bot_participated_threads + assert "$dm1" in adapter._threads # --------------------------------------------------------------------------- diff --git a/tests/gateway/test_mattermost.py b/tests/gateway/test_mattermost.py index 7d47c0a3e1..56e46f6364 100644 --- a/tests/gateway/test_mattermost.py +++ b/tests/gateway/test_mattermost.py @@ -614,25 +614,27 @@ class TestMattermostDedup: assert self.adapter.handle_message.call_count == 2 def test_prune_seen_clears_expired(self): - """_prune_seen should remove entries older than _SEEN_TTL.""" + """Dedup cache should remove entries older than TTL on overflow.""" now = time.time() + dedup = self.adapter._dedup # Fill with enough expired entries to trigger pruning - for i in range(self.adapter._SEEN_MAX + 10): - self.adapter._seen_posts[f"old_{i}"] = now - 600 # 10 min ago + for i in range(dedup._max_size + 10): + dedup._seen[f"old_{i}"] = now - 600 # 10 min ago (older than default TTL) # Add a fresh one - self.adapter._seen_posts["fresh"] = 
now + dedup._seen["fresh"] = now - self.adapter._prune_seen() + # Trigger pruning by calling is_duplicate with a new entry (over max_size) + dedup.is_duplicate("trigger_prune") # Old entries should be pruned, fresh one kept - assert "fresh" in self.adapter._seen_posts - assert len(self.adapter._seen_posts) < self.adapter._SEEN_MAX + assert "fresh" in dedup._seen + assert len(dedup._seen) < dedup._max_size + 10 def test_seen_cache_tracks_post_ids(self): - """Posts are tracked in _seen_posts dict.""" - self.adapter._seen_posts["test_post"] = time.time() - assert "test_post" in self.adapter._seen_posts + """Posts are tracked in the dedup cache.""" + self.adapter._dedup._seen["test_post"] = time.time() + assert "test_post" in self.adapter._dedup._seen # --------------------------------------------------------------------------- diff --git a/tests/gateway/test_queue_consumption.py b/tests/gateway/test_queue_consumption.py index 2a4dd4ff02..50effc139d 100644 --- a/tests/gateway/test_queue_consumption.py +++ b/tests/gateway/test_queue_consumption.py @@ -10,6 +10,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest +from gateway.run import _dequeue_pending_event from gateway.platforms.base import ( BasePlatformAdapter, MessageEvent, @@ -79,6 +80,26 @@ class TestQueueMessageStorage: # Should be consumed (cleared) assert adapter.get_pending_message(session_key) is None + def test_dequeue_pending_event_preserves_voice_media_metadata(self): + adapter = _StubAdapter() + session_key = "telegram:user:voice" + event = MessageEvent( + text="", + message_type=MessageType.VOICE, + source=MagicMock(chat_id="123", platform=Platform.TELEGRAM), + message_id="voice-q1", + media_urls=["/tmp/voice.ogg"], + media_types=["audio/ogg"], + ) + adapter._pending_messages[session_key] = event + + retrieved = _dequeue_pending_event(adapter, session_key) + + assert retrieved is event + assert retrieved.media_urls == ["/tmp/voice.ogg"] + assert retrieved.media_types == 
["audio/ogg"] + assert adapter.get_pending_message(session_key) is None + def test_queue_does_not_set_interrupt_event(self): """The whole point of /queue — no interrupt signal.""" adapter = _StubAdapter() diff --git a/tests/gateway/test_session_env.py b/tests/gateway/test_session_env.py index a7f1345b77..b75e267f11 100644 --- a/tests/gateway/test_session_env.py +++ b/tests/gateway/test_session_env.py @@ -18,6 +18,8 @@ def test_set_session_env_sets_contextvars(monkeypatch): chat_id="-1001", chat_name="Group", chat_type="group", + user_id="123456", + user_name="alice", thread_id="17585", ) context = SessionContext(source=source, connected_platforms=[], home_channels={}) @@ -25,6 +27,8 @@ def test_set_session_env_sets_contextvars(monkeypatch): monkeypatch.delenv("HERMES_SESSION_PLATFORM", raising=False) monkeypatch.delenv("HERMES_SESSION_CHAT_ID", raising=False) monkeypatch.delenv("HERMES_SESSION_CHAT_NAME", raising=False) + monkeypatch.delenv("HERMES_SESSION_USER_ID", raising=False) + monkeypatch.delenv("HERMES_SESSION_USER_NAME", raising=False) monkeypatch.delenv("HERMES_SESSION_THREAD_ID", raising=False) tokens = runner._set_session_env(context) @@ -33,6 +37,8 @@ def test_set_session_env_sets_contextvars(monkeypatch): assert get_session_env("HERMES_SESSION_PLATFORM") == "telegram" assert get_session_env("HERMES_SESSION_CHAT_ID") == "-1001" assert get_session_env("HERMES_SESSION_CHAT_NAME") == "Group" + assert get_session_env("HERMES_SESSION_USER_ID") == "123456" + assert get_session_env("HERMES_SESSION_USER_NAME") == "alice" assert get_session_env("HERMES_SESSION_THREAD_ID") == "17585" # os.environ should NOT be touched @@ -50,6 +56,8 @@ def test_clear_session_env_restores_previous_state(monkeypatch): monkeypatch.delenv("HERMES_SESSION_PLATFORM", raising=False) monkeypatch.delenv("HERMES_SESSION_CHAT_ID", raising=False) monkeypatch.delenv("HERMES_SESSION_CHAT_NAME", raising=False) + monkeypatch.delenv("HERMES_SESSION_USER_ID", raising=False) + 
monkeypatch.delenv("HERMES_SESSION_USER_NAME", raising=False) monkeypatch.delenv("HERMES_SESSION_THREAD_ID", raising=False) source = SessionSource( @@ -57,12 +65,15 @@ def test_clear_session_env_restores_previous_state(monkeypatch): chat_id="-1001", chat_name="Group", chat_type="group", + user_id="123456", + user_name="alice", thread_id="17585", ) context = SessionContext(source=source, connected_platforms=[], home_channels={}) tokens = runner._set_session_env(context) assert get_session_env("HERMES_SESSION_PLATFORM") == "telegram" + assert get_session_env("HERMES_SESSION_USER_ID") == "123456" runner._clear_session_env(tokens) @@ -70,6 +81,8 @@ def test_clear_session_env_restores_previous_state(monkeypatch): assert get_session_env("HERMES_SESSION_PLATFORM") == "" assert get_session_env("HERMES_SESSION_CHAT_ID") == "" assert get_session_env("HERMES_SESSION_CHAT_NAME") == "" + assert get_session_env("HERMES_SESSION_USER_ID") == "" + assert get_session_env("HERMES_SESSION_USER_NAME") == "" assert get_session_env("HERMES_SESSION_THREAD_ID") == "" diff --git a/tests/gateway/test_signal.py b/tests/gateway/test_signal.py index ae985300d1..265f9be783 100644 --- a/tests/gateway/test_signal.py +++ b/tests/gateway/test_signal.py @@ -114,16 +114,16 @@ class TestSignalAdapterInit: class TestSignalHelpers: def test_redact_phone_long(self): - from gateway.platforms.signal import _redact_phone - assert _redact_phone("+15551234567") == "+155****4567" + from gateway.platforms.helpers import redact_phone + assert redact_phone("+155****4567") == "+155****4567" def test_redact_phone_short(self): - from gateway.platforms.signal import _redact_phone - assert _redact_phone("+12345") == "+1****45" + from gateway.platforms.helpers import redact_phone + assert redact_phone("+12345") == "+1****45" def test_redact_phone_empty(self): - from gateway.platforms.signal import _redact_phone - assert _redact_phone("") == "" + from gateway.platforms.helpers import redact_phone + assert 
redact_phone("") == "" def test_parse_comma_list(self): from gateway.platforms.signal import _parse_comma_list diff --git a/tests/gateway/test_sms.py b/tests/gateway/test_sms.py index 54c1edf237..d8a1589bdf 100644 --- a/tests/gateway/test_sms.py +++ b/tests/gateway/test_sms.py @@ -1,11 +1,14 @@ """Tests for SMS (Twilio) platform integration. Covers config loading, format/truncate, echo prevention, -requirements check, and toolset verification. +requirements check, toolset verification, and Twilio signature validation. """ +import base64 +import hashlib +import hmac import os -from unittest.mock import patch +from unittest.mock import AsyncMock, MagicMock, patch import pytest @@ -213,3 +216,335 @@ class TestSmsToolset: from tools.cronjob_tools import CRONJOB_SCHEMA deliver_desc = CRONJOB_SCHEMA["parameters"]["properties"]["deliver"]["description"] assert "sms" in deliver_desc.lower() + + +# ── Webhook host configuration ───────────────────────────────────── + +class TestWebhookHostConfig: + """Verify SMS_WEBHOOK_HOST env var and default.""" + + def test_default_host_is_all_interfaces(self): + from gateway.platforms.sms import DEFAULT_WEBHOOK_HOST + assert DEFAULT_WEBHOOK_HOST == "0.0.0.0" + + def test_host_from_env(self): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": "tok", + "TWILIO_PHONE_NUMBER": "+15550001111", + "SMS_WEBHOOK_HOST": "127.0.0.1", + } + with patch.dict(os.environ, env): + pc = PlatformConfig(enabled=True, api_key="tok") + adapter = SmsAdapter(pc) + assert adapter._webhook_host == "127.0.0.1" + + def test_webhook_url_from_env(self): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": "tok", + "TWILIO_PHONE_NUMBER": "+15550001111", + "SMS_WEBHOOK_URL": "https://example.com/webhooks/twilio", + } + with patch.dict(os.environ, env): + pc = PlatformConfig(enabled=True, api_key="tok") + adapter = SmsAdapter(pc) + assert 
adapter._webhook_url == "https://example.com/webhooks/twilio" + + def test_webhook_url_stripped(self): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": "tok", + "TWILIO_PHONE_NUMBER": "+15550001111", + "SMS_WEBHOOK_URL": " https://example.com/webhooks/twilio ", + } + with patch.dict(os.environ, env): + pc = PlatformConfig(enabled=True, api_key="tok") + adapter = SmsAdapter(pc) + assert adapter._webhook_url == "https://example.com/webhooks/twilio" + + +# ── Startup guard (fail-closed) ──────────────────────────────────── + +class TestStartupGuard: + """Adapter must refuse to start without SMS_WEBHOOK_URL.""" + + def _make_adapter(self, extra_env=None): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": "tok", + "TWILIO_PHONE_NUMBER": "+15550001111", + } + if extra_env: + env.update(extra_env) + with patch.dict(os.environ, env, clear=False): + pc = PlatformConfig(enabled=True, api_key="tok") + adapter = SmsAdapter(pc) + return adapter + + @pytest.mark.asyncio + async def test_refuses_start_without_webhook_url(self): + adapter = self._make_adapter() + result = await adapter.connect() + assert result is False + + @pytest.mark.asyncio + async def test_insecure_flag_allows_start_without_url(self): + mock_session = AsyncMock() + with patch.dict(os.environ, {"SMS_INSECURE_NO_SIGNATURE": "true"}), \ + patch("aiohttp.web.AppRunner") as mock_runner_cls, \ + patch("aiohttp.web.TCPSite") as mock_site_cls, \ + patch("aiohttp.ClientSession", return_value=mock_session): + mock_runner_cls.return_value.setup = AsyncMock() + mock_runner_cls.return_value.cleanup = AsyncMock() + mock_site_cls.return_value.start = AsyncMock() + adapter = self._make_adapter() + result = await adapter.connect() + assert result is True + await adapter.disconnect() + + @pytest.mark.asyncio + async def test_webhook_url_allows_start(self): + mock_session = AsyncMock() + with 
patch("aiohttp.web.AppRunner") as mock_runner_cls, \ + patch("aiohttp.web.TCPSite") as mock_site_cls, \ + patch("aiohttp.ClientSession", return_value=mock_session): + mock_runner_cls.return_value.setup = AsyncMock() + mock_runner_cls.return_value.cleanup = AsyncMock() + mock_site_cls.return_value.start = AsyncMock() + adapter = self._make_adapter( + extra_env={"SMS_WEBHOOK_URL": "https://example.com/webhooks/twilio"} + ) + result = await adapter.connect() + assert result is True + await adapter.disconnect() + + +# ── Twilio signature validation ──────────────────────────────────── + +def _compute_twilio_signature(auth_token, url, params): + """Reference implementation of Twilio's signature algorithm.""" + data_to_sign = url + for key in sorted(params.keys()): + data_to_sign += key + params[key] + mac = hmac.new( + auth_token.encode("utf-8"), + data_to_sign.encode("utf-8"), + hashlib.sha1, + ) + return base64.b64encode(mac.digest()).decode("utf-8") + + +class TestTwilioSignatureValidation: + """Unit tests for SmsAdapter._validate_twilio_signature.""" + + def _make_adapter(self, auth_token="test_token_secret"): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": auth_token, + "TWILIO_PHONE_NUMBER": "+15550001111", + } + with patch.dict(os.environ, env): + pc = PlatformConfig(enabled=True, api_key=auth_token) + adapter = SmsAdapter(pc) + return adapter + + def test_valid_signature_accepted(self): + adapter = self._make_adapter() + url = "https://example.com/webhooks/twilio" + params = {"From": "+15551234567", "Body": "hello", "To": "+15550001111"} + sig = _compute_twilio_signature("test_token_secret", url, params) + assert adapter._validate_twilio_signature(url, params, sig) is True + + def test_invalid_signature_rejected(self): + adapter = self._make_adapter() + url = "https://example.com/webhooks/twilio" + params = {"From": "+15551234567", "Body": "hello"} + assert 
adapter._validate_twilio_signature(url, params, "badsig") is False + + def test_wrong_token_rejected(self): + adapter = self._make_adapter(auth_token="correct_token") + url = "https://example.com/webhooks/twilio" + params = {"From": "+15551234567", "Body": "hello"} + sig = _compute_twilio_signature("wrong_token", url, params) + assert adapter._validate_twilio_signature(url, params, sig) is False + + def test_params_sorted_by_key(self): + """Signature must be computed with params sorted alphabetically.""" + adapter = self._make_adapter() + url = "https://example.com/webhooks/twilio" + params = {"Zebra": "last", "Alpha": "first", "Middle": "mid"} + sig = _compute_twilio_signature("test_token_secret", url, params) + assert adapter._validate_twilio_signature(url, params, sig) is True + + def test_empty_param_values_included(self): + """Blank values must be included in signature computation.""" + adapter = self._make_adapter() + url = "https://example.com/webhooks/twilio" + params = {"From": "+15551234567", "Body": "", "SmsStatus": "received"} + sig = _compute_twilio_signature("test_token_secret", url, params) + assert adapter._validate_twilio_signature(url, params, sig) is True + + def test_url_matters(self): + """Different URLs produce different signatures.""" + adapter = self._make_adapter() + params = {"Body": "hello"} + sig = _compute_twilio_signature( + "test_token_secret", "https://a.com/webhooks/twilio", params + ) + assert adapter._validate_twilio_signature( + "https://b.com/webhooks/twilio", params, sig + ) is False + + def test_port_variant_443_matches_without_port(self): + """Signature for https URL with :443 validates against URL without port.""" + adapter = self._make_adapter() + params = {"From": "+15551234567", "Body": "hello"} + sig = _compute_twilio_signature( + "test_token_secret", "https://example.com:443/webhooks/twilio", params + ) + assert adapter._validate_twilio_signature( + "https://example.com/webhooks/twilio", params, sig + ) is True + + def 
test_port_variant_without_port_matches_443(self): + """Signature for https URL without port validates against URL with :443.""" + adapter = self._make_adapter() + params = {"From": "+15551234567", "Body": "hello"} + sig = _compute_twilio_signature( + "test_token_secret", "https://example.com/webhooks/twilio", params + ) + assert adapter._validate_twilio_signature( + "https://example.com:443/webhooks/twilio", params, sig + ) is True + + def test_non_standard_port_no_variant(self): + """Non-standard port must NOT match URL without port.""" + adapter = self._make_adapter() + params = {"From": "+15551234567", "Body": "hello"} + sig = _compute_twilio_signature( + "test_token_secret", "https://example.com/webhooks/twilio", params + ) + assert adapter._validate_twilio_signature( + "https://example.com:8080/webhooks/twilio", params, sig + ) is False + + def test_port_variant_http_80(self): + """Port variant also works for http with port 80.""" + adapter = self._make_adapter() + params = {"From": "+15551234567", "Body": "hello"} + sig = _compute_twilio_signature( + "test_token_secret", "http://example.com:80/webhooks/twilio", params + ) + assert adapter._validate_twilio_signature( + "http://example.com/webhooks/twilio", params, sig + ) is True + + +# ── Webhook signature enforcement (handler-level) ────────────────── + +class TestWebhookSignatureEnforcement: + """Integration tests for signature validation in _handle_webhook.""" + + def _make_adapter(self, webhook_url=""): + from gateway.platforms.sms import SmsAdapter + + env = { + "TWILIO_ACCOUNT_SID": "ACtest", + "TWILIO_AUTH_TOKEN": "test_token_secret", + "TWILIO_PHONE_NUMBER": "+15550001111", + "SMS_WEBHOOK_URL": webhook_url, + } + with patch.dict(os.environ, env): + pc = PlatformConfig(enabled=True, api_key="test_token_secret") + adapter = SmsAdapter(pc) + adapter._message_handler = AsyncMock() + return adapter + + def _mock_request(self, body, headers=None): + request = MagicMock() + request.read = 
AsyncMock(return_value=body) + request.headers = headers or {} + return request + + @pytest.mark.asyncio + async def test_insecure_flag_skips_validation(self): + """With SMS_INSECURE_NO_SIGNATURE=true and no URL, requests are accepted.""" + env = {"SMS_INSECURE_NO_SIGNATURE": "true"} + with patch.dict(os.environ, env): + adapter = self._make_adapter(webhook_url="") + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body) + resp = await adapter._handle_webhook(request) + assert resp.status == 200 + + @pytest.mark.asyncio + async def test_insecure_flag_with_url_still_validates(self): + """When both SMS_WEBHOOK_URL and SMS_INSECURE_NO_SIGNATURE are set, + validation stays active (URL takes precedence).""" + adapter = self._make_adapter(webhook_url="https://example.com/webhooks/twilio") + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body, headers={}) + resp = await adapter._handle_webhook(request) + assert resp.status == 403 + + @pytest.mark.asyncio + async def test_missing_signature_returns_403(self): + adapter = self._make_adapter(webhook_url="https://example.com/webhooks/twilio") + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body, headers={}) + resp = await adapter._handle_webhook(request) + assert resp.status == 403 + + @pytest.mark.asyncio + async def test_invalid_signature_returns_403(self): + adapter = self._make_adapter(webhook_url="https://example.com/webhooks/twilio") + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body, headers={"X-Twilio-Signature": "invalid"}) + resp = await adapter._handle_webhook(request) + assert resp.status == 403 + + @pytest.mark.asyncio + async def test_valid_signature_returns_200(self): + webhook_url = "https://example.com/webhooks/twilio" + adapter = 
self._make_adapter(webhook_url=webhook_url) + params = { + "From": "+15551234567", + "To": "+15550001111", + "Body": "hello", + "MessageSid": "SM123", + } + sig = _compute_twilio_signature("test_token_secret", webhook_url, params) + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body, headers={"X-Twilio-Signature": sig}) + resp = await adapter._handle_webhook(request) + assert resp.status == 200 + + @pytest.mark.asyncio + async def test_port_variant_signature_returns_200(self): + """Signature computed with :443 should pass when URL configured without port.""" + webhook_url = "https://example.com/webhooks/twilio" + adapter = self._make_adapter(webhook_url=webhook_url) + params = { + "From": "+15551234567", + "To": "+15550001111", + "Body": "hello", + "MessageSid": "SM123", + } + sig = _compute_twilio_signature( + "test_token_secret", "https://example.com:443/webhooks/twilio", params + ) + body = b"From=%2B15551234567&To=%2B15550001111&Body=hello&MessageSid=SM123" + request = self._mock_request(body, headers={"X-Twilio-Signature": sig}) + resp = await adapter._handle_webhook(request) + assert resp.status == 200 diff --git a/tests/gateway/test_stt_config.py b/tests/gateway/test_stt_config.py index a49e402151..23ba06af22 100644 --- a/tests/gateway/test_stt_config.py +++ b/tests/gateway/test_stt_config.py @@ -6,7 +6,9 @@ from unittest.mock import AsyncMock, patch import pytest import yaml -from gateway.config import GatewayConfig, load_gateway_config +from gateway.config import GatewayConfig, Platform, load_gateway_config +from gateway.platforms.base import MessageEvent, MessageType +from gateway.session import SessionSource def test_gateway_config_stt_disabled_from_dict_nested(): @@ -69,3 +71,46 @@ async def test_enrich_message_with_transcription_avoids_bogus_no_provider_messag assert "No STT provider is configured" not in result assert "trouble transcribing" in result assert "caption" in result + + 
+@pytest.mark.asyncio +async def test_prepare_inbound_message_text_transcribes_queued_voice_event(): + from gateway.run import GatewayRunner + + runner = GatewayRunner.__new__(GatewayRunner) + runner.config = GatewayConfig(stt_enabled=True) + runner.adapters = {} + runner._model = "test-model" + runner._base_url = "" + runner._has_setup_skill = lambda: False + + source = SessionSource( + platform=Platform.TELEGRAM, + chat_id="123", + chat_type="dm", + ) + event = MessageEvent( + text="", + message_type=MessageType.VOICE, + source=source, + media_urls=["/tmp/queued-voice.ogg"], + media_types=["audio/ogg"], + ) + + with patch( + "tools.transcription_tools.transcribe_audio", + return_value={ + "success": True, + "transcript": "queued voice transcript", + "provider": "local_command", + }, + ): + result = await runner._prepare_inbound_message_text( + event=event, + source=source, + history=[], + ) + + assert result is not None + assert "queued voice transcript" in result + assert "voice message" in result.lower() diff --git a/tests/gateway/test_telegram_conflict.py b/tests/gateway/test_telegram_conflict.py index 47a67f229b..dcf3116884 100644 --- a/tests/gateway/test_telegram_conflict.py +++ b/tests/gateway/test_telegram_conflict.py @@ -43,6 +43,8 @@ def _no_auto_discovery(monkeypatch): async def _noop(): return [] monkeypatch.setattr("gateway.platforms.telegram.discover_fallback_ips", _noop) + # Mock HTTPXRequest so the builder chain doesn't fail + monkeypatch.setattr("gateway.platforms.telegram.HTTPXRequest", lambda **kwargs: MagicMock()) @pytest.mark.asyncio @@ -57,9 +59,9 @@ async def test_connect_rejects_same_host_token_lock(monkeypatch): ok = await adapter.connect() assert ok is False - assert adapter.fatal_error_code == "telegram_token_lock" + assert adapter.fatal_error_code == "telegram-bot-token_lock" assert adapter.has_fatal_error is True - assert "already using this Telegram bot token" in adapter.fatal_error_message + assert "already in use" in 
adapter.fatal_error_message @pytest.mark.asyncio @@ -98,6 +100,8 @@ async def test_polling_conflict_retries_before_fatal(monkeypatch): ) builder = MagicMock() builder.token.return_value = builder + builder.request.return_value = builder + builder.get_updates_request.return_value = builder builder.build.return_value = app monkeypatch.setattr("gateway.platforms.telegram.Application", SimpleNamespace(builder=MagicMock(return_value=builder))) @@ -172,6 +176,8 @@ async def test_polling_conflict_becomes_fatal_after_retries(monkeypatch): ) builder = MagicMock() builder.token.return_value = builder + builder.request.return_value = builder + builder.get_updates_request.return_value = builder builder.build.return_value = app monkeypatch.setattr("gateway.platforms.telegram.Application", SimpleNamespace(builder=MagicMock(return_value=builder))) @@ -216,6 +222,8 @@ async def test_connect_marks_retryable_fatal_error_for_startup_network_failure(m builder = MagicMock() builder.token.return_value = builder + builder.request.return_value = builder + builder.get_updates_request.return_value = builder app = SimpleNamespace( bot=SimpleNamespace(delete_webhook=AsyncMock(), set_my_commands=AsyncMock()), updater=SimpleNamespace(), @@ -265,6 +273,8 @@ async def test_connect_clears_webhook_before_polling(monkeypatch): ) builder = MagicMock() builder.token.return_value = builder + builder.request.return_value = builder + builder.get_updates_request.return_value = builder builder.build.return_value = app monkeypatch.setattr( "gateway.platforms.telegram.Application", diff --git a/tests/gateway/test_weixin.py b/tests/gateway/test_weixin.py index 74b59f2f1d..bb439fa9a6 100644 --- a/tests/gateway/test_weixin.py +++ b/tests/gateway/test_weixin.py @@ -1,12 +1,14 @@ """Tests for the Weixin platform adapter.""" import asyncio +import json import os from unittest.mock import AsyncMock, patch from gateway.config import PlatformConfig from gateway.config import GatewayConfig, HomeChannel, Platform, 
_apply_env_overrides -from gateway.platforms.weixin import WeixinAdapter +from gateway.platforms import weixin +from gateway.platforms.weixin import ContextTokenStore, WeixinAdapter from tools.send_message_tool import _parse_target_ref, _send_to_platform @@ -62,15 +64,15 @@ class TestWeixinFormatting: class TestWeixinChunking: - def test_split_text_sends_top_level_newlines_as_separate_messages(self): + def test_split_text_keeps_short_multiline_message_in_single_chunk(self): adapter = _make_adapter() content = adapter.format_message("第一行\n第二行\n第三行") chunks = adapter._split_text(content) - assert chunks == ["第一行", "第二行", "第三行"] + assert chunks == ["第一行\n第二行\n第三行"] - def test_split_text_keeps_indented_followup_with_previous_line(self): + def test_split_text_keeps_short_reformatted_table_in_single_chunk(self): adapter = _make_adapter() content = adapter.format_message( @@ -81,10 +83,7 @@ class TestWeixinChunking: ) chunks = adapter._split_text(content) - assert chunks == [ - "- Setting: Timeout\n Value: 30s", - "- Setting: Retries\n Value: 3", - ] + assert chunks == [content] def test_split_text_keeps_complete_code_block_together_when_possible(self): adapter = _make_adapter() @@ -114,6 +113,23 @@ class TestWeixinChunking: assert all(len(chunk) <= adapter.MAX_MESSAGE_LENGTH for chunk in chunks) assert all(chunk.count("```") >= 2 for chunk in chunks) + def test_split_text_can_restore_legacy_multiline_splitting_via_config(self): + adapter = WeixinAdapter( + PlatformConfig( + enabled=True, + extra={ + "account_id": "acct", + "token": "***", + "split_multiline_messages": True, + }, + ) + ) + + content = adapter.format_message("第一行\n第二行\n第三行") + chunks = adapter._split_text(content) + + assert chunks == ["第一行", "第二行", "第三行"] + class TestWeixinConfig: def test_apply_env_overrides_configures_weixin(self): @@ -127,6 +143,7 @@ class TestWeixinConfig: "WEIXIN_BASE_URL": "https://ilink.example.com/", "WEIXIN_CDN_BASE_URL": "https://cdn.example.com/c2c/", "WEIXIN_DM_POLICY": 
"allowlist", + "WEIXIN_SPLIT_MULTILINE_MESSAGES": "true", "WEIXIN_ALLOWED_USERS": "wxid_1,wxid_2", "WEIXIN_HOME_CHANNEL": "wxid_1", "WEIXIN_HOME_CHANNEL_NAME": "Primary DM", @@ -142,6 +159,7 @@ class TestWeixinConfig: assert platform_config.extra["base_url"] == "https://ilink.example.com" assert platform_config.extra["cdn_base_url"] == "https://cdn.example.com/c2c" assert platform_config.extra["dm_policy"] == "allowlist" + assert platform_config.extra["split_multiline_messages"] == "true" assert platform_config.extra["allow_from"] == "wxid_1,wxid_2" assert platform_config.home_channel == HomeChannel(Platform.WEIXIN, "wxid_1", "Primary DM") @@ -171,6 +189,70 @@ class TestWeixinConfig: assert config.get_connected_platforms() == [] +class TestWeixinStatePersistence: + def test_save_weixin_account_preserves_existing_file_on_replace_failure(self, tmp_path, monkeypatch): + account_path = tmp_path / "weixin" / "accounts" / "acct.json" + account_path.parent.mkdir(parents=True, exist_ok=True) + original = {"token": "old-token", "base_url": "https://old.example.com"} + account_path.write_text(json.dumps(original), encoding="utf-8") + + def _boom(_src, _dst): + raise OSError("disk full") + + monkeypatch.setattr("utils.os.replace", _boom) + + try: + weixin.save_weixin_account( + str(tmp_path), + account_id="acct", + token="new-token", + base_url="https://new.example.com", + user_id="wxid_new", + ) + except OSError: + pass + else: + raise AssertionError("expected save_weixin_account to propagate replace failure") + + assert json.loads(account_path.read_text(encoding="utf-8")) == original + + def test_context_token_persist_preserves_existing_file_on_replace_failure(self, tmp_path, monkeypatch): + token_path = tmp_path / "weixin" / "accounts" / "acct.context-tokens.json" + token_path.parent.mkdir(parents=True, exist_ok=True) + token_path.write_text(json.dumps({"user-a": "old-token"}), encoding="utf-8") + + def _boom(_src, _dst): + raise OSError("disk full") + + 
monkeypatch.setattr("utils.os.replace", _boom) + + store = ContextTokenStore(str(tmp_path)) + with patch.object(weixin.logger, "warning") as warning_mock: + store.set("acct", "user-b", "new-token") + + assert json.loads(token_path.read_text(encoding="utf-8")) == {"user-a": "old-token"} + warning_mock.assert_called_once() + + def test_save_sync_buf_preserves_existing_file_on_replace_failure(self, tmp_path, monkeypatch): + sync_path = tmp_path / "weixin" / "accounts" / "acct.sync.json" + sync_path.parent.mkdir(parents=True, exist_ok=True) + sync_path.write_text(json.dumps({"get_updates_buf": "old-sync"}), encoding="utf-8") + + def _boom(_src, _dst): + raise OSError("disk full") + + monkeypatch.setattr("utils.os.replace", _boom) + + try: + weixin._save_sync_buf(str(tmp_path), "acct", "new-sync") + except OSError: + pass + else: + raise AssertionError("expected _save_sync_buf to propagate replace failure") + + assert json.loads(sync_path.read_text(encoding="utf-8")) == {"get_updates_buf": "old-sync"} + + class TestWeixinSendMessageIntegration: def test_parse_target_ref_accepts_weixin_ids(self): assert _parse_target_ref("weixin", "wxid_test123") == ("wxid_test123", None, True) @@ -201,6 +283,55 @@ class TestWeixinSendMessageIntegration: ) +class TestWeixinChunkDelivery: + def _connected_adapter(self) -> WeixinAdapter: + adapter = _make_adapter() + adapter._session = object() + adapter._token = "test-token" + adapter._base_url = "https://weixin.example.com" + adapter._token_store.get = lambda account_id, chat_id: "ctx-token" + return adapter + + @patch("gateway.platforms.weixin.asyncio.sleep", new_callable=AsyncMock) + @patch("gateway.platforms.weixin._send_message", new_callable=AsyncMock) + def test_send_waits_between_multiple_chunks(self, send_message_mock, sleep_mock): + adapter = self._connected_adapter() + adapter.MAX_MESSAGE_LENGTH = 12 + + # Use double newlines so _pack_markdown_blocks splits into 3 blocks + result = asyncio.run(adapter.send("wxid_test123", 
"first\n\nsecond\n\nthird")) + + assert result.success is True + assert send_message_mock.await_count == 3 + assert sleep_mock.await_count == 2 + + @patch("gateway.platforms.weixin.asyncio.sleep", new_callable=AsyncMock) + @patch("gateway.platforms.weixin._send_message", new_callable=AsyncMock) + def test_send_retries_failed_chunk_before_continuing(self, send_message_mock, sleep_mock): + adapter = self._connected_adapter() + adapter.MAX_MESSAGE_LENGTH = 12 + calls = {"count": 0} + + async def flaky_send(*args, **kwargs): + calls["count"] += 1 + if calls["count"] == 2: + raise RuntimeError("temporary iLink failure") + + send_message_mock.side_effect = flaky_send + + # Use double newlines so _pack_markdown_blocks splits into 3 blocks + result = asyncio.run(adapter.send("wxid_test123", "first\n\nsecond\n\nthird")) + + assert result.success is True + # 3 chunks, but chunk 2 fails once and retries → 4 _send_message calls total + assert send_message_mock.await_count == 4 + # The retried chunk should reuse the same client_id for deduplication + first_try = send_message_mock.await_args_list[1].kwargs + retry = send_message_mock.await_args_list[2].kwargs + assert first_try["text"] == retry["text"] + assert first_try["client_id"] == retry["client_id"] + + class TestWeixinRemoteMediaSafety: def test_download_remote_media_blocks_unsafe_urls(self): adapter = _make_adapter() diff --git a/tests/hermes_cli/test_claw.py b/tests/hermes_cli/test_claw.py index 138b21e9d8..da3002f8c4 100644 --- a/tests/hermes_cli/test_claw.py +++ b/tests/hermes_cli/test_claw.py @@ -289,12 +289,16 @@ class TestCmdMigrate: skill_conflict="skip", yes=False, ) + mock_stdin = MagicMock() + mock_stdin.isatty.return_value = True + with ( patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"), patch.object(claw_mod, "_load_migration_module", return_value=fake_mod), patch.object(claw_mod, "get_config_path", return_value=config_path), patch.object(claw_mod, "prompt_yes_no", 
return_value=True), patch.object(claw_mod, "_offer_source_archival"), + patch("sys.stdin", mock_stdin), ): claw_mod._cmd_migrate(args) @@ -377,6 +381,16 @@ class TestCmdMigrate: config_path = tmp_path / "config.yaml" config_path.write_text("") + # Preview must succeed before the confirmation prompt is shown + fake_mod = ModuleType("openclaw_to_hermes") + fake_mod.resolve_selected_options = MagicMock(return_value=set()) + fake_migrator = MagicMock() + fake_migrator.migrate.return_value = { + "summary": {"migrated": 1, "skipped": 0, "conflict": 0, "error": 0}, + "items": [{"kind": "soul", "status": "migrated", "source": "s", "destination": "d", "reason": ""}], + } + fake_mod.Migrator = MagicMock(return_value=fake_migrator) + args = Namespace( source=str(openclaw_dir), dry_run=False, preset="full", overwrite=False, @@ -384,9 +398,15 @@ class TestCmdMigrate: skill_conflict="skip", yes=False, ) + mock_stdin = MagicMock() + mock_stdin.isatty.return_value = True + with ( patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"), + patch.object(claw_mod, "_load_migration_module", return_value=fake_mod), + patch.object(claw_mod, "get_config_path", return_value=config_path), patch.object(claw_mod, "prompt_yes_no", return_value=False), + patch("sys.stdin", mock_stdin), ): claw_mod._cmd_migrate(args) @@ -448,7 +468,7 @@ class TestCmdMigrate: claw_mod._cmd_migrate(args) captured = capsys.readouterr() - assert "Migration failed" in captured.out + assert "Could not load migration script" in captured.out def test_full_preset_enables_secrets(self, tmp_path, capsys): """The 'full' preset should set migrate_secrets=True automatically.""" @@ -511,7 +531,13 @@ class TestOfferSourceArchival: source = tmp_path / ".openclaw" source.mkdir() - with patch.object(claw_mod, "prompt_yes_no", return_value=False): + mock_stdin = MagicMock() + mock_stdin.isatty.return_value = True + + with ( + patch.object(claw_mod, "prompt_yes_no", return_value=False), + patch("sys.stdin", 
mock_stdin), + ): claw_mod._offer_source_archival(source, auto_yes=False) captured = capsys.readouterr() @@ -597,10 +623,14 @@ class TestCmdCleanup: openclaw = tmp_path / ".openclaw" openclaw.mkdir() + mock_stdin = MagicMock() + mock_stdin.isatty.return_value = True + args = Namespace(source=None, dry_run=False, yes=False) with ( patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw]), patch.object(claw_mod, "prompt_yes_no", return_value=False), + patch("sys.stdin", mock_stdin), ): claw_mod._cmd_cleanup(args) diff --git a/tests/hermes_cli/test_cmd_update.py b/tests/hermes_cli/test_cmd_update.py index 9ffa809a5e..c8f284228b 100644 --- a/tests/hermes_cli/test_cmd_update.py +++ b/tests/hermes_cli/test_cmd_update.py @@ -106,6 +106,49 @@ class TestCmdUpdateBranchFallback: pull_cmds = [c for c in commands if "pull" in c] assert len(pull_cmds) == 0 + @patch("shutil.which") + @patch("subprocess.run") + def test_update_refreshes_repo_and_tui_node_dependencies( + self, mock_run, mock_which, mock_args + ): + mock_which.side_effect = {"uv": "/usr/bin/uv", "npm": "/usr/bin/npm"}.get + mock_run.side_effect = _make_run_side_effect( + branch="main", verify_ok=True, commit_count="1" + ) + + cmd_update(mock_args) + + npm_calls = [ + (call.args[0], call.kwargs.get("cwd")) + for call in mock_run.call_args_list + if call.args and call.args[0][0] == "/usr/bin/npm" + ] + + assert npm_calls == [ + ( + [ + "/usr/bin/npm", + "install", + "--silent", + "--no-fund", + "--no-audit", + "--progress=false", + ], + PROJECT_ROOT, + ), + ( + [ + "/usr/bin/npm", + "install", + "--silent", + "--no-fund", + "--no-audit", + "--progress=false", + ], + PROJECT_ROOT / "ui-tui", + ), + ] + def test_update_non_interactive_skips_migration_prompt(self, mock_args, capsys): """When stdin/stdout aren't TTYs, config migration prompt is skipped.""" with patch("shutil.which", return_value=None), patch( diff --git a/tests/hermes_cli/test_gateway.py b/tests/hermes_cli/test_gateway.py index 
955449547c..fd88a26c6a 100644 --- a/tests/hermes_cli/test_gateway.py +++ b/tests/hermes_cli/test_gateway.py @@ -260,7 +260,7 @@ class TestWaitForGatewayExit: def test_kill_gateway_processes_force_uses_helper(self, monkeypatch): calls = [] - monkeypatch.setattr(gateway, "find_gateway_pids", lambda exclude_pids=None: [11, 22]) + monkeypatch.setattr(gateway, "find_gateway_pids", lambda exclude_pids=None, all_profiles=False: [11, 22]) monkeypatch.setattr(gateway, "terminate_pid", lambda pid, force=False: calls.append((pid, force))) killed = gateway.kill_gateway_processes(force=True) diff --git a/tests/hermes_cli/test_gateway_service.py b/tests/hermes_cli/test_gateway_service.py index c5d4cb4f5d..cba3a8192f 100644 --- a/tests/hermes_cli/test_gateway_service.py +++ b/tests/hermes_cli/test_gateway_service.py @@ -1,6 +1,7 @@ """Tests for gateway service management helpers.""" import os +import pwd from pathlib import Path from types import SimpleNamespace @@ -129,7 +130,7 @@ class TestGatewayStopCleanup: monkeypatch.setattr( gateway_cli, "kill_gateway_processes", - lambda force=False: kill_calls.append(force) or 2, + lambda force=False, all_profiles=False: kill_calls.append(force) or 2, ) gateway_cli.gateway_command(SimpleNamespace(gateway_command="stop")) @@ -155,7 +156,7 @@ class TestGatewayStopCleanup: monkeypatch.setattr( gateway_cli, "kill_gateway_processes", - lambda force=False: kill_calls.append(force) or 2, + lambda force=False, all_profiles=False: kill_calls.append(force) or 2, ) gateway_cli.gateway_command(SimpleNamespace(gateway_command="stop", **{"all": True})) @@ -924,6 +925,23 @@ class TestProfileArg: assert "--profile" in plist assert "mybot" in plist + def test_launchd_plist_path_uses_real_user_home_not_profile_home(self, tmp_path, monkeypatch): + profile_dir = tmp_path / ".hermes" / "profiles" / "orcha" + profile_dir.mkdir(parents=True) + machine_home = tmp_path / "machine-home" + machine_home.mkdir() + profile_home = profile_dir / "home" + 
profile_home.mkdir() + + monkeypatch.setattr(Path, "home", lambda: profile_home) + monkeypatch.setenv("HERMES_HOME", str(profile_dir)) + monkeypatch.setattr(gateway_cli, "get_hermes_home", lambda: profile_dir) + monkeypatch.setattr(pwd, "getpwuid", lambda uid: SimpleNamespace(pw_dir=str(machine_home))) + + plist_path = gateway_cli.get_launchd_plist_path() + + assert plist_path == machine_home / "Library" / "LaunchAgents" / "ai.hermes.gateway-orcha.plist" + class TestRemapPathForUser: """Unit tests for _remap_path_for_user().""" diff --git a/tests/hermes_cli/test_runtime_provider_resolution.py b/tests/hermes_cli/test_runtime_provider_resolution.py index f46b2dd133..20486a805b 100644 --- a/tests/hermes_cli/test_runtime_provider_resolution.py +++ b/tests/hermes_cli/test_runtime_provider_resolution.py @@ -1214,3 +1214,115 @@ def test_openrouter_provider_not_affected_by_custom_fix(monkeypatch): resolved = rp.resolve_runtime_provider(requested="openrouter") assert resolved["provider"] == "openrouter" + + +# ------------------------------------------------------------------ +# fix #7828 — custom_providers model field must propagate to runtime +# ------------------------------------------------------------------ + + +def test_get_named_custom_provider_includes_model(monkeypatch): + """_get_named_custom_provider should include the model field from config.""" + monkeypatch.setattr(rp, "load_config", lambda: { + "custom_providers": [{ + "name": "my-dashscope", + "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "api_key": "test-key", + "api_mode": "chat_completions", + "model": "qwen3.6-plus", + }], + }) + + result = rp._get_named_custom_provider("my-dashscope") + assert result is not None + assert result["model"] == "qwen3.6-plus" + + +def test_get_named_custom_provider_excludes_empty_model(monkeypatch): + """Empty or whitespace-only model field should not appear in result.""" + for model_val in ["", " ", None]: + entry = { + "name": "test-ep", + "base_url": 
"https://example.com/v1", + "api_key": "key", + } + if model_val is not None: + entry["model"] = model_val + + monkeypatch.setattr(rp, "load_config", lambda e=entry: { + "custom_providers": [e], + }) + + result = rp._get_named_custom_provider("test-ep") + assert result is not None + assert "model" not in result, ( + f"model field {model_val!r} should not be included in result" + ) + + +def test_named_custom_runtime_propagates_model_direct_path(monkeypatch): + """Model should propagate through the direct (non-pool) resolution path.""" + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "my-server") + monkeypatch.setattr( + rp, "_get_named_custom_provider", + lambda p: { + "name": "my-server", + "base_url": "http://localhost:8000/v1", + "api_key": "test-key", + "model": "qwen3.6-plus", + }, + ) + # Ensure pool doesn't intercept + monkeypatch.setattr(rp, "_try_resolve_from_custom_pool", lambda *a, **k: None) + + resolved = rp.resolve_runtime_provider(requested="my-server") + assert resolved["model"] == "qwen3.6-plus" + assert resolved["provider"] == "custom" + + +def test_named_custom_runtime_propagates_model_pool_path(monkeypatch): + """Model should propagate even when credential pool handles credentials.""" + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "my-server") + monkeypatch.setattr( + rp, "_get_named_custom_provider", + lambda p: { + "name": "my-server", + "base_url": "http://localhost:8000/v1", + "api_key": "test-key", + "model": "qwen3.6-plus", + }, + ) + # Pool returns a result (intercepting the normal path) + monkeypatch.setattr( + rp, "_try_resolve_from_custom_pool", + lambda *a, **k: { + "provider": "custom", + "api_mode": "chat_completions", + "base_url": "http://localhost:8000/v1", + "api_key": "pool-key", + "source": "pool:custom:my-server", + }, + ) + + resolved = rp.resolve_runtime_provider(requested="my-server") + assert resolved["model"] == "qwen3.6-plus", ( + "model must be injected into pool result" + ) + assert 
resolved["api_key"] == "pool-key", "pool credentials should be used" + + +def test_named_custom_runtime_no_model_when_absent(monkeypatch): + """When custom_providers entry has no model field, runtime should not either.""" + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "my-server") + monkeypatch.setattr( + rp, "_get_named_custom_provider", + lambda p: { + "name": "my-server", + "base_url": "http://localhost:8000/v1", + "api_key": "test-key", + }, + ) + monkeypatch.setattr(rp, "_try_resolve_from_custom_pool", lambda *a, **k: None) + + resolved = rp.resolve_runtime_provider(requested="my-server") + assert "model" not in resolved diff --git a/tests/hermes_cli/test_tui_resume_flow.py b/tests/hermes_cli/test_tui_resume_flow.py index 1d4ff429af..96f7e145b4 100644 --- a/tests/hermes_cli/test_tui_resume_flow.py +++ b/tests/hermes_cli/test_tui_resume_flow.py @@ -25,7 +25,7 @@ def test_cmd_chat_tui_continue_uses_latest_tui_session(monkeypatch): calls.append(source) return "20260408_235959_a1b2c3" if source == "tui" else None - def fake_launch(resume_session_id=None): + def fake_launch(resume_session_id=None, tui_dev=False): captured["resume"] = resume_session_id raise SystemExit(0) @@ -54,7 +54,7 @@ def test_cmd_chat_tui_continue_falls_back_to_latest_cli_session(monkeypatch): return "20260408_235959_d4e5f6" return None - def fake_launch(resume_session_id=None): + def fake_launch(resume_session_id=None, tui_dev=False): captured["resume"] = resume_session_id raise SystemExit(0) @@ -74,7 +74,7 @@ def test_cmd_chat_tui_resume_resolves_title_before_launch(monkeypatch): captured = {} - def fake_launch(resume_session_id=None): + def fake_launch(resume_session_id=None, tui_dev=False): captured["resume"] = resume_session_id raise SystemExit(0) diff --git a/tests/hermes_cli/test_update_gateway_restart.py b/tests/hermes_cli/test_update_gateway_restart.py index ceb05f65c9..822b22742d 100644 --- a/tests/hermes_cli/test_update_gateway_restart.py +++ 
b/tests/hermes_cli/test_update_gateway_restart.py @@ -191,6 +191,19 @@ class TestLaunchdPlistPath: raise AssertionError("PATH key not found in plist") +class TestLaunchdPlistCurrentness: + def test_launchd_plist_is_current_ignores_path_drift(self, tmp_path, monkeypatch): + plist_path = tmp_path / "ai.hermes.gateway.plist" + monkeypatch.setattr(gateway_cli, "get_launchd_plist_path", lambda: plist_path) + + monkeypatch.setenv("PATH", "/custom/bin:/usr/bin:/bin") + plist_path.write_text(gateway_cli.generate_launchd_plist(), encoding="utf-8") + + monkeypatch.setenv("PATH", "/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin") + + assert gateway_cli.launchd_plist_is_current() is True + + # --------------------------------------------------------------------------- # cmd_update — macOS launchd detection # --------------------------------------------------------------------------- @@ -536,7 +549,7 @@ class TestServicePidExclusion: gateway_cli, "_get_service_pids", return_value={SERVICE_PID} ), patch.object( gateway_cli, "find_gateway_pids", - side_effect=lambda exclude_pids=None: ( + side_effect=lambda exclude_pids=None, all_profiles=False: ( [SERVICE_PID] if not exclude_pids else [p for p in [SERVICE_PID] if p not in exclude_pids] ), @@ -579,7 +592,7 @@ class TestServicePidExclusion: gateway_cli, "_get_service_pids", return_value={SERVICE_PID} ), patch.object( gateway_cli, "find_gateway_pids", - side_effect=lambda exclude_pids=None: ( + side_effect=lambda exclude_pids=None, all_profiles=False: ( [SERVICE_PID] if not exclude_pids else [p for p in [SERVICE_PID] if p not in exclude_pids] ), @@ -618,7 +631,7 @@ class TestServicePidExclusion: launchctl_loaded=True, ) - def fake_find(exclude_pids=None): + def fake_find(exclude_pids=None, all_profiles=False): _exclude = exclude_pids or set() return [p for p in [SERVICE_PID, MANUAL_PID] if p not in _exclude] @@ -760,3 +773,28 @@ class TestFindGatewayPidsExclude: pids = gateway_cli.find_gateway_pids() assert 100 in pids assert 200 in 
pids + + def test_filters_to_current_profile(self, monkeypatch, tmp_path): + profile_dir = tmp_path / ".hermes" / "profiles" / "orcha" + profile_dir.mkdir(parents=True) + monkeypatch.setattr(gateway_cli, "is_windows", lambda: False) + monkeypatch.setattr(gateway_cli, "get_hermes_home", lambda: profile_dir) + + def fake_run(cmd, **kwargs): + return subprocess.CompletedProcess( + cmd, 0, + stdout=( + "100 /Users/dgrieco/.hermes/hermes-agent/venv/bin/python -m hermes_cli.main --profile orcha gateway run --replace\n" + "200 /Users/dgrieco/.hermes/hermes-agent/venv/bin/python -m hermes_cli.main --profile other gateway run --replace\n" + ), + stderr="", + ) + + monkeypatch.setattr(gateway_cli.subprocess, "run", fake_run) + monkeypatch.setattr("os.getpid", lambda: 999) + monkeypatch.setattr(gateway_cli, "_get_service_pids", lambda: set()) + monkeypatch.setattr(gateway_cli, "_profile_arg", lambda hermes_home=None: "--profile orcha") + + pids = gateway_cli.find_gateway_pids() + + assert pids == [100] diff --git a/tests/hermes_cli/test_xiaomi_provider.py b/tests/hermes_cli/test_xiaomi_provider.py new file mode 100644 index 0000000000..ed60ed3fb2 --- /dev/null +++ b/tests/hermes_cli/test_xiaomi_provider.py @@ -0,0 +1,327 @@ +"""Tests for Xiaomi MiMo provider support.""" + +import os +import sys +import types + +import pytest + +# Ensure dotenv doesn't interfere +if "dotenv" not in sys.modules: + fake_dotenv = types.ModuleType("dotenv") + fake_dotenv.load_dotenv = lambda *args, **kwargs: None + sys.modules["dotenv"] = fake_dotenv + +from hermes_cli.auth import ( + PROVIDER_REGISTRY, + resolve_provider, + get_api_key_provider_status, + resolve_api_key_provider_credentials, + AuthError, +) + + +# ============================================================================= +# Provider Registry +# ============================================================================= + + +class TestXiaomiProviderRegistry: + """Verify Xiaomi is registered correctly in the 
PROVIDER_REGISTRY.""" + + def test_registered(self): + assert "xiaomi" in PROVIDER_REGISTRY + + def test_name(self): + assert PROVIDER_REGISTRY["xiaomi"].name == "Xiaomi MiMo" + + def test_auth_type(self): + assert PROVIDER_REGISTRY["xiaomi"].auth_type == "api_key" + + def test_inference_base_url(self): + assert PROVIDER_REGISTRY["xiaomi"].inference_base_url == "https://api.xiaomimimo.com/v1" + + def test_api_key_env_vars(self): + assert PROVIDER_REGISTRY["xiaomi"].api_key_env_vars == ("XIAOMI_API_KEY",) + + def test_base_url_env_var(self): + assert PROVIDER_REGISTRY["xiaomi"].base_url_env_var == "XIAOMI_BASE_URL" + + +# ============================================================================= +# Aliases +# ============================================================================= + + +class TestXiaomiAliases: + """All aliases should resolve to 'xiaomi'.""" + + @pytest.mark.parametrize("alias", [ + "xiaomi", "mimo", "xiaomi-mimo", + ]) + def test_alias_resolves(self, alias, monkeypatch): + # Clear env to avoid auto-detection interfering + for key in ("XIAOMI_API_KEY",): + monkeypatch.delenv(key, raising=False) + monkeypatch.setenv("XIAOMI_API_KEY", "sk-test-key-12345678") + assert resolve_provider(alias) == "xiaomi" + + def test_normalize_provider_models_py(self): + from hermes_cli.models import normalize_provider + assert normalize_provider("mimo") == "xiaomi" + assert normalize_provider("xiaomi-mimo") == "xiaomi" + + def test_normalize_provider_providers_py(self): + from hermes_cli.providers import normalize_provider + assert normalize_provider("mimo") == "xiaomi" + assert normalize_provider("xiaomi-mimo") == "xiaomi" + + +# ============================================================================= +# Auto-detection +# ============================================================================= + + +class TestXiaomiAutoDetection: + """Setting XIAOMI_API_KEY should auto-detect the provider.""" + + def test_auto_detect(self, monkeypatch): + # Clear all 
other provider env vars + for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", + "DEEPSEEK_API_KEY", "GOOGLE_API_KEY", "GEMINI_API_KEY", + "DASHSCOPE_API_KEY", "XAI_API_KEY", "KIMI_API_KEY", + "MINIMAX_API_KEY", "AI_GATEWAY_API_KEY", "KILOCODE_API_KEY", + "HF_TOKEN", "GLM_API_KEY", "COPILOT_GITHUB_TOKEN", + "GH_TOKEN", "GITHUB_TOKEN", "MINIMAX_CN_API_KEY"): + monkeypatch.delenv(var, raising=False) + monkeypatch.setenv("XIAOMI_API_KEY", "sk-xiaomi-test-12345678") + provider = resolve_provider("auto") + assert provider == "xiaomi" + + +# ============================================================================= +# Credentials +# ============================================================================= + + +class TestXiaomiCredentials: + """Test credential resolution for the xiaomi provider.""" + + def test_status_configured(self, monkeypatch): + monkeypatch.setenv("XIAOMI_API_KEY", "sk-test-12345678") + status = get_api_key_provider_status("xiaomi") + assert status["configured"] + + def test_status_not_configured(self, monkeypatch): + monkeypatch.delenv("XIAOMI_API_KEY", raising=False) + status = get_api_key_provider_status("xiaomi") + assert not status["configured"] + + def test_resolve_credentials(self, monkeypatch): + monkeypatch.setenv("XIAOMI_API_KEY", "sk-test-12345678") + monkeypatch.delenv("XIAOMI_BASE_URL", raising=False) + creds = resolve_api_key_provider_credentials("xiaomi") + assert creds["api_key"] == "sk-test-12345678" + assert creds["base_url"] == "https://api.xiaomimimo.com/v1" + + def test_custom_base_url_override(self, monkeypatch): + monkeypatch.setenv("XIAOMI_API_KEY", "sk-test-12345678") + monkeypatch.setenv("XIAOMI_BASE_URL", "https://custom.xiaomi.example/v1") + creds = resolve_api_key_provider_credentials("xiaomi") + assert creds["base_url"] == "https://custom.xiaomi.example/v1" + + +# ============================================================================= +# Model catalog (dynamic — no static list) +# 
============================================================================= + + +class TestXiaomiModelCatalog: + """Xiaomi uses dynamic model discovery via models.dev.""" + + def test_models_dev_mapping(self): + from agent.models_dev import PROVIDER_TO_MODELS_DEV + assert PROVIDER_TO_MODELS_DEV["xiaomi"] == "xiaomi" + + def test_static_model_list_fallback(self): + """Static _PROVIDER_MODELS fallback must exist for model picker.""" + from hermes_cli.models import _PROVIDER_MODELS + assert "xiaomi" in _PROVIDER_MODELS + models = _PROVIDER_MODELS["xiaomi"] + assert "mimo-v2-pro" in models + assert "mimo-v2-omni" in models + assert "mimo-v2-flash" in models + + def test_list_agentic_models_mock(self, monkeypatch): + """When models.dev returns Xiaomi data, list_agentic_models should return models.""" + from agent import models_dev as md + + fake_data = { + "xiaomi": { + "name": "Xiaomi", + "api": "https://api.xiaomimimo.com/v1", + "env": ["XIAOMI_API_KEY"], + "models": { + "mimo-v2-pro": { + "limit": {"context": 1000000}, + "tool_call": True, + }, + "mimo-v2-omni": { + "limit": {"context": 256000}, + "tool_call": True, + }, + "mimo-v2-flash": { + "limit": {"context": 256000}, + "tool_call": True, + }, + }, + } + } + monkeypatch.setattr(md, "fetch_models_dev", lambda: fake_data) + + result = md.list_agentic_models("xiaomi") + assert "mimo-v2-pro" in result + assert "mimo-v2-flash" in result + + +# ============================================================================= +# Normalization +# ============================================================================= + + +class TestXiaomiNormalization: + """Model name normalization — Xiaomi is a direct provider.""" + + def test_vendor_prefix_mapping(self): + from hermes_cli.model_normalize import _VENDOR_PREFIXES + assert _VENDOR_PREFIXES.get("mimo") == "xiaomi" + + def test_matching_prefix_strip(self): + """xiaomi/mimo-v2-pro should normalize to mimo-v2-pro for direct API.""" + from hermes_cli.model_normalize 
import _MATCHING_PREFIX_STRIP_PROVIDERS + assert "xiaomi" in _MATCHING_PREFIX_STRIP_PROVIDERS + + def test_normalize_strips_provider_prefix(self): + from hermes_cli.model_normalize import normalize_model_for_provider + result = normalize_model_for_provider("xiaomi/mimo-v2-pro", "xiaomi") + assert result == "mimo-v2-pro" + + def test_normalize_bare_name_unchanged(self): + from hermes_cli.model_normalize import normalize_model_for_provider + result = normalize_model_for_provider("mimo-v2-pro", "xiaomi") + assert result == "mimo-v2-pro" + + +# ============================================================================= +# URL mapping +# ============================================================================= + + +class TestXiaomiURLMapping: + """Test URL → provider inference for Xiaomi endpoints.""" + + def test_url_to_provider(self): + from agent.model_metadata import _URL_TO_PROVIDER + assert _URL_TO_PROVIDER.get("api.xiaomimimo.com") == "xiaomi" + + def test_provider_prefixes(self): + from agent.model_metadata import _PROVIDER_PREFIXES + assert "xiaomi" in _PROVIDER_PREFIXES + assert "mimo" in _PROVIDER_PREFIXES + assert "xiaomi-mimo" in _PROVIDER_PREFIXES + + def test_infer_from_url(self): + from agent.model_metadata import _infer_provider_from_url + assert _infer_provider_from_url("https://api.xiaomimimo.com/v1") == "xiaomi" + + def test_infer_from_regional_urls(self): + """Regional token-plan endpoints should also resolve to xiaomi.""" + from agent.model_metadata import _infer_provider_from_url + assert _infer_provider_from_url("https://token-plan-ams.xiaomimimo.com/v1") == "xiaomi" + assert _infer_provider_from_url("https://token-plan-cn.xiaomimimo.com/v1") == "xiaomi" + assert _infer_provider_from_url("https://token-plan-sgp.xiaomimimo.com/v1") == "xiaomi" + + +# ============================================================================= +# providers.py +# ============================================================================= + + +class 
TestXiaomiProvidersModule: + """Test Xiaomi in the unified providers module.""" + + def test_overlay_exists(self): + from hermes_cli.providers import HERMES_OVERLAYS + assert "xiaomi" in HERMES_OVERLAYS + overlay = HERMES_OVERLAYS["xiaomi"] + assert overlay.transport == "openai_chat" + assert overlay.base_url_env_var == "XIAOMI_BASE_URL" + assert not overlay.is_aggregator + + def test_alias_resolves(self): + from hermes_cli.providers import normalize_provider + assert normalize_provider("mimo") == "xiaomi" + assert normalize_provider("xiaomi-mimo") == "xiaomi" + + def test_label(self): + from hermes_cli.providers import get_label + assert get_label("xiaomi") == "Xiaomi MiMo" + + def test_get_provider(self): + pdef = None + try: + from hermes_cli.providers import get_provider + pdef = get_provider("xiaomi") + except Exception: + pass + if pdef is not None: + assert pdef.id == "xiaomi" + assert pdef.transport == "openai_chat" + + +# ============================================================================= +# Auxiliary client +# ============================================================================= + + +class TestXiaomiAuxiliary: + """Xiaomi auxiliary routing: vision → omni, non-vision → user's main model, never flash.""" + + def test_no_flash_in_aux_models(self): + """mimo-v2-flash must NEVER be used for automatic aux routing.""" + from agent.auxiliary_client import _API_KEY_PROVIDER_AUX_MODELS + assert "xiaomi" not in _API_KEY_PROVIDER_AUX_MODELS + + def test_vision_model_override(self): + """Xiaomi vision tasks should use mimo-v2-omni (multimodal), not the main model.""" + from agent.auxiliary_client import _PROVIDER_VISION_MODELS + assert "xiaomi" in _PROVIDER_VISION_MODELS + assert _PROVIDER_VISION_MODELS["xiaomi"] == "mimo-v2-omni" + + +# ============================================================================= +# Agent init (no SyntaxError, correct api_mode) +# ============================================================================= + + 
+class TestXiaomiDoctor: + """Verify hermes doctor recognizes Xiaomi env vars.""" + + def test_provider_env_hints(self): + from hermes_cli.doctor import _PROVIDER_ENV_HINTS + assert "XIAOMI_API_KEY" in _PROVIDER_ENV_HINTS + + +class TestXiaomiAgentInit: + """Verify the agent can be constructed with xiaomi provider without errors.""" + + def test_no_syntax_errors(self): + """Importing run_agent with xiaomi should not raise.""" + import importlib + importlib.import_module("run_agent") + + def test_api_mode_is_chat_completions(self): + from hermes_cli.providers import HERMES_OVERLAYS, TRANSPORT_TO_API_MODE + overlay = HERMES_OVERLAYS["xiaomi"] + api_mode = TRANSPORT_TO_API_MODE[overlay.transport] + assert api_mode == "chat_completions" diff --git a/tests/run_agent/test_compression_feasibility.py b/tests/run_agent/test_compression_feasibility.py new file mode 100644 index 0000000000..1b4423414e --- /dev/null +++ b/tests/run_agent/test_compression_feasibility.py @@ -0,0 +1,279 @@ +"""Tests for _check_compression_model_feasibility() — warns when the +auxiliary compression model's context is smaller than the main model's +compression threshold. + +Two-phase design: + 1. __init__ → runs the check, prints via _vprint (CLI), stores warning + 2. 
from unittest.mock import MagicMock, patch

from run_agent import AIAgent
from agent.context_compressor import ContextCompressor


def _make_agent(
    *,
    compression_enabled: bool = True,
    threshold_percent: float = 0.50,
    main_context: int = 200_000,
) -> AIAgent:
    """Build a minimal AIAgent with a compressor, skipping __init__.

    Uses AIAgent.__new__ so no network clients or I/O are constructed;
    only the attributes read by _check_compression_model_feasibility()
    and _replay_compression_warning() are set.
    """
    agent = AIAgent.__new__(AIAgent)
    agent.model = "test-main-model"
    agent.provider = "openrouter"
    agent.base_url = "https://openrouter.ai/api/v1"
    agent.api_key = "sk-test"
    agent.quiet_mode = True
    agent.log_prefix = ""
    agent.compression_enabled = compression_enabled
    agent._print_fn = None
    agent.suppress_status_output = False
    agent._stream_consumers = []
    agent._executing_tools = False
    agent._mute_post_response = False
    agent.status_callback = None
    agent.tool_progress_callback = None
    agent._compression_warning = None

    # Mocked compressor exposing just the two limits the check compares.
    compressor = MagicMock(spec=ContextCompressor)
    compressor.context_length = main_context
    compressor.threshold_tokens = int(main_context * threshold_percent)
    agent.context_compressor = compressor

    return agent


# ── Core warning logic ──────────────────────────────────────────────


# NOTE: @patch decorators apply bottom-up, so the innermost patch
# (get_text_auxiliary_client) binds to the FIRST mock argument.
@patch("agent.model_metadata.get_model_context_length", return_value=32_768)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_warns_when_aux_context_below_threshold(mock_get_client, mock_ctx_len):
    """Warning emitted when aux model context < main model threshold."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    # threshold = 100,000 — aux has only 32,768
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "google/gemini-3-flash-preview")

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 1
    assert "Compression model" in messages[0]
    assert "32,768" in messages[0]
    assert "100,000" in messages[0]
    assert "will not be possible" in messages[0]
    # Actionable fix guidance included
    assert "Fix options" in messages[0]
    assert "auxiliary:" in messages[0]
    assert "compression:" in messages[0]
    assert "threshold:" in messages[0]
    # Warning stored for gateway replay
    assert agent._compression_warning is not None


@patch("agent.model_metadata.get_model_context_length", return_value=200_000)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_no_warning_when_aux_context_sufficient(mock_get_client, mock_ctx_len):
    """No warning when aux model context >= main model threshold."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    # threshold = 100,000 — aux has 200,000 (sufficient)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "google/gemini-2.5-flash")

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 0
    assert agent._compression_warning is None


@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_warns_when_no_auxiliary_provider(mock_get_client):
    """Warning emitted when no auxiliary provider is configured."""
    agent = _make_agent()
    # (None, None) signals "no auxiliary provider resolved".
    mock_get_client.return_value = (None, None)

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 1
    assert "No auxiliary LLM provider" in messages[0]
    assert agent._compression_warning is not None


def test_skips_check_when_compression_disabled():
    """No check performed when compression is disabled."""
    agent = _make_agent(compression_enabled=False)

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 0
    assert agent._compression_warning is None


@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_exception_does_not_crash(mock_get_client):
    """Exceptions in the check are caught — never blocks startup."""
    agent = _make_agent()
    mock_get_client.side_effect = RuntimeError("boom")

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    # Should not raise
    agent._check_compression_model_feasibility()

    # No user-facing message (error is debug-logged)
    assert len(messages) == 0


@patch("agent.model_metadata.get_model_context_length", return_value=100_000)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_exact_threshold_boundary_no_warning(mock_get_client, mock_ctx_len):
    """No warning when aux context exactly equals the threshold.

    Together with the test below, pins the comparison as strict `<`
    rather than `<=`.
    """
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "test-model")

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 0


@patch("agent.model_metadata.get_model_context_length", return_value=99_999)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_just_below_threshold_warns(mock_get_client, mock_ctx_len):
    """Warning fires when aux context is one token below the threshold."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "small-model")

    messages = []
    agent._emit_status = lambda msg: messages.append(msg)

    agent._check_compression_model_feasibility()

    assert len(messages) == 1
    # Offending model slug is named in the warning text.
    assert "small-model" in messages[0]


# ── Two-phase: __init__ + run_conversation replay ─────────────────── 


@patch("agent.model_metadata.get_model_context_length", return_value=32_768)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_warning_stored_for_gateway_replay(mock_get_client, mock_ctx_len):
    """__init__ stores the warning; _replay sends it through status_callback."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "google/gemini-3-flash-preview")

    # Phase 1: __init__ — _emit_status prints (CLI) but callback is None
    vprint_messages = []
    agent._emit_status = lambda msg: vprint_messages.append(msg)
    agent._check_compression_model_feasibility()

    assert len(vprint_messages) == 1  # CLI got it
    assert agent._compression_warning is not None  # stored for replay

    # Phase 2: gateway wires callback post-init, then run_conversation replays
    callback_events = []
    agent.status_callback = lambda ev, msg: callback_events.append((ev, msg))
    agent._replay_compression_warning()

    # Replay is emitted as a "lifecycle" status event.
    assert any(
        ev == "lifecycle" and "will not be possible" in msg
        for ev, msg in callback_events
    )


@patch("agent.model_metadata.get_model_context_length", return_value=200_000)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_no_replay_when_no_warning(mock_get_client, mock_ctx_len):
    """_replay_compression_warning is a no-op when there's no stored warning."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "big-model")

    agent._emit_status = lambda msg: None
    agent._check_compression_model_feasibility()

    assert agent._compression_warning is None

    callback_events = []
    agent.status_callback = lambda ev, msg: callback_events.append((ev, msg))
    agent._replay_compression_warning()

    assert len(callback_events) == 0


def test_replay_without_callback_is_noop():
    """_replay_compression_warning doesn't crash when status_callback is None."""
    agent = _make_agent()
    agent._compression_warning = "some warning"
    agent.status_callback = None

    # Should not raise
    agent._replay_compression_warning()


@patch("agent.model_metadata.get_model_context_length", return_value=32_768)
@patch("agent.auxiliary_client.get_text_auxiliary_client")
def test_run_conversation_clears_warning_after_replay(mock_get_client, mock_ctx_len):
    """After replay in run_conversation, _compression_warning is cleared
    so the warning is not sent again on subsequent turns."""
    agent = _make_agent(main_context=200_000, threshold_percent=0.50)
    mock_client = MagicMock()
    mock_client.base_url = "https://openrouter.ai/api/v1"
    mock_client.api_key = "sk-aux"
    mock_get_client.return_value = (mock_client, "small-model")

    agent._emit_status = lambda msg: None
    agent._check_compression_model_feasibility()

    assert agent._compression_warning is not None

    # Simulate what run_conversation does
    callback_events = []
    agent.status_callback = lambda ev, msg: callback_events.append((ev, msg))
    if agent._compression_warning:
        agent._replay_compression_warning()
        agent._compression_warning = None  # as in run_conversation

    assert len(callback_events) == 1

    # Second turn — nothing replayed
    callback_events.clear()
    if agent._compression_warning:
        agent._replay_compression_warning()
        agent._compression_warning = None

    assert len(callback_events) == 0
a/tests/run_agent/test_interrupt_propagation.py +++ b/tests/run_agent/test_interrupt_propagation.py @@ -22,23 +22,22 @@ class TestInterruptPropagationToChild(unittest.TestCase): def tearDown(self): set_interrupt(False) + def _make_bare_agent(self): + """Create a bare AIAgent via __new__ with all interrupt-related attrs.""" + from run_agent import AIAgent + agent = AIAgent.__new__(AIAgent) + agent._interrupt_requested = False + agent._interrupt_message = None + agent._execution_thread_id = None # defaults to current thread in set_interrupt + agent._active_children = [] + agent._active_children_lock = threading.Lock() + agent.quiet_mode = True + return agent + def test_parent_interrupt_sets_child_flag(self): """When parent.interrupt() is called, child._interrupt_requested should be set.""" - from run_agent import AIAgent - - parent = AIAgent.__new__(AIAgent) - parent._interrupt_requested = False - parent._interrupt_message = None - parent._active_children = [] - parent._active_children_lock = threading.Lock() - parent.quiet_mode = True - - child = AIAgent.__new__(AIAgent) - child._interrupt_requested = False - child._interrupt_message = None - child._active_children = [] - child._active_children_lock = threading.Lock() - child.quiet_mode = True + parent = self._make_bare_agent() + child = self._make_bare_agent() parent._active_children.append(child) @@ -49,40 +48,26 @@ class TestInterruptPropagationToChild(unittest.TestCase): assert child._interrupt_message == "new user message" assert is_interrupted() is True - def test_child_clear_interrupt_at_start_clears_global(self): - """child.clear_interrupt() at start of run_conversation clears the GLOBAL event. - - This is the intended behavior at startup, but verify it doesn't - accidentally clear an interrupt intended for a running child. + def test_child_clear_interrupt_at_start_clears_thread(self): + """child.clear_interrupt() at start of run_conversation clears the + per-thread interrupt flag for the current thread. 
""" - from run_agent import AIAgent - - child = AIAgent.__new__(AIAgent) + child = self._make_bare_agent() child._interrupt_requested = True child._interrupt_message = "msg" - child.quiet_mode = True - child._active_children = [] - child._active_children_lock = threading.Lock() - # Global is set + # Interrupt for current thread is set set_interrupt(True) assert is_interrupted() is True - # child.clear_interrupt() clears both + # child.clear_interrupt() clears both instance flag and thread flag child.clear_interrupt() assert child._interrupt_requested is False assert is_interrupted() is False def test_interrupt_during_child_api_call_detected(self): """Interrupt set during _interruptible_api_call is detected within 0.5s.""" - from run_agent import AIAgent - - child = AIAgent.__new__(AIAgent) - child._interrupt_requested = False - child._interrupt_message = None - child._active_children = [] - child._active_children_lock = threading.Lock() - child.quiet_mode = True + child = self._make_bare_agent() child.api_mode = "chat_completions" child.log_prefix = "" child._client_kwargs = {"api_key": "test", "base_url": "http://localhost:1234"} @@ -117,21 +102,8 @@ class TestInterruptPropagationToChild(unittest.TestCase): def test_concurrent_interrupt_propagation(self): """Simulates exact CLI flow: parent runs delegate in thread, main thread interrupts.""" - from run_agent import AIAgent - - parent = AIAgent.__new__(AIAgent) - parent._interrupt_requested = False - parent._interrupt_message = None - parent._active_children = [] - parent._active_children_lock = threading.Lock() - parent.quiet_mode = True - - child = AIAgent.__new__(AIAgent) - child._interrupt_requested = False - child._interrupt_message = None - child._active_children = [] - child._active_children_lock = threading.Lock() - child.quiet_mode = True + parent = self._make_bare_agent() + child = self._make_bare_agent() # Register child (simulating what _run_single_child does) parent._active_children.append(child) @@ 
class TestPerThreadInterruptIsolation(unittest.TestCase):
    """Verify that interrupting one agent does NOT affect another agent's thread.

    This is the core fix for the gateway cross-session interrupt leak:
    multiple agents run in separate threads within the same process, and
    interrupting agent A must not kill agent B's running tools.
    """

    def setUp(self):
        # Reset the current thread's interrupt flag before each test.
        set_interrupt(False)

    def tearDown(self):
        # Never leak an interrupt flag into the next test.
        set_interrupt(False)

    def test_interrupt_only_affects_target_thread(self):
        """set_interrupt(True, tid) only makes is_interrupted() True on that thread."""
        results = {}
        # Two-party barrier so both worker threads reach the same point
        # before the main thread fires the interrupt.
        barrier = threading.Barrier(2)

        def thread_a():
            """Agent A's execution thread — will be interrupted."""
            tid = threading.current_thread().ident
            results["a_tid"] = tid
            barrier.wait(timeout=5)  # sync with thread B
            time.sleep(0.2)  # let the interrupt arrive
            results["a_interrupted"] = is_interrupted()

        def thread_b():
            """Agent B's execution thread — should NOT be affected."""
            tid = threading.current_thread().ident
            results["b_tid"] = tid
            barrier.wait(timeout=5)  # sync with thread A
            time.sleep(0.2)
            results["b_interrupted"] = is_interrupted()

        ta = threading.Thread(target=thread_a)
        tb = threading.Thread(target=thread_b)
        ta.start()
        tb.start()

        # Wait for both threads to register their TIDs
        time.sleep(0.05)
        while "a_tid" not in results or "b_tid" not in results:
            time.sleep(0.01)

        # Interrupt ONLY thread A (simulates gateway interrupting agent A)
        set_interrupt(True, results["a_tid"])

        ta.join(timeout=3)
        tb.join(timeout=3)

        assert results["a_interrupted"] is True, "Thread A should see the interrupt"
        assert results["b_interrupted"] is False, "Thread B must NOT see thread A's interrupt"

    def test_clear_interrupt_only_clears_target_thread(self):
        """Clearing one thread's interrupt doesn't clear another's."""
        # Synthetic TIDs — no real threads needed to exercise the flag store.
        tid_a = 99990001
        tid_b = 99990002
        set_interrupt(True, tid_a)
        set_interrupt(True, tid_b)

        # Clear only A
        set_interrupt(False, tid_a)

        # Simulate checking from thread B's perspective.
        # NOTE(review): reaches into private module state (_interrupted_threads,
        # _lock) — acceptable in a white-box regression test.
        from tools.interrupt import _interrupted_threads, _lock
        with _lock:
            assert tid_a not in _interrupted_threads
            assert tid_b in _interrupted_threads

        # Cleanup
        set_interrupt(False, tid_b)
    def test_truncated_tool_args_detected_when_finish_reason_not_length(self, agent):
        """When a router rewrites finish_reason from 'length' to 'tool_calls',
        truncated JSON arguments should still be detected and refused rather
        than wasting 3 retry attempts."""
        self._setup_agent(agent)
        agent.valid_tool_names.add("write_file")
        # Arguments are deliberately invalid JSON (string never closed),
        # mimicking a mid-generation length cutoff.
        bad_tc = _mock_tool_call(
            name="write_file",
            arguments='{"path":"report.md","content":"partial',
            call_id="c1",
        )
        resp = _mock_response(
            content="", finish_reason="tool_calls", tool_calls=[bad_tc],
        )
        agent.client.chat.completions.create.return_value = resp

        with (
            patch("run_agent.handle_function_call") as mock_handle_function_call,
            patch.object(agent, "_persist_session"),
            patch.object(agent, "_save_trajectory"),
            patch.object(agent, "_cleanup_task_resources"),
        ):
            result = agent.run_conversation("write the report")

        assert result["completed"] is False
        assert result["partial"] is True
        assert "truncated due to output length limit" in result["error"]
        # The malformed tool call must never be executed.
        mock_handle_function_call.assert_not_called()
def test_chat_messages_to_responses_input_deduplicates_reasoning_ids(monkeypatch):
    """Duplicate reasoning item IDs across multi-turn incomplete responses
    must be deduplicated so the Responses API doesn't reject with HTTP 400."""
    agent = _build_agent(monkeypatch)
    messages = [
        {"role": "user", "content": "think hard"},
        {
            "role": "assistant",
            "content": "",
            "codex_reasoning_items": [
                {"type": "reasoning", "id": "rs_aaa", "encrypted_content": "enc_1"},
                {"type": "reasoning", "id": "rs_bbb", "encrypted_content": "enc_2"},
            ],
        },
        {
            "role": "assistant",
            "content": "partial answer",
            "codex_reasoning_items": [
                # rs_aaa is duplicated from the previous turn
                {"type": "reasoning", "id": "rs_aaa", "encrypted_content": "enc_1"},
                {"type": "reasoning", "id": "rs_ccc", "encrypted_content": "enc_3"},
            ],
        },
    ]
    items = agent._chat_messages_to_responses_input(messages)

    reasoning_ids = [it["id"] for it in items if it.get("type") == "reasoning"]
    # rs_aaa should appear only once (first occurrence kept)
    assert reasoning_ids.count("rs_aaa") == 1
    # rs_bbb and rs_ccc should each appear once
    assert reasoning_ids.count("rs_bbb") == 1
    assert reasoning_ids.count("rs_ccc") == 1
    assert len(reasoning_ids) == 3
"""Tests for _reap_orphaned_browser_sessions() — kills orphaned agent-browser
daemons whose Python parent exited without cleaning up."""

import os
import signal
# NOTE(review): textwrap, Path and MagicMock appear unused below — confirm
# before removing; they are kept here to avoid breaking unseen usages.
import textwrap
from pathlib import Path
from unittest.mock import patch, MagicMock

import pytest


@pytest.fixture
def fake_tmpdir(tmp_path):
    """Patch _socket_safe_tmpdir to return a temp dir we control."""
    with patch("tools.browser_tool._socket_safe_tmpdir", return_value=str(tmp_path)):
        yield tmp_path


@pytest.fixture(autouse=True)
def _isolate_sessions():
    """Ensure _active_sessions is empty for each test.

    Saves and restores the real registry so tests cannot leak state into
    (or clobber state from) the rest of the suite.
    """
    import tools.browser_tool as bt
    orig = bt._active_sessions.copy()
    bt._active_sessions.clear()
    yield
    bt._active_sessions.clear()
    bt._active_sessions.update(orig)


def _make_socket_dir(tmpdir, session_name, pid=None):
    """Create a fake agent-browser socket directory with optional PID file.

    Mirrors the on-disk layout the reaper scans: a directory named
    ``agent-browser-<session>`` containing ``<session>.pid``.
    """
    d = tmpdir / f"agent-browser-{session_name}"
    d.mkdir()
    if pid is not None:
        (d / f"{session_name}.pid").write_text(str(pid))
    return d


class TestReapOrphanedBrowserSessions:
    """Tests for the orphan reaper function."""

    def test_no_socket_dirs_is_noop(self, fake_tmpdir):
        """No socket dirs => nothing happens, no errors."""
        from tools.browser_tool import _reap_orphaned_browser_sessions
        _reap_orphaned_browser_sessions()  # should not raise

    def test_stale_dir_without_pid_file_is_removed(self, fake_tmpdir):
        """Socket dir with no PID file is cleaned up."""
        from tools.browser_tool import _reap_orphaned_browser_sessions
        d = _make_socket_dir(fake_tmpdir, "h_abc1234567")
        assert d.exists()
        _reap_orphaned_browser_sessions()
        assert not d.exists()

    def test_stale_dir_with_dead_pid_is_removed(self, fake_tmpdir):
        """Socket dir whose daemon PID is dead gets cleaned up."""
        from tools.browser_tool import _reap_orphaned_browser_sessions
        # PID far above any realistic pid_max, so the process cannot exist.
        d = _make_socket_dir(fake_tmpdir, "h_dead123456", pid=999999999)
        assert d.exists()
        _reap_orphaned_browser_sessions()
        assert not d.exists()

    def test_orphaned_alive_daemon_is_killed(self, fake_tmpdir):
        """Alive daemon not tracked by _active_sessions gets SIGTERM."""
        from tools.browser_tool import _reap_orphaned_browser_sessions

        d = _make_socket_dir(fake_tmpdir, "h_orphan12345", pid=12345)

        kill_calls = []

        def mock_kill(pid, sig):
            kill_calls.append((pid, sig))
            if sig == 0:
                return  # pretend process exists
            # Don't actually kill anything

        with patch("os.kill", side_effect=mock_kill):
            _reap_orphaned_browser_sessions()

        # Should have checked existence (sig 0) then killed (SIGTERM)
        assert (12345, 0) in kill_calls
        assert (12345, signal.SIGTERM) in kill_calls

    def test_tracked_session_is_not_reaped(self, fake_tmpdir):
        """Sessions tracked in _active_sessions are left alone."""
        import tools.browser_tool as bt
        from tools.browser_tool import _reap_orphaned_browser_sessions

        session_name = "h_tracked1234"
        d = _make_socket_dir(fake_tmpdir, session_name, pid=12345)

        # Register the session as actively tracked
        bt._active_sessions["some_task"] = {"session_name": session_name}

        kill_calls = []

        def mock_kill(pid, sig):
            kill_calls.append((pid, sig))

        with patch("os.kill", side_effect=mock_kill):
            _reap_orphaned_browser_sessions()

        # Should NOT have tried to kill anything
        assert len(kill_calls) == 0
        # Dir should still exist
        assert d.exists()

    def test_permission_error_on_kill_check_skips(self, fake_tmpdir):
        """If we can't check the PID (PermissionError), skip it."""
        from tools.browser_tool import _reap_orphaned_browser_sessions

        d = _make_socket_dir(fake_tmpdir, "h_perm1234567", pid=12345)

        def mock_kill(pid, sig):
            if sig == 0:
                raise PermissionError("not our process")

        with patch("os.kill", side_effect=mock_kill):
            _reap_orphaned_browser_sessions()

        # Dir should still exist (we didn't touch someone else's process)
        assert d.exists()

    def test_cdp_sessions_are_also_reaped(self, fake_tmpdir):
        """CDP sessions (cdp_ prefix) are also scanned."""
        from tools.browser_tool import _reap_orphaned_browser_sessions

        d = _make_socket_dir(fake_tmpdir, "cdp_abc1234567")
        assert d.exists()
        _reap_orphaned_browser_sessions()
        # No PID file → cleaned up
        assert not d.exists()

    def test_non_hermes_dirs_are_ignored(self, fake_tmpdir):
        """Socket dirs that don't match our naming pattern are left alone."""
        from tools.browser_tool import _reap_orphaned_browser_sessions

        # Create a dir that doesn't match h_* or cdp_* pattern
        d = fake_tmpdir / "agent-browser-other_session"
        d.mkdir()
        (d / "other_session.pid").write_text("12345")

        _reap_orphaned_browser_sessions()

        # Should NOT be touched
        assert d.exists()

    def test_corrupt_pid_file_is_cleaned(self, fake_tmpdir):
        """PID file with non-integer content is cleaned up."""
        from tools.browser_tool import _reap_orphaned_browser_sessions

        d = _make_socket_dir(fake_tmpdir, "h_corrupt1234")
        (d / "h_corrupt1234.pid").write_text("not-a-number")

        _reap_orphaned_browser_sessions()
        assert not d.exists()
a/tests/tools/test_checkpoint_manager.py b/tests/tools/test_checkpoint_manager.py index ef843465f1..ba9da6da1f 100644 --- a/tests/tools/test_checkpoint_manager.py +++ b/tests/tools/test_checkpoint_manager.py @@ -1,9 +1,6 @@ """Tests for tools/checkpoint_manager.py — CheckpointManager.""" import logging -import os -import json -import shutil import subprocess import pytest from pathlib import Path @@ -42,6 +39,19 @@ def checkpoint_base(tmp_path): return tmp_path / "checkpoints" +@pytest.fixture() +def fake_home(tmp_path, monkeypatch): + """Set a deterministic fake home for expanduser/path-home behavior.""" + home = tmp_path / "home" + home.mkdir() + monkeypatch.setenv("HOME", str(home)) + monkeypatch.setenv("USERPROFILE", str(home)) + monkeypatch.delenv("HOMEDRIVE", raising=False) + monkeypatch.delenv("HOMEPATH", raising=False) + monkeypatch.setattr(Path, "home", classmethod(lambda cls: home)) + return home + + @pytest.fixture() def mgr(work_dir, checkpoint_base, monkeypatch): """CheckpointManager with redirected checkpoint base.""" @@ -78,6 +88,16 @@ class TestShadowRepoPath: p = _shadow_repo_path(str(work_dir)) assert str(p).startswith(str(checkpoint_base)) + def test_tilde_and_expanded_home_share_shadow_repo(self, fake_home, checkpoint_base, monkeypatch): + monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base) + project = fake_home / "project" + project.mkdir() + + tilde_path = f"~/{project.name}" + expanded_path = str(project) + + assert _shadow_repo_path(tilde_path) == _shadow_repo_path(expanded_path) + # ========================================================================= # Shadow repo init @@ -221,6 +241,20 @@ class TestListCheckpoints: assert result[0]["reason"] == "third" assert result[2]["reason"] == "first" + def test_tilde_path_lists_same_checkpoints_as_expanded_path(self, checkpoint_base, fake_home, monkeypatch): + monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base) + mgr = 
CheckpointManager(enabled=True, max_snapshots=50) + project = fake_home / "project" + project.mkdir() + (project / "main.py").write_text("v1\n") + + tilde_path = f"~/{project.name}" + assert mgr.ensure_checkpoint(tilde_path, "initial") is True + + listed = mgr.list_checkpoints(str(project)) + assert len(listed) == 1 + assert listed[0]["reason"] == "initial" + # ========================================================================= # CheckpointManager — restoring @@ -271,6 +305,28 @@ class TestRestore: assert len(all_cps) >= 2 assert "pre-rollback" in all_cps[0]["reason"] + def test_tilde_path_supports_diff_and_restore_flow(self, checkpoint_base, fake_home, monkeypatch): + monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base) + mgr = CheckpointManager(enabled=True, max_snapshots=50) + project = fake_home / "project" + project.mkdir() + file_path = project / "main.py" + file_path.write_text("original\n") + + tilde_path = f"~/{project.name}" + assert mgr.ensure_checkpoint(tilde_path, "initial") is True + mgr.new_turn() + + file_path.write_text("changed\n") + checkpoints = mgr.list_checkpoints(str(project)) + diff_result = mgr.diff(tilde_path, checkpoints[0]["hash"]) + assert diff_result["success"] is True + assert "main.py" in diff_result["diff"] + + restore_result = mgr.restore(tilde_path, checkpoints[0]["hash"]) + assert restore_result["success"] is True + assert file_path.read_text() == "original\n" + # ========================================================================= # CheckpointManager — working dir resolution @@ -310,6 +366,19 @@ class TestWorkingDirResolution: result = mgr.get_working_dir_for_path(str(filepath)) assert result == str(filepath.parent) + def test_resolves_tilde_path_to_project_root(self, fake_home): + mgr = CheckpointManager(enabled=True) + project = fake_home / "myproject" + project.mkdir() + (project / "pyproject.toml").write_text("[project]\n") + subdir = project / "src" + subdir.mkdir() + filepath = subdir 
/ "main.py" + filepath.write_text("x\n") + + result = mgr.get_working_dir_for_path(f"~/{project.name}/src/main.py") + assert result == str(project) + # ========================================================================= # Git env isolation @@ -333,6 +402,14 @@ class TestGitEnvIsolation: env = _git_env(shadow, str(tmp_path)) assert "GIT_INDEX_FILE" not in env + def test_expands_tilde_in_work_tree(self, fake_home, tmp_path): + shadow = tmp_path / "shadow" + work = fake_home / "work" + work.mkdir() + + env = _git_env(shadow, f"~/{work.name}") + assert env["GIT_WORK_TREE"] == str(work.resolve()) + # ========================================================================= # format_checkpoint_list @@ -384,6 +461,8 @@ class TestErrorResilience: assert result is False def test_run_git_allows_expected_nonzero_without_error_log(self, tmp_path, caplog): + work = tmp_path / "work" + work.mkdir() completed = subprocess.CompletedProcess( args=["git", "diff", "--cached", "--quiet"], returncode=1, @@ -395,7 +474,7 @@ class TestErrorResilience: ok, stdout, stderr = _run_git( ["diff", "--cached", "--quiet"], tmp_path / "shadow", - str(tmp_path / "work"), + str(work), allowed_returncodes={1}, ) assert ok is False @@ -403,6 +482,38 @@ class TestErrorResilience: assert stderr == "" assert not caplog.records + def test_run_git_invalid_working_dir_reports_path_error(self, tmp_path, caplog): + missing = tmp_path / "missing" + with caplog.at_level(logging.ERROR, logger="tools.checkpoint_manager"): + ok, stdout, stderr = _run_git( + ["status"], + tmp_path / "shadow", + str(missing), + ) + assert ok is False + assert stdout == "" + assert "working directory not found" in stderr + assert not any("Git executable not found" in r.getMessage() for r in caplog.records) + + def test_run_git_missing_git_reports_git_not_found(self, tmp_path, monkeypatch, caplog): + work = tmp_path / "work" + work.mkdir() + + def raise_missing_git(*args, **kwargs): + raise FileNotFoundError(2, "No such file or 
# =========================================================================
# Security / Input validation
# =========================================================================

class TestSecurity:
    """Input-validation tests: commit hashes and file paths coming from the
    model must not be usable for git argument injection or path traversal."""

    def test_restore_rejects_argument_injection(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        # Try to pass a git flag as a commit hash
        result = mgr.restore(str(work_dir), "--patch")
        assert result["success"] is False
        assert "Invalid commit hash" in result["error"]
        assert "must not start with '-'" in result["error"]

        # Short-flag form must be rejected the same way.
        result = mgr.restore(str(work_dir), "-p")
        assert result["success"] is False
        assert "Invalid commit hash" in result["error"]

    def test_restore_rejects_invalid_hex_chars(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        # Git hashes should not contain characters like ;, &, |
        result = mgr.restore(str(work_dir), "abc; rm -rf /")
        assert result["success"] is False
        assert "expected 4-64 hex characters" in result["error"]

        # diff() shares the same hash validation as restore().
        result = mgr.diff(str(work_dir), "abc&def")
        assert result["success"] is False
        assert "expected 4-64 hex characters" in result["error"]

    def test_restore_rejects_path_traversal(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        # Real commit hash but malicious path
        checkpoints = mgr.list_checkpoints(str(work_dir))
        target_hash = checkpoints[0]["hash"]

        # Absolute path outside
        result = mgr.restore(str(work_dir), target_hash, file_path="/etc/passwd")
        assert result["success"] is False
        assert "got absolute path" in result["error"]

        # Relative traversal outside path
        result = mgr.restore(str(work_dir), target_hash, file_path="../outside_file.txt")
        assert result["success"] is False
        assert "escapes the working directory" in result["error"]

    def test_restore_accepts_valid_file_path(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        checkpoints = mgr.list_checkpoints(str(work_dir))
        target_hash = checkpoints[0]["hash"]

        # Valid path inside directory
        result = mgr.restore(str(work_dir), target_hash, file_path="main.py")
        assert result["success"] is True

        # Another valid path with subdirectories
        (work_dir / "subdir").mkdir()
        (work_dir / "subdir" / "test.txt").write_text("hello")
        mgr.new_turn()
        mgr.ensure_checkpoint(str(work_dir), "second")
        checkpoints = mgr.list_checkpoints(str(work_dir))
        target_hash = checkpoints[0]["hash"]

        result = mgr.restore(str(work_dir), target_hash, file_path="subdir/test.txt")
        assert result["success"] is True
+ from tools.interrupt import set_interrupt + + # Capture the main thread ID so we can target the interrupt correctly. + # execute_code runs in the current thread; set_interrupt needs its ID. + main_tid = threading.current_thread().ident def set_interrupt_after_delay(): import time as _t _t.sleep(1) - from tools.terminal_tool import _interrupt_event - _interrupt_event.set() + set_interrupt(True, main_tid) t = threading.Thread(target=set_interrupt_after_delay, daemon=True) t.start() @@ -804,8 +808,7 @@ class TestInterruptHandling(unittest.TestCase): self.assertEqual(result["status"], "interrupted") self.assertIn("interrupted", result["output"]) finally: - from tools.terminal_tool import _interrupt_event - _interrupt_event.clear() + set_interrupt(False, main_tid) t.join(timeout=3) diff --git a/tests/tools/test_notify_on_complete.py b/tests/tools/test_notify_on_complete.py index ff6f14922f..411f95f7e0 100644 --- a/tests/tools/test_notify_on_complete.py +++ b/tests/tools/test_notify_on_complete.py @@ -227,6 +227,8 @@ class TestCheckpointNotify: "session_key": "sk1", "watcher_platform": "telegram", "watcher_chat_id": "123", + "watcher_user_id": "u123", + "watcher_user_name": "alice", "watcher_thread_id": "42", "watcher_interval": 5, "notify_on_complete": True, @@ -236,6 +238,8 @@ class TestCheckpointNotify: assert recovered == 1 assert len(registry.pending_watchers) == 1 assert registry.pending_watchers[0]["notify_on_complete"] is True + assert registry.pending_watchers[0]["user_id"] == "u123" + assert registry.pending_watchers[0]["user_name"] == "alice" def test_recover_defaults_false(self, registry, tmp_path): """Old checkpoint entries without the field default to False.""" diff --git a/tests/tools/test_process_registry.py b/tests/tools/test_process_registry.py index a61da9dd3e..d981878a31 100644 --- a/tests/tools/test_process_registry.py +++ b/tests/tools/test_process_registry.py @@ -438,6 +438,8 @@ class TestCheckpoint: s = _make_session() s.watcher_platform = 
"telegram" s.watcher_chat_id = "999" + s.watcher_user_id = "u123" + s.watcher_user_name = "alice" s.watcher_thread_id = "42" s.watcher_interval = 60 registry._running[s.id] = s @@ -447,6 +449,8 @@ class TestCheckpoint: assert len(data) == 1 assert data[0]["watcher_platform"] == "telegram" assert data[0]["watcher_chat_id"] == "999" + assert data[0]["watcher_user_id"] == "u123" + assert data[0]["watcher_user_name"] == "alice" assert data[0]["watcher_thread_id"] == "42" assert data[0]["watcher_interval"] == 60 @@ -460,6 +464,8 @@ class TestCheckpoint: "session_key": "sk1", "watcher_platform": "telegram", "watcher_chat_id": "123", + "watcher_user_id": "u123", + "watcher_user_name": "alice", "watcher_thread_id": "42", "watcher_interval": 60, }])) @@ -471,6 +477,8 @@ class TestCheckpoint: assert w["session_id"] == "proc_live" assert w["platform"] == "telegram" assert w["chat_id"] == "123" + assert w["user_id"] == "u123" + assert w["user_name"] == "alice" assert w["thread_id"] == "42" assert w["check_interval"] == 60 diff --git a/tests/tools/test_skill_manager_tool.py b/tests/tools/test_skill_manager_tool.py index 7b9e49d4f2..dd0ae17f8c 100644 --- a/tests/tools/test_skill_manager_tool.py +++ b/tests/tools/test_skill_manager_tool.py @@ -348,7 +348,7 @@ word word result = _patch_skill("my-skill", "old text", "new text", file_path="references/evil.md") assert result["success"] is False - assert "boundary" in result["error"].lower() + assert "escapes" in result["error"].lower() assert outside_file.read_text() == "old text here" @@ -412,7 +412,7 @@ class TestWriteFile: result = _write_file("my-skill", "references/escape/owned.md", "malicious") assert result["success"] is False - assert "boundary" in result["error"].lower() + assert "escapes" in result["error"].lower() assert not (outside_dir / "owned.md").exists() @@ -449,7 +449,7 @@ class TestRemoveFile: result = _remove_file("my-skill", "references/escape/keep.txt") assert result["success"] is False - assert "boundary" in 
result["error"].lower() + assert "escapes" in result["error"].lower() assert outside_file.exists() diff --git a/tests/tools/test_tool_result_storage.py b/tests/tools/test_tool_result_storage.py index f95b5dc08a..0bbb95bbd6 100644 --- a/tests/tools/test_tool_result_storage.py +++ b/tests/tools/test_tool_result_storage.py @@ -124,6 +124,34 @@ class TestWriteToSandbox: cmd = env.execute.call_args[0][0] assert "mkdir -p /data/data/com.termux/files/usr/tmp/hermes-results" in cmd + def test_path_with_spaces_is_quoted(self): + env = MagicMock() + env.execute.return_value = {"output": "", "returncode": 0} + remote_path = "/tmp/hermes results/abc file.txt" + _write_to_sandbox("content", remote_path, env) + cmd = env.execute.call_args[0][0] + assert "'/tmp/hermes results'" in cmd + assert "'/tmp/hermes results/abc file.txt'" in cmd + + def test_shell_metacharacters_neutralized(self): + """Paths with shell metacharacters must be quoted to prevent injection.""" + env = MagicMock() + env.execute.return_value = {"output": "", "returncode": 0} + malicious_path = "/tmp/hermes-results/$(whoami).txt" + _write_to_sandbox("content", malicious_path, env) + cmd = env.execute.call_args[0][0] + # The $() must not appear unquoted — shlex.quote wraps it + assert "'/tmp/hermes-results/$(whoami).txt'" in cmd + + def test_semicolon_injection_neutralized(self): + env = MagicMock() + env.execute.return_value = {"output": "", "returncode": 0} + malicious_path = "/tmp/x; rm -rf /; echo .txt" + _write_to_sandbox("content", malicious_path, env) + cmd = env.execute.call_args[0][0] + # The semicolons must be inside quotes, not acting as command separators + assert "'/tmp/x; rm -rf /; echo .txt'" in cmd + class TestResolveStorageDir: def test_defaults_to_storage_dir_without_env(self): diff --git a/tests/tools/test_vision_tools.py b/tests/tools/test_vision_tools.py index 6e9a6034e2..55949144a0 100644 --- a/tests/tools/test_vision_tools.py +++ b/tests/tools/test_vision_tools.py @@ -769,6 +769,62 @@ class 
TestResizeImageForVision: assert _RESIZE_TARGET_BYTES == 5 * 1024 * 1024 assert _MAX_BASE64_BYTES > _RESIZE_TARGET_BYTES + def test_extreme_aspect_ratio_preserved(self, tmp_path): + """Extreme aspect ratios should be preserved during resize.""" + try: + from PIL import Image + except ImportError: + pytest.skip("Pillow not installed") + # Very wide panorama: 8000x200 + img = Image.new("RGB", (8000, 200), (100, 150, 200)) + path = tmp_path / "panorama.png" + img.save(path, "PNG") + + result = _resize_image_for_vision(path, mime_type="image/png", + max_base64_bytes=50_000) + assert result.startswith("data:image/") + # Decode and check aspect ratio is roughly preserved + import base64 + header, b64data = result.split(",", 1) + raw = base64.b64decode(b64data) + from io import BytesIO + resized = Image.open(BytesIO(raw)) + original_ratio = 8000 / 200 # 40:1 + resized_ratio = resized.width / resized.height if resized.height > 0 else 0 + # Allow some tolerance (floor clamping), but ratio should stay above 10:1 + # With independent halving, ratio would collapse to ~1:1. Proportional + # scaling should keep it well above 10. 
+ assert resized_ratio > 10, ( + f"Aspect ratio collapsed: {resized.width}x{resized.height} " + f"(ratio {resized_ratio:.1f}, expected >10)" + ) + + def test_tall_narrow_image_preserved(self, tmp_path): + """Tall narrow images should also preserve aspect ratio.""" + try: + from PIL import Image + except ImportError: + pytest.skip("Pillow not installed") + # Very tall: 200x6000 + img = Image.new("RGB", (200, 6000), (200, 100, 50)) + path = tmp_path / "tall.png" + img.save(path, "PNG") + + result = _resize_image_for_vision(path, mime_type="image/png", + max_base64_bytes=50_000) + assert result.startswith("data:image/") + import base64 + from io import BytesIO + header, b64data = result.split(",", 1) + raw = base64.b64decode(b64data) + resized = Image.open(BytesIO(raw)) + original_ratio = 6000 / 200 # 30:1 (h/w) + resized_ratio = resized.height / resized.width if resized.width > 0 else 0 + assert resized_ratio > 5, ( + f"Aspect ratio collapsed: {resized.width}x{resized.height} " + f"(h/w ratio {resized_ratio:.1f}, expected >5)" + ) + def test_no_pillow_returns_original(self, tmp_path): """Without Pillow, oversized images should be returned as-is.""" # Create a dummy file diff --git a/tools/browser_tool.py b/tools/browser_tool.py index ed3cfbb9bb..bb24866066 100644 --- a/tools/browser_tool.py +++ b/tools/browser_tool.py @@ -473,13 +473,104 @@ def _cleanup_inactive_browser_sessions(): logger.warning("Error cleaning up inactive session %s: %s", task_id, e) +def _reap_orphaned_browser_sessions(): + """Scan for orphaned agent-browser daemon processes from previous runs. + + When the Python process that created a browser session exits uncleanly + (SIGKILL, crash, gateway restart), the in-memory ``_active_sessions`` + tracking is lost but the node + Chromium processes keep running. 
+ + This function scans the tmp directory for ``agent-browser-*`` socket dirs + left behind by previous runs, reads the daemon PID files, and kills any + daemons that are still alive but not tracked by the current process. + + Called once on cleanup-thread startup — not every 30 seconds — to avoid + races with sessions being actively created. + """ + import glob + + tmpdir = _socket_safe_tmpdir() + pattern = os.path.join(tmpdir, "agent-browser-h_*") + socket_dirs = glob.glob(pattern) + # Also pick up CDP sessions + socket_dirs += glob.glob(os.path.join(tmpdir, "agent-browser-cdp_*")) + + if not socket_dirs: + return + + # Build set of session_names currently tracked by this process + with _cleanup_lock: + tracked_names = { + info.get("session_name") + for info in _active_sessions.values() + if info.get("session_name") + } + + reaped = 0 + for socket_dir in socket_dirs: + dir_name = os.path.basename(socket_dir) + # dir_name is "agent-browser-{session_name}" + session_name = dir_name.removeprefix("agent-browser-") + if not session_name: + continue + + # Skip sessions that we are actively tracking + if session_name in tracked_names: + continue + + pid_file = os.path.join(socket_dir, f"{session_name}.pid") + if not os.path.isfile(pid_file): + # No PID file — just a stale dir, remove it + shutil.rmtree(socket_dir, ignore_errors=True) + continue + + try: + daemon_pid = int(Path(pid_file).read_text().strip()) + except (ValueError, OSError): + shutil.rmtree(socket_dir, ignore_errors=True) + continue + + # Check if the daemon is still alive + try: + os.kill(daemon_pid, 0) # signal 0 = existence check + except ProcessLookupError: + # Already dead, just clean up the dir + shutil.rmtree(socket_dir, ignore_errors=True) + continue + except PermissionError: + # Alive but owned by someone else — leave it alone + continue + + # Daemon is alive and not tracked — orphan. Kill it. 
+ try: + os.kill(daemon_pid, signal.SIGTERM) + logger.info("Reaped orphaned browser daemon PID %d (session %s)", + daemon_pid, session_name) + reaped += 1 + except (ProcessLookupError, PermissionError, OSError): + pass + + # Clean up the socket directory + shutil.rmtree(socket_dir, ignore_errors=True) + + if reaped: + logger.info("Reaped %d orphaned browser session(s) from previous run(s)", reaped) + + def _browser_cleanup_thread_worker(): """ Background thread that periodically cleans up inactive browser sessions. Runs every 30 seconds and checks for sessions that haven't been used within the BROWSER_SESSION_INACTIVITY_TIMEOUT period. + On first run, also reaps orphaned sessions from previous process lifetimes. """ + # One-time orphan reap on startup + try: + _reap_orphaned_browser_sessions() + except Exception as e: + logger.warning("Orphan reap error: %s", e) + while _cleanup_running: try: _cleanup_inactive_browser_sessions() diff --git a/tools/checkpoint_manager.py b/tools/checkpoint_manager.py index c298aa0bb6..42900a643d 100644 --- a/tools/checkpoint_manager.py +++ b/tools/checkpoint_manager.py @@ -21,6 +21,7 @@ into the user's project directory. import hashlib import logging import os +import re import shutil import subprocess from pathlib import Path @@ -64,23 +65,72 @@ _GIT_TIMEOUT: int = max(10, min(60, int(os.getenv("HERMES_CHECKPOINT_TIMEOUT", " # Max files to snapshot — skip huge directories to avoid slowdowns. _MAX_FILES = 50_000 +# Valid git commit hash pattern: 4–40 hex chars (short or full SHA-1/SHA-256). +_COMMIT_HASH_RE = re.compile(r'^[0-9a-fA-F]{4,64}$') + + +# --------------------------------------------------------------------------- +# Input validation helpers +# --------------------------------------------------------------------------- + +def _validate_commit_hash(commit_hash: str) -> Optional[str]: + """Validate a commit hash to prevent git argument injection. + + Returns an error string if invalid, None if valid. 
+ Values starting with '-' would be interpreted as git flags + (e.g., '--patch', '-p') instead of revision specifiers. + """ + if not commit_hash or not commit_hash.strip(): + return "Empty commit hash" + if commit_hash.startswith("-"): + return f"Invalid commit hash (must not start with '-'): {commit_hash!r}" + if not _COMMIT_HASH_RE.match(commit_hash): + return f"Invalid commit hash (expected 4-64 hex characters): {commit_hash!r}" + return None + + +def _validate_file_path(file_path: str, working_dir: str) -> Optional[str]: + """Validate a file path to prevent path traversal outside the working directory. + + Returns an error string if invalid, None if valid. + """ + if not file_path or not file_path.strip(): + return "Empty file path" + # Reject absolute paths — restore targets must be relative to the workdir + if os.path.isabs(file_path): + return f"File path must be relative, got absolute path: {file_path!r}" + # Resolve and check containment within working_dir + abs_workdir = _normalize_path(working_dir) + resolved = (abs_workdir / file_path).resolve() + try: + resolved.relative_to(abs_workdir) + except ValueError: + return f"File path escapes the working directory via traversal: {file_path!r}" + return None + # --------------------------------------------------------------------------- # Shadow repo helpers # --------------------------------------------------------------------------- +def _normalize_path(path_value: str) -> Path: + """Return a canonical absolute path for checkpoint operations.""" + return Path(path_value).expanduser().resolve() + + def _shadow_repo_path(working_dir: str) -> Path: """Deterministic shadow repo path: sha256(abs_path)[:16].""" - abs_path = str(Path(working_dir).resolve()) + abs_path = str(_normalize_path(working_dir)) dir_hash = hashlib.sha256(abs_path.encode()).hexdigest()[:16] return CHECKPOINT_BASE / dir_hash def _git_env(shadow_repo: Path, working_dir: str) -> dict: """Build env dict that redirects git to the shadow repo.""" 
+ normalized_working_dir = _normalize_path(working_dir) env = os.environ.copy() env["GIT_DIR"] = str(shadow_repo) - env["GIT_WORK_TREE"] = str(Path(working_dir).resolve()) + env["GIT_WORK_TREE"] = str(normalized_working_dir) env.pop("GIT_INDEX_FILE", None) env.pop("GIT_NAMESPACE", None) env.pop("GIT_ALTERNATE_OBJECT_DIRECTORIES", None) @@ -100,7 +150,17 @@ def _run_git( exits while preserving the normal ``ok = (returncode == 0)`` contract. Example: ``git diff --cached --quiet`` returns 1 when changes exist. """ - env = _git_env(shadow_repo, working_dir) + normalized_working_dir = _normalize_path(working_dir) + if not normalized_working_dir.exists(): + msg = f"working directory not found: {normalized_working_dir}" + logger.error("Git command skipped: %s (%s)", " ".join(["git"] + list(args)), msg) + return False, "", msg + if not normalized_working_dir.is_dir(): + msg = f"working directory is not a directory: {normalized_working_dir}" + logger.error("Git command skipped: %s (%s)", " ".join(["git"] + list(args)), msg) + return False, "", msg + + env = _git_env(shadow_repo, str(normalized_working_dir)) cmd = ["git"] + list(args) allowed_returncodes = allowed_returncodes or set() try: @@ -110,7 +170,7 @@ def _run_git( text=True, timeout=timeout, env=env, - cwd=str(Path(working_dir).resolve()), + cwd=str(normalized_working_dir), ) ok = result.returncode == 0 stdout = result.stdout.strip() @@ -125,9 +185,14 @@ def _run_git( msg = f"git timed out after {timeout}s: {' '.join(cmd)}" logger.error(msg, exc_info=True) return False, "", msg - except FileNotFoundError: - logger.error("Git executable not found: %s", " ".join(cmd), exc_info=True) - return False, "", "git not found" + except FileNotFoundError as exc: + missing_target = getattr(exc, "filename", None) + if missing_target == "git": + logger.error("Git executable not found: %s", " ".join(cmd), exc_info=True) + return False, "", "git not found" + msg = f"working directory not found: {normalized_working_dir}" + 
logger.error("Git command failed before execution: %s (%s)", " ".join(cmd), msg, exc_info=True) + return False, "", msg except Exception as exc: logger.error("Unexpected git error running %s: %s", " ".join(cmd), exc, exc_info=True) return False, "", str(exc) @@ -154,7 +219,7 @@ def _init_shadow_repo(shadow_repo: Path, working_dir: str) -> Optional[str]: ) (shadow_repo / "HERMES_WORKDIR").write_text( - str(Path(working_dir).resolve()) + "\n", encoding="utf-8" + str(_normalize_path(working_dir)) + "\n", encoding="utf-8" ) logger.debug("Initialised checkpoint repo at %s for %s", shadow_repo, working_dir) @@ -229,7 +294,7 @@ class CheckpointManager: if not self._git_available: return False - abs_dir = str(Path(working_dir).resolve()) + abs_dir = str(_normalize_path(working_dir)) # Skip root, home, and other overly broad directories if abs_dir in ("/", str(Path.home())): @@ -254,7 +319,7 @@ class CheckpointManager: Returns a list of dicts with keys: hash, short_hash, timestamp, reason, files_changed, insertions, deletions. Most recent first. """ - abs_dir = str(Path(working_dir).resolve()) + abs_dir = str(_normalize_path(working_dir)) shadow = _shadow_repo_path(abs_dir) if not (shadow / "HEAD").exists(): @@ -311,7 +376,12 @@ class CheckpointManager: Returns dict with success, diff text, and stat summary. """ - abs_dir = str(Path(working_dir).resolve()) + # Validate commit_hash to prevent git argument injection + hash_err = _validate_commit_hash(commit_hash) + if hash_err: + return {"success": False, "error": hash_err} + + abs_dir = str(_normalize_path(working_dir)) shadow = _shadow_repo_path(abs_dir) if not (shadow / "HEAD").exists(): @@ -364,7 +434,19 @@ class CheckpointManager: Returns dict with success/error info. 
""" - abs_dir = str(Path(working_dir).resolve()) + # Validate commit_hash to prevent git argument injection + hash_err = _validate_commit_hash(commit_hash) + if hash_err: + return {"success": False, "error": hash_err} + + abs_dir = str(_normalize_path(working_dir)) + + # Validate file_path to prevent path traversal outside the working dir + if file_path: + path_err = _validate_file_path(file_path, abs_dir) + if path_err: + return {"success": False, "error": path_err} + shadow = _shadow_repo_path(abs_dir) if not (shadow / "HEAD").exists(): @@ -413,7 +495,7 @@ class CheckpointManager: (directory containing .git, pyproject.toml, package.json, etc.). Falls back to the file's parent directory. """ - path = Path(file_path).resolve() + path = _normalize_path(file_path) if path.is_dir(): candidate = path else: diff --git a/tools/code_execution_tool.py b/tools/code_execution_tool.py index 7837d70d6c..d6c561e2c3 100644 --- a/tools/code_execution_tool.py +++ b/tools/code_execution_tool.py @@ -924,8 +924,8 @@ def execute_code( # --- Local execution path (UDS) --- below this line is unchanged --- - # Import interrupt event from terminal_tool (cooperative cancellation) - from tools.terminal_tool import _interrupt_event + # Import per-thread interrupt check (cooperative cancellation) + from tools.interrupt import is_interrupted as _is_interrupted # Resolve config _cfg = _load_config() @@ -1114,7 +1114,7 @@ def execute_code( status = "success" while proc.poll() is None: - if _interrupt_event.is_set(): + if _is_interrupted(): _kill_process_group(proc) status = "interrupted" break diff --git a/tools/credential_files.py b/tools/credential_files.py index 6ddcd07708..7998321e63 100644 --- a/tools/credential_files.py +++ b/tools/credential_files.py @@ -80,20 +80,18 @@ def register_credential_file( # Resolve symlinks and normalise ``..`` before the containment check so # that traversal like ``../. ssh/id_rsa`` cannot escape HERMES_HOME. 
- try: - resolved = host_path.resolve() - hermes_home_resolved = hermes_home.resolve() - resolved.relative_to(hermes_home_resolved) # raises ValueError if outside - except ValueError: + from tools.path_security import validate_within_dir + + containment_error = validate_within_dir(host_path, hermes_home) + if containment_error: logger.warning( - "credential_files: rejected path traversal %r " - "(resolves to %s, outside HERMES_HOME %s)", + "credential_files: rejected path traversal %r (%s)", relative_path, - resolved, - hermes_home_resolved, + containment_error, ) return False + resolved = host_path.resolve() if not resolved.is_file(): logger.debug("credential_files: skipping %s (not found)", resolved) return False @@ -142,7 +140,8 @@ def _load_config_files() -> List[Dict[str, str]]: cfg = read_raw_config() cred_files = cfg.get("terminal", {}).get("credential_files") if isinstance(cred_files, list): - hermes_home_resolved = hermes_home.resolve() + from tools.path_security import validate_within_dir + for item in cred_files: if isinstance(item, str) and item.strip(): rel = item.strip() @@ -151,20 +150,19 @@ def _load_config_files() -> List[Dict[str, str]]: "credential_files: rejected absolute config path %r", rel, ) continue - host_path = (hermes_home / rel).resolve() - try: - host_path.relative_to(hermes_home_resolved) - except ValueError: + host_path = hermes_home / rel + containment_error = validate_within_dir(host_path, hermes_home) + if containment_error: logger.warning( - "credential_files: rejected config path traversal %r " - "(resolves to %s, outside HERMES_HOME %s)", - rel, host_path, hermes_home_resolved, + "credential_files: rejected config path traversal %r (%s)", + rel, containment_error, ) continue - if host_path.is_file(): + resolved_path = host_path.resolve() + if resolved_path.is_file(): container_path = f"/root/.hermes/{rel}" result.append({ - "host_path": str(host_path), + "host_path": str(resolved_path), "container_path": container_path, }) 
except Exception as e: diff --git a/tools/cronjob_tools.py b/tools/cronjob_tools.py index 3018b8731f..e2db933813 100644 --- a/tools/cronjob_tools.py +++ b/tools/cronjob_tools.py @@ -165,12 +165,12 @@ def _validate_cron_script_path(script: Optional[str]) -> Optional[str]: ) # Validate containment after resolution + from tools.path_security import validate_within_dir + scripts_dir = get_hermes_home() / "scripts" scripts_dir.mkdir(parents=True, exist_ok=True) - resolved = (scripts_dir / raw).resolve() - try: - resolved.relative_to(scripts_dir.resolve()) - except ValueError: + containment_error = validate_within_dir(scripts_dir / raw, scripts_dir) + if containment_error: return ( f"Script path escapes the scripts directory via traversal: {raw!r}" ) diff --git a/tools/interrupt.py b/tools/interrupt.py index e5c9b1e27e..9bc8b83ae4 100644 --- a/tools/interrupt.py +++ b/tools/interrupt.py @@ -1,8 +1,12 @@ -"""Shared interrupt signaling for all tools. +"""Per-thread interrupt signaling for all tools. -Provides a global threading.Event that any tool can check to determine -if the user has requested an interrupt. The agent's interrupt() method -sets this event, and tools poll it during long-running operations. +Provides thread-scoped interrupt tracking so that interrupting one agent +session does not kill tools running in other sessions. This is critical +in the gateway where multiple agents run concurrently in the same process. + +The agent stores its execution thread ID at the start of run_conversation() +and passes it to set_interrupt()/clear_interrupt(). Tools call +is_interrupted() which checks the CURRENT thread — no argument needed. Usage in tools: from tools.interrupt import is_interrupted @@ -12,17 +16,61 @@ Usage in tools: import threading -_interrupt_event = threading.Event() +# Set of thread idents that have been interrupted. 
+_interrupted_threads: set[int] = set() +_lock = threading.Lock() -def set_interrupt(active: bool) -> None: - """Called by the agent to signal or clear the interrupt.""" - if active: - _interrupt_event.set() - else: - _interrupt_event.clear() +def set_interrupt(active: bool, thread_id: int | None = None) -> None: + """Set or clear interrupt for a specific thread. + + Args: + active: True to signal interrupt, False to clear it. + thread_id: Target thread ident. When None, targets the + current thread (backward compat for CLI/tests). + """ + tid = thread_id if thread_id is not None else threading.current_thread().ident + with _lock: + if active: + _interrupted_threads.add(tid) + else: + _interrupted_threads.discard(tid) def is_interrupted() -> bool: - """Check if an interrupt has been requested. Safe to call from any thread.""" - return _interrupt_event.is_set() + """Check if an interrupt has been requested for the current thread. + + Safe to call from any thread — each thread only sees its own + interrupt state. + """ + tid = threading.current_thread().ident + with _lock: + return tid in _interrupted_threads + + +# --------------------------------------------------------------------------- +# Backward-compatible _interrupt_event proxy +# --------------------------------------------------------------------------- +# Some legacy call sites (code_execution_tool, process_registry, tests) +# import _interrupt_event directly and call .is_set() / .set() / .clear(). +# This shim maps those calls to the per-thread functions above so existing +# code keeps working while the underlying mechanism is thread-scoped. 
+ +class _ThreadAwareEventProxy: + """Drop-in proxy that maps threading.Event methods to per-thread state.""" + + def is_set(self) -> bool: + return is_interrupted() + + def set(self) -> None: # noqa: A003 + set_interrupt(True) + + def clear(self) -> None: + set_interrupt(False) + + def wait(self, timeout: float | None = None) -> bool: + """Not truly supported — returns current state immediately.""" + return self.is_set() + + +_interrupt_event = _ThreadAwareEventProxy() diff --git a/tools/path_security.py b/tools/path_security.py new file mode 100644 index 0000000000..828011e5d7 --- /dev/null +++ b/tools/path_security.py @@ -0,0 +1,43 @@ +"""Shared path validation helpers for tool implementations. + +Extracts the ``resolve() + relative_to()`` and ``..`` traversal check +patterns previously duplicated across skill_manager_tool, skills_tool, +skills_hub, cronjob_tools, and credential_files. +""" + +import logging +from pathlib import Path +from typing import Optional + +logger = logging.getLogger(__name__) + + +def validate_within_dir(path: Path, root: Path) -> Optional[str]: + """Ensure *path* resolves to a location within *root*. + + Returns an error message string if validation fails, or ``None`` if the + path is safe. Uses ``Path.resolve()`` to follow symlinks and normalize + ``..`` components. + + Usage:: + + error = validate_within_dir(user_path, allowed_root) + if error: + return json.dumps({"error": error}) + """ + try: + resolved = path.resolve() + root_resolved = root.resolve() + resolved.relative_to(root_resolved) + except (ValueError, OSError) as exc: + return f"Path escapes allowed directory: {exc}" + return None + + +def has_traversal_component(path_str: str) -> bool: + """Return True if *path_str* contains ``..`` traversal components. + + Quick check for obvious traversal attempts before doing full resolution. + """ + parts = Path(path_str).parts + return ".." 
in parts diff --git a/tools/process_registry.py b/tools/process_registry.py index 18d0b1de22..0e8e04b3b0 100644 --- a/tools/process_registry.py +++ b/tools/process_registry.py @@ -96,6 +96,8 @@ class ProcessSession: # Watcher/notification metadata (persisted for crash recovery) watcher_platform: str = "" watcher_chat_id: str = "" + watcher_user_id: str = "" + watcher_user_name: str = "" watcher_thread_id: str = "" watcher_interval: int = 0 # 0 = no watcher configured notify_on_complete: bool = False # Queue agent notification on exit @@ -695,7 +697,7 @@ class ProcessRegistry: and output snapshot. """ from tools.ansi_strip import strip_ansi - from tools.terminal_tool import _interrupt_event + from tools.interrupt import is_interrupted as _is_interrupted try: default_timeout = int(os.getenv("TERMINAL_TIMEOUT", "180")) @@ -732,7 +734,7 @@ class ProcessRegistry: result["timeout_note"] = timeout_note return result - if _interrupt_event.is_set(): + if _is_interrupted(): result = { "status": "interrupted", "output": strip_ansi(session.output_buffer[-1000:]), @@ -981,6 +983,8 @@ class ProcessRegistry: "session_key": s.session_key, "watcher_platform": s.watcher_platform, "watcher_chat_id": s.watcher_chat_id, + "watcher_user_id": s.watcher_user_id, + "watcher_user_name": s.watcher_user_name, "watcher_thread_id": s.watcher_thread_id, "watcher_interval": s.watcher_interval, "notify_on_complete": s.notify_on_complete, @@ -1042,6 +1046,8 @@ class ProcessRegistry: detached=True, # Can't read output, but can report status + kill watcher_platform=entry.get("watcher_platform", ""), watcher_chat_id=entry.get("watcher_chat_id", ""), + watcher_user_id=entry.get("watcher_user_id", ""), + watcher_user_name=entry.get("watcher_user_name", ""), watcher_thread_id=entry.get("watcher_thread_id", ""), watcher_interval=entry.get("watcher_interval", 0), notify_on_complete=entry.get("notify_on_complete", False), @@ -1060,6 +1066,8 @@ class ProcessRegistry: "session_key": session.session_key, 
"platform": session.watcher_platform, "chat_id": session.watcher_chat_id, + "user_id": session.watcher_user_id, + "user_name": session.watcher_user_name, "thread_id": session.watcher_thread_id, "notify_on_complete": session.notify_on_complete, }) diff --git a/tools/skill_manager_tool.py b/tools/skill_manager_tool.py index 2273d75fa6..2b2625fa0d 100644 --- a/tools/skill_manager_tool.py +++ b/tools/skill_manager_tool.py @@ -219,13 +219,15 @@ def _validate_file_path(file_path: str) -> Optional[str]: Validate a file path for write_file/remove_file. Must be under an allowed subdirectory and not escape the skill dir. """ + from tools.path_security import has_traversal_component + if not file_path: return "file_path is required." normalized = Path(file_path) # Prevent path traversal - if ".." in normalized.parts: + if has_traversal_component(file_path): return "Path traversal ('..') is not allowed." # Must be under an allowed subdirectory @@ -242,15 +244,12 @@ def _validate_file_path(file_path: str) -> Optional[str]: def _resolve_skill_target(skill_dir: Path, file_path: str) -> Tuple[Optional[Path], Optional[str]]: """Resolve a supporting-file path and ensure it stays within the skill directory.""" + from tools.path_security import validate_within_dir + target = skill_dir / file_path - try: - resolved = target.resolve(strict=False) - skill_dir_resolved = skill_dir.resolve() - resolved.relative_to(skill_dir_resolved) - except ValueError: - return None, "Path escapes skill directory boundary." 
- except OSError as e: - return None, f"Invalid file path '{file_path}': {e}" + error = validate_within_dir(target, skill_dir) + if error: + return None, error return target, None diff --git a/tools/skills_tool.py b/tools/skills_tool.py index 085ed00550..94b7c235b7 100644 --- a/tools/skills_tool.py +++ b/tools/skills_tool.py @@ -447,17 +447,8 @@ def _get_category_from_path(skill_path: Path) -> Optional[str]: return None -def _estimate_tokens(content: str) -> int: - """ - Rough token estimate (4 chars per token average). - - Args: - content: Text content - - Returns: - Estimated token count - """ - return len(content) // 4 +# Token estimation — use the shared implementation from model_metadata. +from agent.model_metadata import estimate_tokens_rough as _estimate_tokens def _parse_tags(tags_value) -> List[str]: @@ -947,9 +938,10 @@ def skill_view(name: str, file_path: str = None, task_id: str = None) -> str: # If a specific file path is requested, read that instead if file_path and skill_dir: + from tools.path_security import validate_within_dir, has_traversal_component + # Security: Prevent path traversal attacks - normalized_path = Path(file_path) - if ".." 
in normalized_path.parts: + if has_traversal_component(file_path): return json.dumps( { "success": False, @@ -962,24 +954,13 @@ def skill_view(name: str, file_path: str = None, task_id: str = None) -> str: target_file = skill_dir / file_path # Security: Verify resolved path is still within skill directory - try: - resolved = target_file.resolve() - skill_dir_resolved = skill_dir.resolve() - if not resolved.is_relative_to(skill_dir_resolved): - return json.dumps( - { - "success": False, - "error": "Path escapes skill directory boundary.", - "hint": "Use a relative path within the skill directory", - }, - ensure_ascii=False, - ) - except (OSError, ValueError): + traversal_error = validate_within_dir(target_file, skill_dir) + if traversal_error: return json.dumps( { "success": False, - "error": f"Invalid file path: '{file_path}'", - "hint": "Use a valid relative path within the skill directory", + "error": traversal_error, + "hint": "Use a relative path within the skill directory", }, ensure_ascii=False, ) diff --git a/tools/terminal_tool.py b/tools/terminal_tool.py index 859f0f1f36..f0cbff0f4c 100644 --- a/tools/terminal_tool.py +++ b/tools/terminal_tool.py @@ -1427,8 +1427,12 @@ def terminal_tool( if _gw_platform and not check_interval: _gw_chat_id = _gse("HERMES_SESSION_CHAT_ID", "") _gw_thread_id = _gse("HERMES_SESSION_THREAD_ID", "") + _gw_user_id = _gse("HERMES_SESSION_USER_ID", "") + _gw_user_name = _gse("HERMES_SESSION_USER_NAME", "") proc_session.watcher_platform = _gw_platform proc_session.watcher_chat_id = _gw_chat_id + proc_session.watcher_user_id = _gw_user_id + proc_session.watcher_user_name = _gw_user_name proc_session.watcher_thread_id = _gw_thread_id proc_session.watcher_interval = 5 process_registry.pending_watchers.append({ @@ -1437,6 +1441,8 @@ def terminal_tool( "session_key": session_key, "platform": _gw_platform, "chat_id": _gw_chat_id, + "user_id": _gw_user_id, + "user_name": _gw_user_name, "thread_id": _gw_thread_id, "notify_on_complete": 
True, }) @@ -1457,10 +1463,14 @@ def terminal_tool( watcher_platform = _gse2("HERMES_SESSION_PLATFORM", "") watcher_chat_id = _gse2("HERMES_SESSION_CHAT_ID", "") watcher_thread_id = _gse2("HERMES_SESSION_THREAD_ID", "") + watcher_user_id = _gse2("HERMES_SESSION_USER_ID", "") + watcher_user_name = _gse2("HERMES_SESSION_USER_NAME", "") # Store on session for checkpoint persistence proc_session.watcher_platform = watcher_platform proc_session.watcher_chat_id = watcher_chat_id + proc_session.watcher_user_id = watcher_user_id + proc_session.watcher_user_name = watcher_user_name proc_session.watcher_thread_id = watcher_thread_id proc_session.watcher_interval = effective_interval @@ -1470,6 +1480,8 @@ def terminal_tool( "session_key": session_key, "platform": watcher_platform, "chat_id": watcher_chat_id, + "user_id": watcher_user_id, + "user_name": watcher_user_name, "thread_id": watcher_thread_id, }) diff --git a/tools/tool_result_storage.py b/tools/tool_result_storage.py index a8ec5440bc..4342264482 100644 --- a/tools/tool_result_storage.py +++ b/tools/tool_result_storage.py @@ -24,6 +24,7 @@ Defense against context-window overflow operates at three levels: import logging import os +import shlex import uuid from tools.budget_config import ( @@ -79,7 +80,7 @@ def _write_to_sandbox(content: str, remote_path: str, env) -> bool: marker = _heredoc_marker(content) storage_dir = os.path.dirname(remote_path) cmd = ( - f"mkdir -p {storage_dir} && cat > {remote_path} << '{marker}'\n" + f"mkdir -p {shlex.quote(storage_dir)} && cat > {shlex.quote(remote_path)} << '{marker}'\n" f"{content}\n" f"{marker}" ) diff --git a/tools/vision_tools.py b/tools/vision_tools.py index 8242c78837..91ef672f48 100644 --- a/tools/vision_tools.py +++ b/tools/vision_tools.py @@ -357,8 +357,19 @@ def _resize_image_for_vision(image_path: Path, mime_type: Optional[str] = None, for attempt in range(5): if attempt > 0: - new_w = max(img.width // 2, 64) - new_h = max(img.height // 2, 64) + # Proportional 
scaling: halve the longer side and scale the + # shorter side to preserve aspect ratio (min dimension 64). + scale = 0.5 + new_w = max(int(img.width * scale), 64) + new_h = max(int(img.height * scale), 64) + # Re-derive the scale from whichever dimension hit the floor + # so both axes shrink by the same factor. + if new_w == 64 and img.width > 0: + effective_scale = 64 / img.width + new_h = max(int(img.height * effective_scale), 64) + elif new_h == 64 and img.height > 0: + effective_scale = 64 / img.height + new_w = max(int(img.width * effective_scale), 64) # Stop if dimensions can't shrink further if (new_w, new_h) == prev_dims: break diff --git a/ui-tui/bun.lock b/ui-tui/bun.lock deleted file mode 100644 index c93554b990..0000000000 --- a/ui-tui/bun.lock +++ /dev/null @@ -1,756 +0,0 @@ -{ - "lockfileVersion": 1, - "configVersion": 0, - "workspaces": { - "": { - "name": "hermes-tui", - "dependencies": { - "ink": "^6.8.0", - "ink-text-input": "^6.0.0", - "react": "^19.2.4", - "unicode-animations": "^1.0.3", - }, - "devDependencies": { - "@eslint/js": "^9", - "@types/node": "^25.5.0", - "@types/react": "^19.2.14", - "@typescript-eslint/eslint-plugin": "^8", - "@typescript-eslint/parser": "^8", - "eslint": "^9", - "eslint-plugin-perfectionist": "^5", - "eslint-plugin-react": "^7", - "eslint-plugin-react-hooks": "^7", - "eslint-plugin-unused-imports": "^4", - "globals": "^16", - "prettier": "^3", - "tsx": "^4.19.0", - "typescript": "^5.7.0", - }, - }, - }, - "packages": { - "@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.2.5", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-3NX/MpTdroi0aKz134A6RC2Gb2iXVECN4QaAXnvCIxxIm3C3AVB1mkUe8NaaiyvOpDfsrqWhYtj+Q6a62RrTsw=="], - - "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, 
"sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], - - "@babel/compat-data": ["@babel/compat-data@7.29.0", "", {}, "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg=="], - - "@babel/core": ["@babel/core@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-compilation-targets": "^7.28.6", "@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/traverse": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA=="], - - "@babel/generator": ["@babel/generator@7.29.1", "", { "dependencies": { "@babel/parser": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw=="], - - "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.28.6", "", { "dependencies": { "@babel/compat-data": "^7.28.6", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA=="], - - "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], - - "@babel/helper-module-imports": ["@babel/helper-module-imports@7.28.6", "", { "dependencies": { "@babel/traverse": "^7.28.6", "@babel/types": "^7.28.6" } }, 
"sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw=="], - - "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.28.6", "", { "dependencies": { "@babel/helper-module-imports": "^7.28.6", "@babel/helper-validator-identifier": "^7.28.5", "@babel/traverse": "^7.28.6" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA=="], - - "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], - - "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], - - "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], - - "@babel/helpers": ["@babel/helpers@7.29.2", "", { "dependencies": { "@babel/template": "^7.28.6", "@babel/types": "^7.29.0" } }, "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw=="], - - "@babel/parser": ["@babel/parser@7.29.2", "", { "dependencies": { "@babel/types": "^7.29.0" }, "bin": { "parser": "bin/babel-parser.js" } }, "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA=="], - - "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="], - - "@babel/traverse": ["@babel/traverse@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.29.0", 
"@babel/template": "^7.28.6", "@babel/types": "^7.29.0", "debug": "^4.3.1" } }, "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA=="], - - "@babel/types": ["@babel/types@7.29.0", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A=="], - - "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.5", "", { "os": "aix", "cpu": "ppc64" }, "sha512-nGsF/4C7uzUj+Nj/4J+Zt0bYQ6bz33Phz8Lb2N80Mti1HjGclTJdXZ+9APC4kLvONbjxN1zfvYNd8FEcbBK/MQ=="], - - "@esbuild/android-arm": ["@esbuild/android-arm@0.27.5", "", { "os": "android", "cpu": "arm" }, "sha512-Cv781jd0Rfj/paoNrul1/r4G0HLvuFKYh7C9uHZ2Pl8YXstzvCyyeWENTFR9qFnRzNMCjXmsulZuvosDg10Mog=="], - - "@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.5", "", { "os": "android", "cpu": "arm64" }, "sha512-Oeghq+XFgh1pUGd1YKs4DDoxzxkoUkvko+T/IVKwlghKLvvjbGFB3ek8VEDBmNvqhwuL0CQS3cExdzpmUyIrgA=="], - - "@esbuild/android-x64": ["@esbuild/android-x64@0.27.5", "", { "os": "android", "cpu": "x64" }, "sha512-nQD7lspbzerlmtNOxYMFAGmhxgzn8Z7m9jgFkh6kpkjsAhZee1w8tJW3ZlW+N9iRePz0oPUDrYrXidCPSImD0Q=="], - - "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.5", "", { "os": "darwin", "cpu": "arm64" }, "sha512-I+Ya/MgC6rr8oRWGRDF3BXDfP8K1BVUggHqN6VI2lUZLdDi1IM1v2cy0e3lCPbP+pVcK3Tv8cgUhHse1kaNZZw=="], - - "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.5", "", { "os": "darwin", "cpu": "x64" }, "sha512-MCjQUtC8wWJn/pIPM7vQaO69BFgwPD1jriEdqwTCKzWjGgkMbcg+M5HzrOhPhuYe1AJjXlHmD142KQf+jnYj8A=="], - - "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.5", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-X6xVS+goSH0UelYXnuf4GHLwpOdc8rgK/zai+dKzBMnncw7BTQIwquOodE7EKvY2UVUetSqyAfyZC1D+oqLQtg=="], - - "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.5", "", { "os": "freebsd", "cpu": "x64" }, 
"sha512-233X1FGo3a8x1ekLB6XT69LfZ83vqz+9z3TSEQCTYfMNY880A97nr81KbPcAMl9rmOFp11wO0dP+eB18KU/Ucg=="], - - "@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.5", "", { "os": "linux", "cpu": "arm" }, "sha512-0wkVrYHG4sdCCN/bcwQ7yYMXACkaHc3UFeaEOwSVW6e5RycMageYAFv+JS2bKLwHyeKVUvtoVH+5/RHq0fgeFw=="], - - "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.5", "", { "os": "linux", "cpu": "arm64" }, "sha512-euKkilsNOv7x/M1NKsx5znyprbpsRFIzTV6lWziqJch7yWYayfLtZzDxDTl+LSQDJYAjd9TVb/Kt5UKIrj2e4A=="], - - "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.5", "", { "os": "linux", "cpu": "ia32" }, "sha512-hVRQX4+P3MS36NxOy24v/Cdsimy/5HYePw+tmPqnNN1fxV0bPrFWR6TMqwXPwoTM2VzbkA+4lbHWUKDd5ZDA/w=="], - - "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.5", "", { "os": "linux", "cpu": "none" }, "sha512-mKqqRuOPALI8nDzhOBmIS0INvZOOFGGg5n1osGIXAx8oersceEbKd4t1ACNTHM3sJBXGFAlEgqM+svzjPot+ZQ=="], - - "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.5", "", { "os": "linux", "cpu": "none" }, "sha512-EE/QXH9IyaAj1qeuIV5+/GZkBTipgGO782Ff7Um3vPS9cvLhJJeATy4Ggxikz2inZ46KByamMn6GqtqyVjhenA=="], - - "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.5", "", { "os": "linux", "cpu": "ppc64" }, "sha512-0V2iF1RGxBf1b7/BjurA5jfkl7PtySjom1r6xOK2q9KWw/XCpAdtB6KNMO+9xx69yYfSCRR9FE0TyKfHA2eQMw=="], - - "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.5", "", { "os": "linux", "cpu": "none" }, "sha512-rYxThBx6G9HN6tFNuvB/vykeLi4VDsm5hE5pVwzqbAjZEARQrWu3noZSfbEnPZ/CRXP3271GyFk/49up2W190g=="], - - "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.5", "", { "os": "linux", "cpu": "s390x" }, "sha512-uEP2q/4qgd8goEUc4QIdU/1P2NmEtZ/zX5u3OpLlCGhJIuBIv0s0wr7TB2nBrd3/A5XIdEkkS5ZLF0ULuvaaYQ=="], - - "@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.5", "", { "os": "linux", "cpu": "x64" }, "sha512-+Gq47Wqq6PLOOZuBzVSII2//9yyHNKZLuwfzCemqexqOQCSz0zy0O26kIzyp9EMNMK+nZ0tFHBZrCeVUuMs/ew=="], - - "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.5", "", { "os": "none", "cpu": "arm64" }, 
"sha512-3F/5EG8VHfN/I+W5cO1/SV2H9Q/5r7vcHabMnBqhHK2lTWOh3F8vixNzo8lqxrlmBtZVFpW8pmITHnq54+Tq4g=="], - - "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.5", "", { "os": "none", "cpu": "x64" }, "sha512-28t+Sj3CPN8vkMOlZotOmDgilQwVvxWZl7b8rxpn73Tt/gCnvrHxQUMng4uu3itdFvrtba/1nHejvxqz8xgEMA=="], - - "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.5", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-Doz/hKtiuVAi9hMsBMpwBANhIZc8l238U2Onko3t2xUp8xtM0ZKdDYHMnm/qPFVthY8KtxkXaocwmMh6VolzMA=="], - - "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.5", "", { "os": "openbsd", "cpu": "x64" }, "sha512-WfGVaa1oz5A7+ZFPkERIbIhKT4olvGl1tyzTRaB5yoZRLqC0KwaO95FeZtOdQj/oKkjW57KcVF944m62/0GYtA=="], - - "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.5", "", { "os": "none", "cpu": "arm64" }, "sha512-Xh+VRuh6OMh3uJ0JkCjI57l+DVe7VRGBYymen8rFPnTVgATBwA6nmToxM2OwTlSvrnWpPKkrQUj93+K9huYC6A=="], - - "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.5", "", { "os": "sunos", "cpu": "x64" }, "sha512-aC1gpJkkaUADHuAdQfuVTnqVUTLqqUNhAvEwHwVWcnVVZvNlDPGA0UveZsfXJJ9T6k9Po4eHi3c02gbdwO3g6w=="], - - "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-0UNx2aavV0fk6UpZcwXFLztA2r/k9jTUa7OW7SAea1VYUhkug99MW1uZeXEnPn5+cHOd0n8myQay6TlFnBR07w=="], - - "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-5nlJ3AeJWCTSzR7AEqVjT/faWyqKU86kCi1lLmxVqmNR+j4HrYdns+eTGjS/vmrzCIe8inGQckUadvS0+JkKdQ=="], - - "@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.5", "", { "os": "win32", "cpu": "x64" }, "sha512-PWypQR+d4FLfkhBIV+/kHsUELAnMpx1bRvvsn3p+/sAERbnCzFrtDRG2Xw5n+2zPxBK2+iaP+vetsRl4Ti7WgA=="], - - "@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.9.1", "", { "dependencies": { "eslint-visitor-keys": "^3.4.3" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ=="], - - 
"@eslint-community/regexpp": ["@eslint-community/regexpp@4.12.2", "", {}, "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew=="], - - "@eslint/config-array": ["@eslint/config-array@0.21.2", "", { "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.5" } }, "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw=="], - - "@eslint/config-helpers": ["@eslint/config-helpers@0.4.2", "", { "dependencies": { "@eslint/core": "^0.17.0" } }, "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw=="], - - "@eslint/core": ["@eslint/core@0.17.0", "", { "dependencies": { "@types/json-schema": "^7.0.15" } }, "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ=="], - - "@eslint/eslintrc": ["@eslint/eslintrc@3.3.5", "", { "dependencies": { "ajv": "^6.14.0", "debug": "^4.3.2", "espree": "^10.0.1", "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.1", "minimatch": "^3.1.5", "strip-json-comments": "^3.1.1" } }, "sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg=="], - - "@eslint/js": ["@eslint/js@9.39.4", "", {}, "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw=="], - - "@eslint/object-schema": ["@eslint/object-schema@2.1.7", "", {}, "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA=="], - - "@eslint/plugin-kit": ["@eslint/plugin-kit@0.4.1", "", { "dependencies": { "@eslint/core": "^0.17.0", "levn": "^0.4.1" } }, "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA=="], - - "@humanfs/core": ["@humanfs/core@0.19.1", "", {}, "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA=="], - - "@humanfs/node": ["@humanfs/node@0.16.7", "", { 
"dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" } }, "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ=="], - - "@humanwhocodes/module-importer": ["@humanwhocodes/module-importer@1.0.1", "", {}, "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA=="], - - "@humanwhocodes/retry": ["@humanwhocodes/retry@0.4.3", "", {}, "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ=="], - - "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], - - "@jridgewell/remapping": ["@jridgewell/remapping@2.3.5", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ=="], - - "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], - - "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="], - - "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="], - - "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], - - "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, 
"sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="], - - "@types/node": ["@types/node@25.5.0", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw=="], - - "@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="], - - "@typescript-eslint/eslint-plugin": ["@typescript-eslint/eslint-plugin@8.58.0", "", { "dependencies": { "@eslint-community/regexpp": "^4.12.2", "@typescript-eslint/scope-manager": "8.58.0", "@typescript-eslint/type-utils": "8.58.0", "@typescript-eslint/utils": "8.58.0", "@typescript-eslint/visitor-keys": "8.58.0", "ignore": "^7.0.5", "natural-compare": "^1.4.0", "ts-api-utils": "^2.5.0" }, "peerDependencies": { "@typescript-eslint/parser": "^8.58.0", "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.1.0" } }, "sha512-RLkVSiNuUP1C2ROIWfqX+YcUfLaSnxGE/8M+Y57lopVwg9VTYYfhuz15Yf1IzCKgZj6/rIbYTmJCUSqr76r0Wg=="], - - "@typescript-eslint/parser": ["@typescript-eslint/parser@8.58.0", "", { "dependencies": { "@typescript-eslint/scope-manager": "8.58.0", "@typescript-eslint/types": "8.58.0", "@typescript-eslint/typescript-estree": "8.58.0", "@typescript-eslint/visitor-keys": "8.58.0", "debug": "^4.4.3" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.1.0" } }, "sha512-rLoGZIf9afaRBYsPUMtvkDWykwXwUPL60HebR4JgTI8mxfFe2cQTu3AGitANp4b9B2QlVru6WzjgB2IzJKiCSA=="], - - "@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.58.0", "", { "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.58.0", "@typescript-eslint/types": "^8.58.0", "debug": "^4.4.3" }, "peerDependencies": { "typescript": ">=4.8.4 <6.1.0" } }, "sha512-8Q/wBPWLQP1j16NxoPNIKpDZFMaxl7yWIoqXWYeWO+Bbd2mjgvoF0dxP2jKZg5+x49rgKdf7Ck473M8PC3V9lg=="], 
- - "@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.58.0", "", { "dependencies": { "@typescript-eslint/types": "8.58.0", "@typescript-eslint/visitor-keys": "8.58.0" } }, "sha512-W1Lur1oF50FxSnNdGp3Vs6P+yBRSmZiw4IIjEeYxd8UQJwhUF0gDgDD/W/Tgmh73mxgEU3qX0Bzdl/NGuSPEpQ=="], - - "@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.58.0", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.1.0" } }, "sha512-doNSZEVJsWEu4htiVC+PR6NpM+pa+a4ClH9INRWOWCUzMst/VA9c4gXq92F8GUD1rwhNvRLkgjfYtFXegXQF7A=="], - - "@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@8.58.0", "", { "dependencies": { "@typescript-eslint/types": "8.58.0", "@typescript-eslint/typescript-estree": "8.58.0", "@typescript-eslint/utils": "8.58.0", "debug": "^4.4.3", "ts-api-utils": "^2.5.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.1.0" } }, "sha512-aGsCQImkDIqMyx1u4PrVlbi/krmDsQUs4zAcCV6M7yPcPev+RqVlndsJy9kJ8TLihW9TZ0kbDAzctpLn5o+lOg=="], - - "@typescript-eslint/types": ["@typescript-eslint/types@8.58.0", "", {}, "sha512-O9CjxypDT89fbHxRfETNoAnHj/i6IpRK0CvbVN3qibxlLdo5p5hcLmUuCCrHMpxiWSwKyI8mCP7qRNYuOJ0Uww=="], - - "@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.58.0", "", { "dependencies": { "@typescript-eslint/project-service": "8.58.0", "@typescript-eslint/tsconfig-utils": "8.58.0", "@typescript-eslint/types": "8.58.0", "@typescript-eslint/visitor-keys": "8.58.0", "debug": "^4.4.3", "minimatch": "^10.2.2", "semver": "^7.7.3", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.5.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.1.0" } }, "sha512-7vv5UWbHqew/dvs+D3e1RvLv1v2eeZ9txRHPnEEBUgSNLx5ghdzjHa0sgLWYVKssH+lYmV0JaWdoubo0ncGYLA=="], - - "@typescript-eslint/utils": ["@typescript-eslint/utils@8.58.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.9.1", "@typescript-eslint/scope-manager": "8.58.0", "@typescript-eslint/types": "8.58.0", 
"@typescript-eslint/typescript-estree": "8.58.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.1.0" } }, "sha512-RfeSqcFeHMHlAWzt4TBjWOAtoW9lnsAGiP3GbaX9uVgTYYrMbVnGONEfUCiSss+xMHFl+eHZiipmA8WkQ7FuNA=="], - - "@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.58.0", "", { "dependencies": { "@typescript-eslint/types": "8.58.0", "eslint-visitor-keys": "^5.0.0" } }, "sha512-XJ9UD9+bbDo4a4epraTwG3TsNPeiB9aShrUneAVXy8q4LuwowN+qu89/6ByLMINqvIMeI9H9hOHQtg/ijrYXzQ=="], - - "acorn": ["acorn@8.16.0", "", { "bin": "bin/acorn" }, "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw=="], - - "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], - - "ajv": ["ajv@6.14.0", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw=="], - - "ansi-escapes": ["ansi-escapes@7.3.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="], - - "ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - - "ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], - - "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="], - - "array-buffer-byte-length": ["array-buffer-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "is-array-buffer": "^3.0.5" } }, 
"sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw=="], - - "array-includes": ["array-includes@3.1.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-abstract": "^1.24.0", "es-object-atoms": "^1.1.1", "get-intrinsic": "^1.3.0", "is-string": "^1.1.1", "math-intrinsics": "^1.1.0" } }, "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ=="], - - "array.prototype.findlast": ["array.prototype.findlast@1.2.5", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.2", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "es-shim-unscopables": "^1.0.2" } }, "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ=="], - - "array.prototype.flat": ["array.prototype.flat@1.3.3", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-shim-unscopables": "^1.0.2" } }, "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg=="], - - "array.prototype.flatmap": ["array.prototype.flatmap@1.3.3", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-shim-unscopables": "^1.0.2" } }, "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg=="], - - "array.prototype.tosorted": ["array.prototype.tosorted@1.1.4", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.3", "es-errors": "^1.3.0", "es-shim-unscopables": "^1.0.2" } }, "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA=="], - - "arraybuffer.prototype.slice": ["arraybuffer.prototype.slice@1.0.4", "", { "dependencies": { "array-buffer-byte-length": "^1.0.1", "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", 
"es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "is-array-buffer": "^3.0.4" } }, "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ=="], - - "async-function": ["async-function@1.0.0", "", {}, "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA=="], - - "auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="], - - "available-typed-arrays": ["available-typed-arrays@1.0.7", "", { "dependencies": { "possible-typed-array-names": "^1.0.0" } }, "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ=="], - - "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - - "baseline-browser-mapping": ["baseline-browser-mapping@2.10.13", "", { "bin": "dist/cli.cjs" }, "sha512-BL2sTuHOdy0YT1lYieUxTw/QMtPBC3pmlJC6xk8BBYVv6vcw3SGdKemQ+Xsx9ik2F/lYDO9tqsFQH1r9PFuHKw=="], - - "brace-expansion": ["brace-expansion@1.1.13", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w=="], - - "browserslist": ["browserslist@4.28.2", "", { "dependencies": { "baseline-browser-mapping": "^2.10.12", "caniuse-lite": "^1.0.30001782", "electron-to-chromium": "^1.5.328", "node-releases": "^2.0.36", "update-browserslist-db": "^1.2.3" }, "bin": "cli.js" }, "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg=="], - - "call-bind": ["call-bind@1.0.8", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", "get-intrinsic": "^1.2.4", "set-function-length": "^1.2.2" } }, "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww=="], - - "call-bind-apply-helpers": 
["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], - - "call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="], - - "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], - - "caniuse-lite": ["caniuse-lite@1.0.30001784", "", {}, "sha512-WU346nBTklUV9YfUl60fqRbU5ZqyXlqvo1SgigE1OAXK5bFL8LL9q1K7aap3N739l4BvNqnkm3YrGHiY9sfUQw=="], - - "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "cli-boxes": ["cli-boxes@3.0.0", "", {}, "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g=="], - - "cli-cursor": ["cli-cursor@4.0.0", "", { "dependencies": { "restore-cursor": "^4.0.0" } }, "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg=="], - - "cli-truncate": ["cli-truncate@5.2.0", "", { "dependencies": { "slice-ansi": "^8.0.0", "string-width": "^8.2.0" } }, "sha512-xRwvIOMGrfOAnM1JYtqQImuaNtDEv9v6oIYAs4LIHwTiKee8uwvIi363igssOC0O5U04i4AlENs79LQLu9tEMw=="], - - "code-excerpt": ["code-excerpt@4.0.0", "", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="], - - "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], - - "color-name": ["color-name@1.1.4", "", {}, 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], - - "concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="], - - "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], - - "convert-to-spaces": ["convert-to-spaces@2.0.1", "", {}, "sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ=="], - - "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], - - "csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="], - - "data-view-buffer": ["data-view-buffer@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ=="], - - "data-view-byte-length": ["data-view-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ=="], - - "data-view-byte-offset": ["data-view-byte-offset@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-data-view": "^1.0.1" } }, "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ=="], - - "debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], - - "deep-is": ["deep-is@0.1.4", "", {}, 
"sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], - - "define-data-property": ["define-data-property@1.1.4", "", { "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", "gopd": "^1.0.1" } }, "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A=="], - - "define-properties": ["define-properties@1.2.1", "", { "dependencies": { "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" } }, "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg=="], - - "doctrine": ["doctrine@2.1.0", "", { "dependencies": { "esutils": "^2.0.2" } }, "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw=="], - - "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - - "electron-to-chromium": ["electron-to-chromium@1.5.331", "", {}, "sha512-IbxXrsTlD3hRodkLnbxAPP4OuJYdWCeM3IOdT+CpcMoIwIoDfCmRpEtSPfwBXxVkg9xmBeY7Lz2Eo2TDn/HC3Q=="], - - "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], - - "environment": ["environment@1.1.0", "", {}, "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q=="], - - "es-abstract": ["es-abstract@1.24.1", "", { "dependencies": { "array-buffer-byte-length": "^1.0.2", "arraybuffer.prototype.slice": "^1.0.4", "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "data-view-buffer": "^1.0.2", "data-view-byte-length": "^1.0.2", "data-view-byte-offset": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "es-set-tostringtag": "^2.1.0", 
"es-to-primitive": "^1.3.0", "function.prototype.name": "^1.1.8", "get-intrinsic": "^1.3.0", "get-proto": "^1.0.1", "get-symbol-description": "^1.1.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", "has-property-descriptors": "^1.0.2", "has-proto": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "internal-slot": "^1.1.0", "is-array-buffer": "^3.0.5", "is-callable": "^1.2.7", "is-data-view": "^1.0.2", "is-negative-zero": "^2.0.3", "is-regex": "^1.2.1", "is-set": "^2.0.3", "is-shared-array-buffer": "^1.0.4", "is-string": "^1.1.1", "is-typed-array": "^1.1.15", "is-weakref": "^1.1.1", "math-intrinsics": "^1.1.0", "object-inspect": "^1.13.4", "object-keys": "^1.1.1", "object.assign": "^4.1.7", "own-keys": "^1.0.1", "regexp.prototype.flags": "^1.5.4", "safe-array-concat": "^1.1.3", "safe-push-apply": "^1.0.0", "safe-regex-test": "^1.1.0", "set-proto": "^1.0.0", "stop-iteration-iterator": "^1.1.0", "string.prototype.trim": "^1.2.10", "string.prototype.trimend": "^1.0.9", "string.prototype.trimstart": "^1.0.8", "typed-array-buffer": "^1.0.3", "typed-array-byte-length": "^1.0.3", "typed-array-byte-offset": "^1.0.4", "typed-array-length": "^1.0.7", "unbox-primitive": "^1.1.0", "which-typed-array": "^1.1.19" } }, "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw=="], - - "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], - - "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], - - "es-iterator-helpers": ["es-iterator-helpers@1.3.1", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-abstract": "^1.24.1", "es-errors": "^1.3.0", "es-set-tostringtag": "^2.1.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.3.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", "has-property-descriptors": 
"^1.0.2", "has-proto": "^1.2.0", "has-symbols": "^1.1.0", "internal-slot": "^1.1.0", "iterator.prototype": "^1.1.5", "math-intrinsics": "^1.1.0", "safe-array-concat": "^1.1.3" } }, "sha512-zWwRvqWiuBPr0muUG/78cW3aHROFCNIQ3zpmYDpwdbnt2m+xlNyRWpHBpa2lJjSBit7BQ+RXA1iwbSmu5yJ/EQ=="], - - "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], - - "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], - - "es-shim-unscopables": ["es-shim-unscopables@1.1.0", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw=="], - - "es-to-primitive": ["es-to-primitive@1.3.0", "", { "dependencies": { "is-callable": "^1.2.7", "is-date-object": "^1.0.5", "is-symbol": "^1.0.4" } }, "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g=="], - - "es-toolkit": ["es-toolkit@1.45.1", "", {}, "sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw=="], - - "esbuild": ["esbuild@0.27.5", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.5", "@esbuild/android-arm": "0.27.5", "@esbuild/android-arm64": "0.27.5", "@esbuild/android-x64": "0.27.5", "@esbuild/darwin-arm64": "0.27.5", "@esbuild/darwin-x64": "0.27.5", "@esbuild/freebsd-arm64": "0.27.5", "@esbuild/freebsd-x64": "0.27.5", "@esbuild/linux-arm": "0.27.5", "@esbuild/linux-arm64": "0.27.5", "@esbuild/linux-ia32": "0.27.5", "@esbuild/linux-loong64": "0.27.5", "@esbuild/linux-mips64el": "0.27.5", "@esbuild/linux-ppc64": "0.27.5", "@esbuild/linux-riscv64": "0.27.5", "@esbuild/linux-s390x": "0.27.5", 
"@esbuild/linux-x64": "0.27.5", "@esbuild/netbsd-arm64": "0.27.5", "@esbuild/netbsd-x64": "0.27.5", "@esbuild/openbsd-arm64": "0.27.5", "@esbuild/openbsd-x64": "0.27.5", "@esbuild/openharmony-arm64": "0.27.5", "@esbuild/sunos-x64": "0.27.5", "@esbuild/win32-arm64": "0.27.5", "@esbuild/win32-ia32": "0.27.5", "@esbuild/win32-x64": "0.27.5" }, "bin": "bin/esbuild" }, "sha512-zdQoHBjuDqKsvV5OPaWansOwfSQ0Js+Uj9J85TBvj3bFW1JjWTSULMRwdQAc8qMeIScbClxeMK0jlrtB9linhA=="], - - "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], - - "escape-string-regexp": ["escape-string-regexp@4.0.0", "", {}, "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="], - - "eslint": ["eslint@9.39.4", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.2", "@eslint/config-helpers": "^0.4.2", "@eslint/core": "^0.17.0", "@eslint/eslintrc": "^3.3.5", "@eslint/js": "9.39.4", "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", "ajv": "^6.14.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.4.0", "eslint-visitor-keys": "^4.2.1", "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "json-stable-stringify-without-jsonify": "^1.0.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.5", "natural-compare": "^1.4.0", "optionator": "^0.9.3" }, "peerDependencies": { "jiti": "*" }, "optionalPeers": ["jiti"], "bin": "bin/eslint.js" }, "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ=="], - - 
"eslint-plugin-perfectionist": ["eslint-plugin-perfectionist@5.8.0", "", { "dependencies": { "@typescript-eslint/utils": "^8.58.0", "natural-orderby": "^5.0.0" }, "peerDependencies": { "eslint": "^8.45.0 || ^9.0.0 || ^10.0.0" } }, "sha512-k8uIptWIxkUclonCFGyDzgYs9NI+Qh0a7cUXS3L7IYZDEsjXuimFBVbxXPQQngWqMiaxJRwbtYB4smMGMqF+cw=="], - - "eslint-plugin-react": ["eslint-plugin-react@7.37.5", "", { "dependencies": { "array-includes": "^3.1.8", "array.prototype.findlast": "^1.2.5", "array.prototype.flatmap": "^1.3.3", "array.prototype.tosorted": "^1.1.4", "doctrine": "^2.1.0", "es-iterator-helpers": "^1.2.1", "estraverse": "^5.3.0", "hasown": "^2.0.2", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", "object.entries": "^1.1.9", "object.fromentries": "^2.0.8", "object.values": "^1.2.1", "prop-types": "^15.8.1", "resolve": "^2.0.0-next.5", "semver": "^6.3.1", "string.prototype.matchall": "^4.0.12", "string.prototype.repeat": "^1.0.0" }, "peerDependencies": { "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" } }, "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA=="], - - "eslint-plugin-react-hooks": ["eslint-plugin-react-hooks@7.0.1", "", { "dependencies": { "@babel/core": "^7.24.4", "@babel/parser": "^7.24.4", "hermes-parser": "^0.25.1", "zod": "^3.25.0 || ^4.0.0", "zod-validation-error": "^3.5.0 || ^4.0.0" }, "peerDependencies": { "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA=="], - - "eslint-plugin-unused-imports": ["eslint-plugin-unused-imports@4.4.1", "", { "peerDependencies": { "@typescript-eslint/eslint-plugin": "^8.0.0-0 || ^7.0.0 || ^6.0.0 || ^5.0.0", "eslint": "^10.0.0 || ^9.0.0 || ^8.0.0" } }, "sha512-oZGYUz1X3sRMGUB+0cZyK2VcvRX5lm/vB56PgNNcU+7ficUCKm66oZWKUubXWnOuPjQ8PvmXtCViXBMONPe7tQ=="], - - "eslint-scope": ["eslint-scope@8.4.0", "", { "dependencies": { "esrecurse": 
"^4.3.0", "estraverse": "^5.2.0" } }, "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg=="], - - "eslint-visitor-keys": ["eslint-visitor-keys@4.2.1", "", {}, "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ=="], - - "espree": ["espree@10.4.0", "", { "dependencies": { "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^4.2.1" } }, "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ=="], - - "esquery": ["esquery@1.7.0", "", { "dependencies": { "estraverse": "^5.1.0" } }, "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g=="], - - "esrecurse": ["esrecurse@4.3.0", "", { "dependencies": { "estraverse": "^5.2.0" } }, "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag=="], - - "estraverse": ["estraverse@5.3.0", "", {}, "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="], - - "esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="], - - "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], - - "fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="], - - "fast-levenshtein": ["fast-levenshtein@2.0.6", "", {}, "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="], - - "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" } }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], - - "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, 
"sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], - - "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], - - "flat-cache": ["flat-cache@4.0.1", "", { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" } }, "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw=="], - - "flatted": ["flatted@3.4.2", "", {}, "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA=="], - - "for-each": ["for-each@0.3.5", "", { "dependencies": { "is-callable": "^1.2.7" } }, "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg=="], - - "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], - - "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], - - "function.prototype.name": ["function.prototype.name@1.1.8", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "functions-have-names": "^1.2.3", "hasown": "^2.0.2", "is-callable": "^1.2.7" } }, "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q=="], - - "functions-have-names": ["functions-have-names@1.2.3", "", {}, "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ=="], - - "generator-function": ["generator-function@2.0.1", "", {}, "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g=="], - - "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], - - 
"get-east-asian-width": ["get-east-asian-width@1.5.0", "", {}, "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA=="], - - "get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="], - - "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], - - "get-symbol-description": ["get-symbol-description@1.1.0", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6" } }, "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg=="], - - "get-tsconfig": ["get-tsconfig@4.13.7", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q=="], - - "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], - - "globals": ["globals@16.5.0", "", {}, "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ=="], - - "globalthis": ["globalthis@1.0.4", "", { "dependencies": { "define-properties": "^1.2.1", "gopd": "^1.0.1" } }, "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ=="], - - "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], - - "has-bigints": 
["has-bigints@1.1.0", "", {}, "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg=="], - - "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], - - "has-property-descriptors": ["has-property-descriptors@1.0.2", "", { "dependencies": { "es-define-property": "^1.0.0" } }, "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg=="], - - "has-proto": ["has-proto@1.2.0", "", { "dependencies": { "dunder-proto": "^1.0.0" } }, "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ=="], - - "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], - - "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], - - "hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="], - - "hermes-estree": ["hermes-estree@0.25.1", "", {}, "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw=="], - - "hermes-parser": ["hermes-parser@0.25.1", "", { "dependencies": { "hermes-estree": "0.25.1" } }, "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA=="], - - "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], - - "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], - - "imurmurhash": ["imurmurhash@0.1.4", "", {}, 
"sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="], - - "indent-string": ["indent-string@5.0.0", "", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="], - - "ink": ["ink@6.8.0", "", { "dependencies": { "@alcalzone/ansi-tokenize": "^0.2.4", "ansi-escapes": "^7.3.0", "ansi-styles": "^6.2.1", "auto-bind": "^5.0.1", "chalk": "^5.6.0", "cli-boxes": "^3.0.0", "cli-cursor": "^4.0.0", "cli-truncate": "^5.1.1", "code-excerpt": "^4.0.0", "es-toolkit": "^1.39.10", "indent-string": "^5.0.0", "is-in-ci": "^2.0.0", "patch-console": "^2.0.0", "react-reconciler": "^0.33.0", "scheduler": "^0.27.0", "signal-exit": "^3.0.7", "slice-ansi": "^8.0.0", "stack-utils": "^2.0.6", "string-width": "^8.1.1", "terminal-size": "^4.0.1", "type-fest": "^5.4.1", "widest-line": "^6.0.0", "wrap-ansi": "^9.0.0", "ws": "^8.18.0", "yoga-layout": "~3.2.1" }, "peerDependencies": { "@types/react": ">=19.0.0", "react": ">=19.0.0", "react-devtools-core": ">=6.1.2" }, "optionalPeers": ["react-devtools-core"] }, "sha512-sbl1RdLOgkO9isK42WCZlJCFN9hb++sX9dsklOvfd1YQ3bQ2AiFu12Q6tFlr0HvEUvzraJntQCCpfEoUe9DSzA=="], - - "ink-text-input": ["ink-text-input@6.0.0", "", { "dependencies": { "chalk": "^5.3.0", "type-fest": "^4.18.2" }, "peerDependencies": { "ink": ">=5", "react": ">=18" } }, "sha512-Fw64n7Yha5deb1rHY137zHTAbSTNelUKuB5Kkk2HACXEtwIHBCf9OH2tP/LQ9fRYTl1F0dZgbW0zPnZk6FA9Lw=="], - - "internal-slot": ["internal-slot@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw=="], - - "is-array-buffer": ["is-array-buffer@3.0.5", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A=="], - - "is-async-function": 
["is-async-function@2.1.1", "", { "dependencies": { "async-function": "^1.0.0", "call-bound": "^1.0.3", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ=="], - - "is-bigint": ["is-bigint@1.1.0", "", { "dependencies": { "has-bigints": "^1.0.2" } }, "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ=="], - - "is-boolean-object": ["is-boolean-object@1.2.2", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A=="], - - "is-callable": ["is-callable@1.2.7", "", {}, "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA=="], - - "is-core-module": ["is-core-module@2.16.1", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w=="], - - "is-data-view": ["is-data-view@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "is-typed-array": "^1.1.13" } }, "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw=="], - - "is-date-object": ["is-date-object@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "has-tostringtag": "^1.0.2" } }, "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg=="], - - "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], - - "is-finalizationregistry": ["is-finalizationregistry@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg=="], - - "is-fullwidth-code-point": ["is-fullwidth-code-point@5.1.0", "", { "dependencies": { 
"get-east-asian-width": "^1.3.1" } }, "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ=="], - - "is-generator-function": ["is-generator-function@1.1.2", "", { "dependencies": { "call-bound": "^1.0.4", "generator-function": "^2.0.0", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA=="], - - "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], - - "is-in-ci": ["is-in-ci@2.0.0", "", { "bin": "cli.js" }, "sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w=="], - - "is-map": ["is-map@2.0.3", "", {}, "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw=="], - - "is-negative-zero": ["is-negative-zero@2.0.3", "", {}, "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw=="], - - "is-number-object": ["is-number-object@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw=="], - - "is-regex": ["is-regex@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g=="], - - "is-set": ["is-set@2.0.3", "", {}, "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg=="], - - "is-shared-array-buffer": ["is-shared-array-buffer@1.0.4", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A=="], - - "is-string": ["is-string@1.1.1", "", { 
"dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA=="], - - "is-symbol": ["is-symbol@1.1.1", "", { "dependencies": { "call-bound": "^1.0.2", "has-symbols": "^1.1.0", "safe-regex-test": "^1.1.0" } }, "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w=="], - - "is-typed-array": ["is-typed-array@1.1.15", "", { "dependencies": { "which-typed-array": "^1.1.16" } }, "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ=="], - - "is-weakmap": ["is-weakmap@2.0.2", "", {}, "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w=="], - - "is-weakref": ["is-weakref@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew=="], - - "is-weakset": ["is-weakset@2.0.4", "", { "dependencies": { "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ=="], - - "isarray": ["isarray@2.0.5", "", {}, "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="], - - "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], - - "iterator.prototype": ["iterator.prototype@1.1.5", "", { "dependencies": { "define-data-property": "^1.1.4", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.6", "get-proto": "^1.0.0", "has-symbols": "^1.1.0", "set-function-name": "^2.0.2" } }, "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g=="], - - "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], - - "js-yaml": ["js-yaml@4.1.1", "", { 
"dependencies": { "argparse": "^2.0.1" }, "bin": "bin/js-yaml.js" }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], - - "jsesc": ["jsesc@3.1.0", "", { "bin": "bin/jsesc" }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], - - "json-buffer": ["json-buffer@3.0.1", "", {}, "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="], - - "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], - - "json-stable-stringify-without-jsonify": ["json-stable-stringify-without-jsonify@1.0.1", "", {}, "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="], - - "json5": ["json5@2.2.3", "", { "bin": "lib/cli.js" }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], - - "jsx-ast-utils": ["jsx-ast-utils@3.3.5", "", { "dependencies": { "array-includes": "^3.1.6", "array.prototype.flat": "^1.3.1", "object.assign": "^4.1.4", "object.values": "^1.1.6" } }, "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ=="], - - "keyv": ["keyv@4.5.4", "", { "dependencies": { "json-buffer": "3.0.1" } }, "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw=="], - - "levn": ["levn@0.4.1", "", { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ=="], - - "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], - - "lodash.merge": ["lodash.merge@4.6.2", "", {}, 
"sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="], - - "loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": "cli.js" }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="], - - "lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], - - "math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], - - "mimic-fn": ["mimic-fn@2.1.0", "", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], - - "minimatch": ["minimatch@3.1.5", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w=="], - - "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], - - "natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="], - - "natural-orderby": ["natural-orderby@5.0.0", "", {}, "sha512-kKHJhxwpR/Okycz4HhQKKlhWe4ASEfPgkSWNmKFHd7+ezuQlxkA5cM3+XkBPvm1gmHen3w53qsYAv+8GwRrBlg=="], - - "node-exports-info": ["node-exports-info@1.6.0", "", { "dependencies": { "array.prototype.flatmap": "^1.3.3", "es-errors": "^1.3.0", "object.entries": "^1.1.9", "semver": "^6.3.1" } }, "sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw=="], - - "node-releases": ["node-releases@2.0.37", "", {}, "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg=="], - - "object-assign": ["object-assign@4.1.1", "", {}, 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], - - "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], - - "object-keys": ["object-keys@1.1.1", "", {}, "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="], - - "object.assign": ["object.assign@4.1.7", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0", "has-symbols": "^1.1.0", "object-keys": "^1.1.1" } }, "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw=="], - - "object.entries": ["object.entries@1.1.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-object-atoms": "^1.1.1" } }, "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw=="], - - "object.fromentries": ["object.fromentries@2.0.8", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.2", "es-object-atoms": "^1.0.0" } }, "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ=="], - - "object.values": ["object.values@1.2.1", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA=="], - - "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], - - "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, 
"sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], - - "own-keys": ["own-keys@1.0.1", "", { "dependencies": { "get-intrinsic": "^1.2.6", "object-keys": "^1.1.1", "safe-push-apply": "^1.0.0" } }, "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg=="], - - "p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], - - "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], - - "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], - - "patch-console": ["patch-console@2.0.0", "", {}, "sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA=="], - - "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], - - "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], - - "path-parse": ["path-parse@1.0.7", "", {}, "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="], - - "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], - - "picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="], - - "possible-typed-array-names": ["possible-typed-array-names@1.1.0", "", {}, "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg=="], - - "prelude-ls": 
["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], - - "prettier": ["prettier@3.8.1", "", { "bin": "bin/prettier.cjs" }, "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg=="], - - "prop-types": ["prop-types@15.8.1", "", { "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg=="], - - "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], - - "react": ["react@19.2.4", "", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="], - - "react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="], - - "react-reconciler": ["react-reconciler@0.33.0", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.0" } }, "sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA=="], - - "reflect.getprototypeof": ["reflect.getprototypeof@1.0.10", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.9", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.7", "get-proto": "^1.0.1", "which-builtin-type": "^1.2.1" } }, "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw=="], - - "regexp.prototype.flags": ["regexp.prototype.flags@1.5.4", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-errors": "^1.3.0", "get-proto": "^1.0.1", "gopd": "^1.2.0", "set-function-name": "^2.0.2" } }, "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA=="], - - "resolve": 
["resolve@2.0.0-next.6", "", { "dependencies": { "es-errors": "^1.3.0", "is-core-module": "^2.16.1", "node-exports-info": "^1.6.0", "object-keys": "^1.1.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": "bin/resolve" }, "sha512-3JmVl5hMGtJ3kMmB3zi3DL25KfkCEyy3Tw7Gmw7z5w8M9WlwoPFnIvwChzu1+cF3iaK3sp18hhPz8ANeimdJfA=="], - - "resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], - - "resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="], - - "restore-cursor": ["restore-cursor@4.0.0", "", { "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" } }, "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg=="], - - "safe-array-concat": ["safe-array-concat@1.1.3", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "has-symbols": "^1.1.0", "isarray": "^2.0.5" } }, "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q=="], - - "safe-push-apply": ["safe-push-apply@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "isarray": "^2.0.5" } }, "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA=="], - - "safe-regex-test": ["safe-regex-test@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-regex": "^1.2.1" } }, "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw=="], - - "scheduler": ["scheduler@0.27.0", "", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="], - - "semver": ["semver@6.3.1", "", { "bin": "bin/semver.js" }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "set-function-length": 
["set-function-length@1.2.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" } }, "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg=="], - - "set-function-name": ["set-function-name@2.0.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "functions-have-names": "^1.2.3", "has-property-descriptors": "^1.0.2" } }, "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ=="], - - "set-proto": ["set-proto@1.0.0", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0" } }, "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw=="], - - "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], - - "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], - - "side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="], - - "side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="], - - "side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, 
"sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="], - - "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], - - "signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], - - "slice-ansi": ["slice-ansi@8.0.0", "", { "dependencies": { "ansi-styles": "^6.2.3", "is-fullwidth-code-point": "^5.1.0" } }, "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg=="], - - "stack-utils": ["stack-utils@2.0.6", "", { "dependencies": { "escape-string-regexp": "^2.0.0" } }, "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ=="], - - "stop-iteration-iterator": ["stop-iteration-iterator@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "internal-slot": "^1.1.0" } }, "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ=="], - - "string-width": ["string-width@8.2.0", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="], - - "string.prototype.matchall": ["string.prototype.matchall@4.0.12", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-abstract": "^1.23.6", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.6", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "internal-slot": "^1.1.0", "regexp.prototype.flags": "^1.5.3", "set-function-name": "^2.0.2", "side-channel": "^1.1.0" } }, 
"sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA=="], - - "string.prototype.repeat": ["string.prototype.repeat@1.0.0", "", { "dependencies": { "define-properties": "^1.1.3", "es-abstract": "^1.17.5" } }, "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w=="], - - "string.prototype.trim": ["string.prototype.trim@1.2.10", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-data-property": "^1.1.4", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-object-atoms": "^1.0.0", "has-property-descriptors": "^1.0.2" } }, "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA=="], - - "string.prototype.trimend": ["string.prototype.trimend@1.0.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ=="], - - "string.prototype.trimstart": ["string.prototype.trimstart@1.0.8", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg=="], - - "strip-ansi": ["strip-ansi@7.2.0", "", { "dependencies": { "ansi-regex": "^6.2.2" } }, "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="], - - "strip-json-comments": ["strip-json-comments@3.1.1", "", {}, "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="], - - "supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "supports-preserve-symlinks-flag": ["supports-preserve-symlinks-flag@1.0.0", "", {}, 
"sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="], - - "tagged-tag": ["tagged-tag@1.0.0", "", {}, "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng=="], - - "terminal-size": ["terminal-size@4.0.1", "", {}, "sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ=="], - - "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], - - "ts-api-utils": ["ts-api-utils@2.5.0", "", { "peerDependencies": { "typescript": ">=4.8.4" } }, "sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA=="], - - "tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": "dist/cli.mjs" }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="], - - "type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="], - - "type-fest": ["type-fest@5.5.0", "", { "dependencies": { "tagged-tag": "^1.0.0" } }, "sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g=="], - - "typed-array-buffer": ["typed-array-buffer@1.0.3", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" } }, "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw=="], - - "typed-array-byte-length": ["typed-array-byte-length@1.0.3", "", { "dependencies": { "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.14" } }, 
"sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg=="], - - "typed-array-byte-offset": ["typed-array-byte-offset@1.0.4", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.15", "reflect.getprototypeof": "^1.0.9" } }, "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ=="], - - "typed-array-length": ["typed-array-length@1.0.7", "", { "dependencies": { "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", "is-typed-array": "^1.1.13", "possible-typed-array-names": "^1.0.0", "reflect.getprototypeof": "^1.0.6" } }, "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg=="], - - "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], - - "unbox-primitive": ["unbox-primitive@1.1.0", "", { "dependencies": { "call-bound": "^1.0.3", "has-bigints": "^1.0.2", "has-symbols": "^1.1.0", "which-boxed-primitive": "^1.1.1" } }, "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw=="], - - "undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="], - - "unicode-animations": ["unicode-animations@1.0.3", "", { "dependencies": { "unicode-animations": "^1.0.1" }, "bin": { "unicode-animations": "scripts/demo.cjs" } }, "sha512-+klB2oWwcYZjYWhwP4Pr8UZffWDFVx6jKeIahE6z0QYyM2dwDeDPyn5nevCYbyotxvtT9lh21cVURO1RX0+YMg=="], - - "update-browserslist-db": ["update-browserslist-db@1.2.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": "cli.js" }, 
"sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w=="], - - "uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="], - - "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], - - "which-boxed-primitive": ["which-boxed-primitive@1.1.1", "", { "dependencies": { "is-bigint": "^1.1.0", "is-boolean-object": "^1.2.1", "is-number-object": "^1.1.1", "is-string": "^1.1.1", "is-symbol": "^1.1.1" } }, "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA=="], - - "which-builtin-type": ["which-builtin-type@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "function.prototype.name": "^1.1.6", "has-tostringtag": "^1.0.2", "is-async-function": "^2.0.0", "is-date-object": "^1.1.0", "is-finalizationregistry": "^1.1.0", "is-generator-function": "^1.0.10", "is-regex": "^1.2.1", "is-weakref": "^1.0.2", "isarray": "^2.0.5", "which-boxed-primitive": "^1.1.0", "which-collection": "^1.0.2", "which-typed-array": "^1.1.16" } }, "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q=="], - - "which-collection": ["which-collection@1.0.2", "", { "dependencies": { "is-map": "^2.0.3", "is-set": "^2.0.3", "is-weakmap": "^2.0.2", "is-weakset": "^2.0.3" } }, "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw=="], - - "which-typed-array": ["which-typed-array@1.1.20", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "for-each": "^0.3.5", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" } }, 
"sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg=="], - - "widest-line": ["widest-line@6.0.0", "", { "dependencies": { "string-width": "^8.1.0" } }, "sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA=="], - - "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], - - "wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="], - - "ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], - - "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], - - "yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], - - "yoga-layout": ["yoga-layout@3.2.1", "", {}, "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ=="], - - "zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="], - - "zod-validation-error": ["zod-validation-error@4.0.2", "", { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ=="], - - "@babel/core/semver": ["semver@6.3.1", "", { "bin": "bin/semver.js" }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "@babel/helper-compilation-targets/semver": ["semver@6.3.1", "", { "bin": "bin/semver.js" 
}, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - "@eslint/config-array/minimatch": ["minimatch@3.1.5", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w=="], - - "@eslint/eslintrc/globals": ["globals@14.0.0", "", {}, "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], - - "@eslint/eslintrc/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "@eslint/eslintrc/minimatch": ["minimatch@3.1.5", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w=="], - - "@typescript-eslint/typescript-estree/minimatch": ["minimatch@10.2.5", "", { "dependencies": { "brace-expansion": "^5.0.5" } }, "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg=="], - - "@typescript-eslint/typescript-estree/semver": ["semver@7.7.4", "", { "bin": "bin/semver.js" }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], - - "@typescript-eslint/visitor-keys/eslint-visitor-keys": ["eslint-visitor-keys@5.0.1", "", {}, "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA=="], - - "chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "cli-truncate/string-width": ["string-width@8.2.0", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": 
"^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="], - - "eslint/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "eslint-plugin-react/minimatch": ["minimatch@3.1.5", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w=="], - - "espree/eslint-visitor-keys": ["eslint-visitor-keys@4.2.1", "", {}, "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ=="], - - "ink/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], - - "ink-text-input/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], - - "ink-text-input/type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="], - - "node-exports-info/semver": ["semver@6.3.1", "", { "bin": "bin/semver.js" }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "stack-utils/escape-string-regexp": ["escape-string-regexp@2.0.0", "", {}, "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w=="], - - "widest-line/string-width": ["string-width@8.2.0", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="], - - "wrap-ansi/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], - - 
"@eslint/config-array/minimatch/brace-expansion": ["brace-expansion@1.1.13", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w=="], - - "@eslint/eslintrc/minimatch/brace-expansion": ["brace-expansion@1.1.13", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w=="], - - "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@5.0.5", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ=="], - - "eslint-plugin-react/minimatch/brace-expansion": ["brace-expansion@1.1.13", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w=="], - - "@eslint/config-array/minimatch/brace-expansion/balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - - "@eslint/eslintrc/minimatch/brace-expansion/balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - - "@typescript-eslint/typescript-estree/minimatch/brace-expansion/balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], - - "eslint-plugin-react/minimatch/brace-expansion/balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - } -} diff --git a/ui-tui/eslint.config.mjs b/ui-tui/eslint.config.mjs index 7013dfdb6e..14a5d108d4 100644 --- a/ui-tui/eslint.config.mjs +++ 
b/ui-tui/eslint.config.mjs @@ -23,6 +23,9 @@ const customRules = { } export default [ + { + ignores: ['**/node_modules/**', '**/dist/**', 'src/**/*.js'] + }, js.configs.recommended, { files: ['**/*.{ts,tsx}'], @@ -89,6 +92,15 @@ export default [ } }, { - ignores: ['node_modules/', 'dist/', '*.config.*', 'src/**/*.js'] + files: ['**/*.js'], + ignores: ['**/node_modules/**', '**/dist/**'], + languageOptions: { + globals: { ...globals.node }, + ecmaVersion: 'latest', + sourceType: 'module' + } + }, + { + ignores: ['*.config.*'] } ] diff --git a/ui-tui/package-lock.json b/ui-tui/package-lock.json index ec79588fec..04c2767975 100644 --- a/ui-tui/package-lock.json +++ b/ui-tui/package-lock.json @@ -6641,6 +6641,9 @@ "usehooks-ts": "^3.1.0", "wrap-ansi": "^9.0.0" }, + "devDependencies": { + "esbuild": "^0.25.0" + }, "peerDependencies": { "ink-text-input": ">=6.0.0", "react": ">=19.0.0" @@ -6659,6 +6662,448 @@ "node": ">=14.13.1" } }, + "packages/hermes-ink/node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + 
"integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + 
"node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": 
"sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": 
">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": 
"sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "packages/hermes-ink/node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + 
} + }, + "packages/hermes-ink/node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, "packages/hermes-ink/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -6683,6 +7128,48 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "packages/hermes-ink/node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + 
"@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, "packages/hermes-ink/node_modules/is-fullwidth-code-point": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", diff --git a/ui-tui/package.json b/ui-tui/package.json index 2fc6271f8c..e6e10ec06c 100644 --- a/ui-tui/package.json +++ b/ui-tui/package.json @@ -4,9 +4,10 @@ "private": true, "type": "module", "scripts": { - "dev": "tsx --watch src/entry.tsx", + "dev": "npm run build --prefix packages/hermes-ink && tsx --watch src/entry.tsx", "start": "tsx src/entry.tsx", - "build": "tsc -p tsconfig.build.json && chmod +x dist/entry.js", + "build": "npm run build --prefix packages/hermes-ink && tsc -p tsconfig.build.json && chmod +x dist/entry.js", + "type-check": "tsc --noEmit -p tsconfig.json", "lint": "eslint src/ packages/", "lint:fix": "eslint src/ packages/ --fix", "fmt": "prettier --write 'src/**/*.{ts,tsx}' 'packages/**/*.{ts,tsx}'", diff --git a/ui-tui/packages/hermes-ink/ambient.d.ts b/ui-tui/packages/hermes-ink/ambient.d.ts new file mode 100644 index 0000000000..943ff76bc0 --- /dev/null +++ b/ui-tui/packages/hermes-ink/ambient.d.ts @@ -0,0 +1,83 @@ +/// + +declare module 'react/compiler-runtime' { + export function c(size: number): any[] +} + +declare module 'bidi-js' { + const bidiFactory: () => Record + export default bidiFactory +} + +declare module 'stack-utils' { + class StackUtils { + static nodeInternals(): RegExp[] + constructor(opts?: { cwd?: string; internals?: RegExp[] }) + clean(stack: string | undefined): string | undefined + parseLine(line: string): { file?: string; line?: number; column?: number; function?: string } | undefined + } + export default StackUtils +} + +declare module 'react-reconciler' { + export type FiberRoot = unknown + const createReconciler: any + export default createReconciler +} + +declare module 'react-reconciler/constants.js' { + export const ConcurrentRoot: number + export 
const LegacyRoot: number + export const DiscreteEventPriority: symbol | number + export const ContinuousEventPriority: symbol | number + export const DefaultEventPriority: symbol | number + export const NoEventPriority: symbol | number +} + +declare module 'lodash-es/noop.js' { + const noop: (...args: unknown[]) => void + export default noop +} + +declare module 'lodash-es/throttle.js' { + function throttle unknown>( + fn: T, + wait?: number, + opts?: { leading?: boolean; trailing?: boolean } + ): T & { cancel(): void; flush(): void } + export default throttle +} + +declare module 'semver' { + export function coerce(version: string | number | null | undefined): { version: string } | null + export function gt(a: string, b: string, opts?: { loose?: boolean }): boolean + export function gte(a: string, b: string, opts?: { loose?: boolean }): boolean + export function lt(a: string, b: string, opts?: { loose?: boolean }): boolean + export function lte(a: string, b: string, opts?: { loose?: boolean }): boolean + export function satisfies(version: string, range: string, opts?: { loose?: boolean }): boolean + export function compare(a: string, b: string, opts?: { loose?: boolean }): number +} + +interface BunSemver { + order(a: string, b: string): -1 | 0 | 1 + satisfies(version: string, range: string): boolean +} + +interface BunRuntime { + stringWidth(s: string, opts?: { ambiguousIsNarrow?: boolean }): number + semver: BunSemver + wrapAnsi?(input: string, columns: number, options?: { hard?: boolean; wordWrap?: boolean; trim?: boolean }): string +} + +declare var Bun: BunRuntime | undefined + +declare namespace React { + namespace JSX { + interface IntrinsicElements { + 'ink-box': Record + 'ink-text': Record + 'ink-link': Record + 'ink-raw-ansi': Record + } + } +} diff --git a/ui-tui/packages/hermes-ink/index.d.ts b/ui-tui/packages/hermes-ink/index.d.ts index 1c23959a35..6536bddb02 100644 --- a/ui-tui/packages/hermes-ink/index.d.ts +++ 
b/ui-tui/packages/hermes-ink/index.d.ts @@ -1,3 +1,4 @@ +/// export { default as useStderr } from './src/hooks/use-stderr.ts' export type { StderrHandle } from './src/hooks/use-stderr.ts' export { default as useStdout } from './src/hooks/use-stdout.ts' diff --git a/ui-tui/packages/hermes-ink/index.js b/ui-tui/packages/hermes-ink/index.js index be929ce6ca..758fef3073 100644 --- a/ui-tui/packages/hermes-ink/index.js +++ b/ui-tui/packages/hermes-ink/index.js @@ -1,25 +1 @@ -export { default as render, createRoot, renderSync } from './src/ink/root.ts' -export { default as Box } from './src/ink/components/Box.tsx' -export { default as Text } from './src/ink/components/Text.tsx' -export { Ansi } from './src/ink/Ansi.tsx' -export { AlternateScreen } from './src/ink/components/AlternateScreen.tsx' -export { default as Link } from './src/ink/components/Link.tsx' -export { default as Newline } from './src/ink/components/Newline.tsx' -export { NoSelect } from './src/ink/components/NoSelect.tsx' -export { RawAnsi } from './src/ink/components/RawAnsi.tsx' -export { default as ScrollBox } from './src/ink/components/ScrollBox.tsx' -export { default as Spacer } from './src/ink/components/Spacer.tsx' -export { default as measureElement } from './src/ink/measure-element.ts' -export { stringWidth } from './src/ink/stringWidth.ts' -export { default as useApp } from './src/ink/hooks/use-app.ts' -export { useDeclaredCursor } from './src/ink/hooks/use-declared-cursor.ts' -export { default as useInput } from './src/ink/hooks/use-input.ts' -export { default as useStdin } from './src/ink/hooks/use-stdin.ts' -export { useHasSelection, useSelection } from './src/ink/hooks/use-selection.ts' -export { default as useStdout } from './src/hooks/use-stdout.ts' -export { default as useStderr } from './src/hooks/use-stderr.ts' -export { useTabStatus } from './src/ink/hooks/use-tab-status.ts' -export { useTerminalFocus } from './src/ink/hooks/use-terminal-focus.ts' -export { useTerminalTitle } from 
'./src/ink/hooks/use-terminal-title.ts' -export { useTerminalViewport } from './src/ink/hooks/use-terminal-viewport.ts' -export { default as TextInput, UncontrolledTextInput } from 'ink-text-input' +export * from './dist/ink-bundle.js' diff --git a/ui-tui/packages/hermes-ink/package-lock.json b/ui-tui/packages/hermes-ink/package-lock.json new file mode 100644 index 0000000000..4fb5866d14 --- /dev/null +++ b/ui-tui/packages/hermes-ink/package-lock.json @@ -0,0 +1,819 @@ +{ + "name": "@hermes/ink", + "version": "0.0.1", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@hermes/ink", + "version": "0.0.1", + "dependencies": { + "@alcalzone/ansi-tokenize": "^0.1.0", + "auto-bind": "^5.0.0", + "bidi-js": "^1.0.0", + "chalk": "^5.4.0", + "cli-boxes": "^3.0.0", + "code-excerpt": "^4.0.0", + "emoji-regex": "^10.4.0", + "get-east-asian-width": "^1.3.0", + "indent-string": "^5.0.0", + "lodash-es": "^4.17.0", + "react": ">=19.0.0", + "react-reconciler": "0.33.0", + "semver": "^7.6.0", + "signal-exit": "^4.1.0", + "stack-utils": "^2.0.0", + "strip-ansi": "^7.1.0", + "supports-hyperlinks": "^3.1.0", + "type-fest": "^4.30.0", + "usehooks-ts": "^3.1.0", + "wrap-ansi": "^9.0.0" + }, + "devDependencies": { + "typescript": "~5.7.0" + }, + "peerDependencies": { + "ink-text-input": ">=6.0.0", + "react": ">=19.0.0" + } + }, + "node_modules/@alcalzone/ansi-tokenize": { + "version": "0.1.3", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "is-fullwidth-code-point": "^4.0.0" + }, + "engines": { + "node": ">=14.13.1" + } + }, + "node_modules/ansi-escapes": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.3.0.tgz", + "integrity": "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==", + "license": "MIT", + "peer": true, + "dependencies": { + "environment": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.3", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/auto-bind": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/auto-bind/-/auto-bind-5.0.1.tgz", + "integrity": "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, + "node_modules/chalk": { + "version": "5.6.2", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-cursor": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", + "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", + "license": "MIT", + "peer": true, + "dependencies": { + "restore-cursor": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-truncate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-6.0.0.tgz", + "integrity": "sha512-3+YKIUFsohD9MIoOFPFBldjAlnfCmCDcqe6aYGFqlDTRKg80p4wg35L+j83QQ63iOlKRccEkbn8IuM++HsgEjA==", + "license": "MIT", + "peer": true, + "dependencies": { + "slice-ansi": "^9.0.0", + "string-width": "^8.2.0" + }, + "engines": { + "node": ">=22" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/code-excerpt": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/code-excerpt/-/code-excerpt-4.0.0.tgz", + "integrity": "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA==", + "license": "MIT", + "dependencies": { + "convert-to-spaces": "^2.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/convert-to-spaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/convert-to-spaces/-/convert-to-spaces-2.0.1.tgz", + "integrity": "sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "license": "MIT" + }, + "node_modules/environment": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", + "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/es-toolkit": { + "version": "1.45.1", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.45.1.tgz", + "integrity": "sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw==", + "license": "MIT", + "peer": true, + "workspaces": [ + "docs", + "benchmarks" + ] + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", + "integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/indent-string": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", + "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", + "license": "MIT", + "engines": { + 
"node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ink": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/ink/-/ink-7.0.0.tgz", + "integrity": "sha512-fMie5/VwIYXofMyND0s+fOVhwVBBPYx+uuqJ6V6rUBGjui+2UYp+0fWtvhSeKT4z+X1uH98a4ge5Vj3aTlL6mg==", + "license": "MIT", + "peer": true, + "dependencies": { + "@alcalzone/ansi-tokenize": "^0.3.0", + "ansi-escapes": "^7.3.0", + "ansi-styles": "^6.2.3", + "auto-bind": "^5.0.1", + "chalk": "^5.6.2", + "cli-boxes": "^4.0.1", + "cli-cursor": "^4.0.0", + "cli-truncate": "^6.0.0", + "code-excerpt": "^4.0.0", + "es-toolkit": "^1.45.1", + "indent-string": "^5.0.0", + "is-in-ci": "^2.0.0", + "patch-console": "^2.0.0", + "react-reconciler": "^0.33.0", + "scheduler": "^0.27.0", + "signal-exit": "^3.0.7", + "slice-ansi": "^9.0.0", + "stack-utils": "^2.0.6", + "string-width": "^8.2.0", + "terminal-size": "^4.0.1", + "type-fest": "^5.5.0", + "widest-line": "^6.0.0", + "wrap-ansi": "^10.0.0", + "ws": "^8.20.0", + "yoga-layout": "~3.2.1" + }, + "engines": { + "node": ">=22" + }, + "peerDependencies": { + "@types/react": ">=19.2.0", + "react": ">=19.2.0", + "react-devtools-core": ">=6.1.2" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react-devtools-core": { + "optional": true + } + } + }, + "node_modules/ink-text-input": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/ink-text-input/-/ink-text-input-6.0.0.tgz", + "integrity": "sha512-Fw64n7Yha5deb1rHY137zHTAbSTNelUKuB5Kkk2HACXEtwIHBCf9OH2tP/LQ9fRYTl1F0dZgbW0zPnZk6FA9Lw==", + "license": "MIT", + "peer": true, + "dependencies": { + "chalk": "^5.3.0", + "type-fest": "^4.18.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "ink": ">=5", + "react": ">=18" + } + }, + "node_modules/ink/node_modules/@alcalzone/ansi-tokenize": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@alcalzone/ansi-tokenize/-/ansi-tokenize-0.3.0.tgz", + 
"integrity": "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA==", + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-styles": "^6.2.1", + "is-fullwidth-code-point": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/ink/node_modules/cli-boxes": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-4.0.1.tgz", + "integrity": "sha512-5IOn+jcCEHEraYolBPs/sT4BxYCe2nHg374OPiItB1O96KZFseS2gthU4twyYzeDcFew4DaUM/xwc5BQf08JJw==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18.20 <19 || >=20.10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ink/node_modules/is-fullwidth-code-point": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", + "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "get-east-asian-width": "^1.3.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ink/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC", + "peer": true + }, + "node_modules/ink/node_modules/type-fest": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-5.5.0.tgz", + "integrity": "sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==", + "license": "(MIT OR CC0-1.0)", + "peer": true, + "dependencies": { + "tagged-tag": "^1.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/ink/node_modules/wrap-ansi": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-10.0.0.tgz", + "integrity": "sha512-SGcvg80f0wUy2/fXES19feHMz8E0JoXv2uNgHOu4Dgi2OrCy1lqwFYEJz1BLbDI0exjPMe/ZdzZ/YpGECBG/aQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-styles": "^6.2.3", + "string-width": "^8.2.0", + "strip-ansi": "^7.1.2" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-in-ci": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-in-ci/-/is-in-ci-2.0.0.tgz", + "integrity": "sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w==", + "license": "MIT", + "peer": true, + "bin": { + "is-in-ci": "cli.js" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash-es": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.18.1.tgz", + "integrity": "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==", + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "license": "MIT" + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "peer": true, + "engines": { + "node": 
">=6" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "peer": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/patch-console": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/patch-console/-/patch-console-2.0.0.tgz", + "integrity": "sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA==", + "license": "MIT", + "peer": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/react": { + "version": "19.2.5", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz", + "integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-reconciler": { + "version": "0.33.0", + "resolved": "https://registry.npmjs.org/react-reconciler/-/react-reconciler-0.33.0.tgz", + "integrity": "sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "peerDependencies": { + "react": "^19.2.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/restore-cursor": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", + "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", + "license": "MIT", + "peer": true, + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC", + "peer": true + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/slice-ansi": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-9.0.0.tgz", + "integrity": "sha512-SO/3iYL5S3W57LLEniscOGPZgOqZUPCx6d3dB+52B80yJ0XstzsC/eV8gnA4tM3MHDrKz+OCFSLNjswdSC+/bA==", + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-styles": "^6.2.3", + "is-fullwidth-code-point": "^5.1.0" + }, + "engines": { + "node": ">=22" + }, + "funding": { + "url": 
"https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", + "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "get-east-asian-width": "^1.3.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.2.0.tgz", + "integrity": "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==", + "license": "MIT", + "peer": true, + "dependencies": { + "get-east-asian-width": "^1.5.0", + "strip-ansi": "^7.1.2" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.2.0.tgz", + "integrity": "sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=14.18" + }, + "funding": { + "url": "https://github.com/chalk/supports-hyperlinks?sponsor=1" + } + }, + "node_modules/tagged-tag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/tagged-tag/-/tagged-tag-1.0.0.tgz", + "integrity": "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/terminal-size": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/terminal-size/-/terminal-size-4.0.1.tgz", + "integrity": "sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-fest": { + "version": "4.41.0", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.7.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", + "integrity": 
"sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/usehooks-ts": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/usehooks-ts/-/usehooks-ts-3.1.1.tgz", + "integrity": "sha512-I4diPp9Cq6ieSUH2wu+fDAVQO43xwtulo+fKEidHUwZPnYImbtkTjzIJYcDcJqxgmX31GVqNFURodvcgHcW0pA==", + "license": "MIT", + "dependencies": { + "lodash.debounce": "^4.0.8" + }, + "engines": { + "node": ">=16.15.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/widest-line": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-6.0.0.tgz", + "integrity": "sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA==", + "license": "MIT", + "peer": true, + "dependencies": { + "string-width": "^8.1.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + 
"get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yoga-layout": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/yoga-layout/-/yoga-layout-3.2.1.tgz", + "integrity": "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==", + "license": "MIT", + "peer": true + } + } +} diff --git a/ui-tui/packages/hermes-ink/package.json b/ui-tui/packages/hermes-ink/package.json index 6741a24f93..8e23491310 100644 --- a/ui-tui/packages/hermes-ink/package.json +++ b/ui-tui/packages/hermes-ink/package.json @@ -3,19 +3,22 @@ "version": "0.0.1", "private": true, "type": "module", - "sideEffects": false, + "scripts": { + "build": "esbuild src/entry-exports.ts --bundle --platform=node --format=esm --packages=external --outfile=dist/ink-bundle.js" + }, + "sideEffects": true, "main": "./index.js", "types": "./index.d.ts", "exports": { ".": { + "types": "./index.d.ts", "import": "./index.js", - "default": "./index.js", - "types": "./index.d.ts" + "default": "./index.js" }, "./text-input": { + "types": "./text-input.d.ts", "import": "./text-input.js", - "default": "./text-input.js", - "types": "./text-input.d.ts" + "default": "./text-input.js" }, "./package.json": "./package.json" }, @@ -44,5 +47,8 @@ "type-fest": "^4.30.0", "usehooks-ts": "^3.1.0", "wrap-ansi": "^9.0.0" + }, + 
"devDependencies": { + "esbuild": "^0.25.0" } } diff --git a/ui-tui/packages/hermes-ink/src/entry-exports.ts b/ui-tui/packages/hermes-ink/src/entry-exports.ts new file mode 100644 index 0000000000..d9fd98deed --- /dev/null +++ b/ui-tui/packages/hermes-ink/src/entry-exports.ts @@ -0,0 +1,25 @@ +export { default as useStderr } from './hooks/use-stderr.js' +export { default as useStdout } from './hooks/use-stdout.js' +export { Ansi } from './ink/Ansi.js' +export { AlternateScreen } from './ink/components/AlternateScreen.js' +export { default as Box } from './ink/components/Box.js' +export { default as Link } from './ink/components/Link.js' +export { default as Newline } from './ink/components/Newline.js' +export { NoSelect } from './ink/components/NoSelect.js' +export { RawAnsi } from './ink/components/RawAnsi.js' +export { default as ScrollBox } from './ink/components/ScrollBox.js' +export { default as Spacer } from './ink/components/Spacer.js' +export { default as Text } from './ink/components/Text.js' +export { default as useApp } from './ink/hooks/use-app.js' +export { useDeclaredCursor } from './ink/hooks/use-declared-cursor.js' +export { default as useInput } from './ink/hooks/use-input.js' +export { useHasSelection, useSelection } from './ink/hooks/use-selection.js' +export { default as useStdin } from './ink/hooks/use-stdin.js' +export { useTabStatus } from './ink/hooks/use-tab-status.js' +export { useTerminalFocus } from './ink/hooks/use-terminal-focus.js' +export { useTerminalTitle } from './ink/hooks/use-terminal-title.js' +export { useTerminalViewport } from './ink/hooks/use-terminal-viewport.js' +export { default as measureElement } from './ink/measure-element.js' +export { createRoot, default as render, renderSync } from './ink/root.js' +export { stringWidth } from './ink/stringWidth.js' +export { default as TextInput, UncontrolledTextInput } from 'ink-text-input' diff --git a/ui-tui/packages/hermes-ink/src/ink/Ansi.tsx 
b/ui-tui/packages/hermes-ink/src/ink/Ansi.tsx index e37eca558f..de0d750c35 100644 --- a/ui-tui/packages/hermes-ink/src/ink/Ansi.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/Ansi.tsx @@ -1,4 +1,4 @@ -import React from 'react' +import React, { type ReactNode } from 'react' import { c as _c } from 'react/compiler-runtime' import Link from './components/Link.js' @@ -6,7 +6,7 @@ import Text from './components/Text.js' import type { Color } from './styles.js' import { type NamedColor, Parser, type Color as TermioColor, type TextStyle } from './termio.js' type Props = { - children: string + children?: ReactNode /** When true, force all text to be rendered with dim styling */ dimColor?: boolean } @@ -22,6 +22,11 @@ type SpanProps = { hyperlink?: string } +type Span = { + text: string + props: SpanProps +} + /** * Component that parses ANSI escape codes and renders them using Text components. * @@ -30,7 +35,7 @@ type SpanProps = { * * Memoized to prevent re-renders when parent changes but children string is the same. */ -export const Ansi = React.memo(function Ansi(t0) { +export const Ansi = React.memo(function Ansi(t0: Props) { const $ = _c(12) const { children, dimColor } = t0 @@ -78,7 +83,7 @@ export const Ansi = React.memo(function Ansi(t0) { let t3 if ($[7] !== dimColor) { - t3 = (span, i) => { + t3 = (span: Span, i: number) => { const hyperlink = span.props.hyperlink if (dimColor) { @@ -165,10 +170,6 @@ export const Ansi = React.memo(function Ansi(t0) { return t3 }) -type Span = { - text: string - props: SpanProps -} /** * Parse an ANSI string into spans using the termio parser. 
@@ -359,7 +360,7 @@ type BaseTextStyleProps = { } // Wrapper component that handles bold/dim mutual exclusivity for Text -function StyledText(t0) { +function StyledText(t0: BaseTextStyleProps & { bold?: boolean; dim?: boolean; children?: ReactNode }) { const $ = _c(14) let bold let children diff --git a/ui-tui/packages/hermes-ink/src/ink/components/AlternateScreen.tsx b/ui-tui/packages/hermes-ink/src/ink/components/AlternateScreen.tsx index 757f7789b8..bb18608172 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/AlternateScreen.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/AlternateScreen.tsx @@ -32,7 +32,7 @@ type Props = PropsWithChildren<{ * from scrolling content) and so signal-exit cleanup can exit the alt * screen if the component's own unmount doesn't run. */ -export function AlternateScreen(t0) { +export function AlternateScreen(t0: Props) { const $ = _c(7) const { children, mouseTracking: t1 } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/Box.tsx b/ui-tui/packages/hermes-ink/src/ink/components/Box.tsx index 13ec469954..68ba67ea54 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/Box.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/Box.tsx @@ -1,6 +1,6 @@ import '../global.d.ts' -import React, { type Ref } from 'react' +import React, { type ReactNode, type Ref } from 'react' import { c as _c } from 'react/compiler-runtime' import type { Except } from 'type-fest' @@ -11,6 +11,7 @@ import type { KeyboardEvent } from '../events/keyboard-event.js' import type { Styles } from '../styles.js' import * as warn from '../warn.js' export type Props = Except & { + children?: ReactNode ref?: Ref /** * Tab order index. Nodes with `tabIndex >= 0` participate in @@ -50,7 +51,7 @@ export type Props = Except & { /** * `` is an essential Ink component to build your layout. It's like `
` in the browser. */ -function Box(t0) { +function Box(t0: Props) { const $ = _c(42) let autoFocus let children diff --git a/ui-tui/packages/hermes-ink/src/ink/components/ClockContext.tsx b/ui-tui/packages/hermes-ink/src/ink/components/ClockContext.tsx index 521cd57513..99dfc2d883 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/ClockContext.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/ClockContext.tsx @@ -1,4 +1,4 @@ -import React, { createContext, useEffect, useState } from 'react' +import React, { createContext, type ReactNode, useEffect, useState } from 'react' import { c as _c } from 'react/compiler-runtime' import { BLURRED_FRAME_INTERVAL_MS, FRAME_INTERVAL_MS } from '../constants.js' @@ -87,7 +87,7 @@ export const ClockContext = createContext(null) // Own component so App.tsx doesn't re-render when the clock is created. // The clock value is stable (created once via useState), so the provider // never causes consumer re-renders on its own. -export function ClockProvider(t0) { +export function ClockProvider(t0: { readonly children: ReactNode }) { const $ = _c(7) const { children } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/Link.tsx b/ui-tui/packages/hermes-ink/src/ink/components/Link.tsx index 72c94fa11f..71c4914558 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/Link.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/Link.tsx @@ -11,7 +11,7 @@ export type Props = { readonly fallback?: ReactNode } -export default function Link(t0) { +export default function Link(t0: Props) { const $ = _c(5) const { children, url, fallback } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/Newline.tsx b/ui-tui/packages/hermes-ink/src/ink/components/Newline.tsx index 54dfa50fa6..4010dc9ffd 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/Newline.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/Newline.tsx @@ -12,7 +12,7 @@ export type Props = { /** * Adds one or more newline (\n) 
characters. Must be used within components. */ -export default function Newline(t0) { +export default function Newline(t0: Props) { const $ = _c(4) const { count: t1 } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/NoSelect.tsx b/ui-tui/packages/hermes-ink/src/ink/components/NoSelect.tsx index e3da698520..79078189e4 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/NoSelect.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/NoSelect.tsx @@ -33,7 +33,7 @@ type Props = Omit & { * tracking). No-op in the main-screen scrollback render where the * terminal's native selection is used instead. */ -export function NoSelect(t0) { +export function NoSelect(t0: Props) { const $ = _c(8) let boxProps let children diff --git a/ui-tui/packages/hermes-ink/src/ink/components/RawAnsi.tsx b/ui-tui/packages/hermes-ink/src/ink/components/RawAnsi.tsx index 2c0b2f0fee..b5bd8f2536 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/RawAnsi.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/RawAnsi.tsx @@ -25,7 +25,7 @@ type Props = { * (width × lines.length) and hands the joined string straight to output.write(), * which already splits on '\n' and parses ANSI into the screen buffer. */ -export function RawAnsi(t0) { +export function RawAnsi(t0: Props) { const $ = _c(6) const { lines, width } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/ScrollBox.tsx b/ui-tui/packages/hermes-ink/src/ink/components/ScrollBox.tsx index e7b55e71d6..bed421234f 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/ScrollBox.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/ScrollBox.tsx @@ -252,7 +252,7 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren< // commit, which is too late for the first frame. 
return ( { + ref={(el: DOMElement | null) => { domRef.current = el if (el) { diff --git a/ui-tui/packages/hermes-ink/src/ink/components/TerminalFocusContext.tsx b/ui-tui/packages/hermes-ink/src/ink/components/TerminalFocusContext.tsx index 02860485a7..e5f1acdd68 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/TerminalFocusContext.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/TerminalFocusContext.tsx @@ -1,4 +1,4 @@ -import React, { createContext, useSyncExternalStore } from 'react' +import React, { createContext, type ReactNode, useSyncExternalStore } from 'react' import { c as _c } from 'react/compiler-runtime' import { @@ -23,7 +23,7 @@ TerminalFocusContext.displayName = 'TerminalFocusContext' // Separate component so App.tsx doesn't re-render on focus changes. // Children are a stable prop reference, so they don't re-render either — // only components that consume the context will re-render. -export function TerminalFocusProvider(t0) { +export function TerminalFocusProvider(t0: { readonly children: ReactNode }) { const $ = _c(6) const { children } = t0 diff --git a/ui-tui/packages/hermes-ink/src/ink/components/Text.tsx b/ui-tui/packages/hermes-ink/src/ink/components/Text.tsx index f69d338c1f..ea2a74c9a6 100644 --- a/ui-tui/packages/hermes-ink/src/ink/components/Text.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/components/Text.tsx @@ -116,7 +116,7 @@ const memoizedStylesForWrap: Record, Styles> = { /** * This component can display text, and change its style to make it colorful, bold, underline, italic or strikethrough. */ -export default function Text(t0) { +export default function Text(t0: Props) { const $ = _c(29) const { diff --git a/ui-tui/packages/hermes-ink/src/ink/devtools.ts b/ui-tui/packages/hermes-ink/src/ink/devtools.ts new file mode 100644 index 0000000000..73b0c9448d --- /dev/null +++ b/ui-tui/packages/hermes-ink/src/ink/devtools.ts @@ -0,0 +1,2 @@ +/** Optional react-devtools hook; package may be absent. 
*/ +export {} diff --git a/ui-tui/packages/hermes-ink/src/ink/events/paste-event.ts b/ui-tui/packages/hermes-ink/src/ink/events/paste-event.ts new file mode 100644 index 0000000000..38a88f3171 --- /dev/null +++ b/ui-tui/packages/hermes-ink/src/ink/events/paste-event.ts @@ -0,0 +1,10 @@ +import { TerminalEvent } from './terminal-event.js' + +export class PasteEvent extends TerminalEvent { + readonly text: string + + constructor(text: string) { + super('paste', { bubbles: true, cancelable: true }) + this.text = text + } +} diff --git a/ui-tui/packages/hermes-ink/src/ink/events/resize-event.ts b/ui-tui/packages/hermes-ink/src/ink/events/resize-event.ts new file mode 100644 index 0000000000..b2627bb290 --- /dev/null +++ b/ui-tui/packages/hermes-ink/src/ink/events/resize-event.ts @@ -0,0 +1,12 @@ +import { TerminalEvent } from './terminal-event.js' + +export class ResizeEvent extends TerminalEvent { + readonly columns: number + readonly rows: number + + constructor(columns: number, rows: number) { + super('resize', { bubbles: true, cancelable: true }) + this.columns = columns + this.rows = rows + } +} diff --git a/ui-tui/packages/hermes-ink/src/ink/ink.tsx b/ui-tui/packages/hermes-ink/src/ink/ink.tsx index e0163f5065..96898cee31 100644 --- a/ui-tui/packages/hermes-ink/src/ink/ink.tsx +++ b/ui-tui/packages/hermes-ink/src/ink/ink.tsx @@ -339,8 +339,6 @@ export default class Ink { } } - // @ts-expect-error @types/react-reconciler@0.32.3 declares 11 args with transitionCallbacks, - // but react-reconciler 0.33.0 source only accepts 10 args (no transitionCallbacks) this.container = reconciler.createContainer( this.rootNode, ConcurrentRoot, @@ -357,7 +355,7 @@ export default class Ink { noop // onDefaultTransitionIndicator ) - if ('production' === 'development') { + if (process.env.NODE_ENV === 'development') { reconciler.injectIntoDevTools({ bundleType: 0, // Reporting React DOM's version, not Ink's @@ -955,7 +953,6 @@ export default class Ink { } pause(): void { // Flush 
pending React updates and render before pausing. - // @ts-expect-error flushSyncFromReconciler exists in react-reconciler 0.31 but not in @types/react-reconciler reconciler.flushSyncFromReconciler() this.onRender() this.isPaused = true @@ -1783,9 +1780,7 @@ export default class Ink { ) - // @ts-expect-error updateContainerSync exists in react-reconciler but not in @types/react-reconciler reconciler.updateContainerSync(tree, this.container, null, noop) - // @ts-expect-error flushSyncWork exists in react-reconciler but not in @types/react-reconciler reconciler.flushSyncWork() } unmount(error?: Error | number | null): void { @@ -1857,9 +1852,7 @@ export default class Ink { this.drainTimer = null } - // @ts-expect-error updateContainerSync exists in react-reconciler but not in @types/react-reconciler reconciler.updateContainerSync(null, this.container, null, noop) - // @ts-expect-error flushSyncWork exists in react-reconciler but not in @types/react-reconciler reconciler.flushSyncWork() instances.delete(this.options.stdout) @@ -1966,8 +1959,8 @@ export default class Ink { const intercept = ( chunk: Uint8Array | string, - encodingOrCb?: BufferEncoding | ((err?: Error) => void), - cb?: (err?: Error) => void + encodingOrCb?: BufferEncoding | ((err?: Error | null) => void), + cb?: (err?: Error | null) => void ): boolean => { const callback = typeof encodingOrCb === 'function' ? 
encodingOrCb : cb diff --git a/ui-tui/packages/hermes-ink/src/ink/reconciler.ts b/ui-tui/packages/hermes-ink/src/ink/reconciler.ts index 2be8a7d7ca..5fdce3bf9c 100644 --- a/ui-tui/packages/hermes-ink/src/ink/reconciler.ts +++ b/ui-tui/packages/hermes-ink/src/ink/reconciler.ts @@ -176,27 +176,12 @@ export function resetProfileCounters(): void { } // --- END --- -const reconciler = createReconciler< - ElementNames, - Props, - DOMElement, - DOMElement, - TextNode, - DOMElement, - unknown, - unknown, - DOMElement, - HostContext, - null, // UpdatePayload - not used in React 19 - NodeJS.Timeout, - -1, - null ->({ +const reconciler = createReconciler({ getRootHostContext: () => ({ isInsideText: false }), prepareForCommit: () => null, preparePortalMount: () => null, clearContainer: () => false, - resetAfterCommit(rootNode) { + resetAfterCommit(rootNode: DOMElement) { _lastCommitMs = _commitStart > 0 ? performance.now() - _commitStart : 0 _commitStart = 0 @@ -261,19 +246,19 @@ const reconciler = createReconciler< return createTextNode(text) }, resetTextContent() {}, - hideTextInstance(node) { + hideTextInstance(node: TextNode) { setTextNodeValue(node, '') }, - unhideTextInstance(node, text) { + unhideTextInstance(node: TextNode, text: string) { setTextNodeValue(node, text) }, - getPublicInstance: (instance): DOMElement => instance as DOMElement, - hideInstance(node) { + getPublicInstance: (instance: DOMElement): DOMElement => instance, + hideInstance(node: DOMElement) { node.isHidden = true node.yogaNode?.setDisplay(LayoutDisplay.None) markDirty(node) }, - unhideInstance(node) { + unhideInstance(node: DOMElement) { node.isHidden = false node.yogaNode?.setDisplay(LayoutDisplay.Flex) markDirty(node) @@ -344,7 +329,7 @@ const reconciler = createReconciler< commitTextUpdate(node: TextNode, _oldText: string, newText: string): void { setTextNodeValue(node, newText) }, - removeChild(node, removeNode) { + removeChild(node: DOMElement, removeNode: DOMElement | TextNode) { 
removeChildNode(node, removeNode) cleanupYogaNode(removeNode) diff --git a/ui-tui/packages/hermes-ink/src/ink/render-to-screen.ts b/ui-tui/packages/hermes-ink/src/ink/render-to-screen.ts index bee9f8f1c5..57272bd36a 100644 --- a/ui-tui/packages/hermes-ink/src/ink/render-to-screen.ts +++ b/ui-tui/packages/hermes-ink/src/ink/render-to-screen.ts @@ -63,14 +63,11 @@ export function renderToScreen(el: ReactElement, width: number): { screen: Scree stylePool = new StylePool() charPool = new CharPool() hyperlinkPool = new HyperlinkPool() - // @ts-expect-error react-reconciler 0.33 takes 10 args; @types says 11 container = reconciler.createContainer(root, LegacyRoot, null, false, null, 'search-render', noop, noop, noop, noop) } const t0 = performance.now() - // @ts-expect-error updateContainerSync exists but not in @types reconciler.updateContainerSync(el, container, null, noop) - // @ts-expect-error flushSyncWork exists but not in @types reconciler.flushSyncWork() const t1 = performance.now() @@ -105,9 +102,7 @@ export function renderToScreen(el: ReactElement, width: number): { screen: Scree const t3 = performance.now() // Unmount so next call gets a fresh tree. Leaves root/container/pools. 
- // @ts-expect-error updateContainerSync exists but not in @types reconciler.updateContainerSync(null, container, null, noop) - // @ts-expect-error flushSyncWork exists but not in @types reconciler.flushSyncWork() timing.reconcile += t1 - t0 diff --git a/ui-tui/packages/hermes-ink/src/utils/semver.ts b/ui-tui/packages/hermes-ink/src/utils/semver.ts index ab57ecf720..87025ed0fd 100644 --- a/ui-tui/packages/hermes-ink/src/utils/semver.ts +++ b/ui-tui/packages/hermes-ink/src/utils/semver.ts @@ -53,5 +53,5 @@ export function order(a: string, b: string): -1 | 0 | 1 { return Bun.semver.order(a, b) } - return getNpmSemver().compare(a, b, { loose: true }) + return getNpmSemver().compare(a, b, { loose: true }) as -1 | 0 | 1 } diff --git a/ui-tui/src/__tests__/text.test.ts b/ui-tui/src/__tests__/text.test.ts index 55b6a272b3..d43f6d56f4 100644 --- a/ui-tui/src/__tests__/text.test.ts +++ b/ui-tui/src/__tests__/text.test.ts @@ -1,6 +1,6 @@ import { describe, expect, it } from 'vitest' -import { fmtK, isToolTrailResultLine, lastCotTrailIndex, sameToolTrailGroup } from '../lib/text.js' +import { estimateRows, fmtK, isToolTrailResultLine, lastCotTrailIndex, sameToolTrailGroup } from '../lib/text.js' describe('isToolTrailResultLine', () => { it('detects completion markers', () => { @@ -49,3 +49,17 @@ describe('fmtK', () => { expect(fmtK(1_000_000_000)).toBe('1B') }) }) + +describe('estimateRows', () => { + it('handles tilde code fences', () => { + const md = ['~~~markdown', '# heading', '~~~'].join('\n') + + expect(estimateRows(md, 40)).toBeGreaterThanOrEqual(2) + }) + + it('handles checklist bullets as list rows', () => { + const md = ['- [x] done', '- [ ] todo'].join('\n') + + expect(estimateRows(md, 40)).toBe(2) + }) +}) diff --git a/ui-tui/src/app.tsx b/ui-tui/src/app.tsx index 6fe380cb91..ee317662af 100644 --- a/ui-tui/src/app.tsx +++ b/ui-tui/src/app.tsx @@ -1594,12 +1594,17 @@ export function App({ gw }: { gw: GatewayClient }) { if (!pastes.length) { sys('no text pastes') 
} else { - panel('Paste Shelf', [{ - rows: pastes.map(p => [ - `#${p.id} ${p.mode}`, - `${p.lineCount}L · ${p.kind} · ${compactPreview(p.text, 60) || '(empty)'}` - ] as [string, string]) - }]) + panel('Paste Shelf', [ + { + rows: pastes.map( + p => + [ + `#${p.id} ${p.mode}`, + `${p.lineCount}L · ${p.kind} · ${compactPreview(p.text, 60) || '(empty)'}` + ] as [string, string] + ) + } + ]) } return true @@ -1652,7 +1657,6 @@ export function App({ gw }: { gw: GatewayClient }) { sys('usage: /paste [list|mode |drop |clear]') return true - case 'logs': { const logText = gw.getLogTail(Math.min(80, Math.max(1, parseInt(arg, 10) || 20))) logText ? page(logText, 'Logs') : sys('no gateway logs') @@ -1765,7 +1769,14 @@ export function App({ gw }: { gw: GatewayClient }) { case 'model': if (!arg) { rpc('config.get', { key: 'provider' }).then((r: any) => - panel('Model', [{ rows: [['Model', r.model], ['Provider', r.provider]] }]) + panel('Model', [ + { + rows: [ + ['Model', r.model], + ['Provider', r.provider] + ] + } + ]) ) } else { rpc('config.set', { session_id: sid, key: 'model', value: arg.replace('--global', '').trim() }).then( @@ -1900,6 +1911,7 @@ export function App({ gw }: { gw: GatewayClient }) { } const f = (v: number) => (v ?? 0).toLocaleString() + const cost = r.cost_usd != null ? `${r.cost_status === 'estimated' ? 
'~' : ''}$${r.cost_usd.toFixed(4)}` : null @@ -1913,7 +1925,9 @@ export function App({ gw }: { gw: GatewayClient }) { ['API calls', f(r.calls)] ] - if (cost) rows.push(['Cost', cost]) + if (cost) { + rows.push(['Cost', cost]) + } const sections: PanelSection[] = [{ rows }] @@ -1921,7 +1935,9 @@ export function App({ gw }: { gw: GatewayClient }) { sections.push({ text: `Context: ${f(r.context_used)} / ${f(r.context_max)} (${r.context_percent}%)` }) } - if (r.compressions) sections.push({ text: `Compressions: ${r.compressions}` }) + if (r.compressions) { + sections.push({ text: `Compressions: ${r.compressions}` }) + } panel('Usage', sections) }) @@ -1966,13 +1982,15 @@ export function App({ gw }: { gw: GatewayClient }) { case 'insights': rpc('insights.get', { days: parseInt(arg) || 30 }).then((r: any) => - panel('Insights', [{ - rows: [ - ['Period', `${r.days} days`], - ['Sessions', `${r.sessions}`], - ['Messages', `${r.messages}`] - ] - }]) + panel('Insights', [ + { + rows: [ + ['Period', `${r.days} days`], + ['Sessions', `${r.sessions}`], + ['Messages', `${r.messages}`] + ] + } + ]) ) return true @@ -1985,12 +2003,13 @@ export function App({ gw }: { gw: GatewayClient }) { return sys('no checkpoints') } - panel('Checkpoints', [{ - rows: r.checkpoints.map((c: any, i: number) => [ - `${i + 1} ${c.hash?.slice(0, 8)}`, - c.message - ] as [string, string]) - }]) + panel('Checkpoints', [ + { + rows: r.checkpoints.map( + (c: any, i: number) => [`${i + 1} ${c.hash?.slice(0, 8)}`, c.message] as [string, string] + ) + } + ]) }) } else { const hash = sub === 'restore' || sub === 'diff' ? rArgs[0] : sub @@ -2023,9 +2042,11 @@ export function App({ gw }: { gw: GatewayClient }) { return sys('no plugins') } - panel('Plugins', [{ - items: r.plugins.map((p: any) => `${p.name} v${p.version}${p.enabled ? '' : ' (disabled)'}`) - }]) + panel('Plugins', [ + { + items: r.plugins.map((p: any) => `${p.name} v${p.version}${p.enabled ? 
'' : ' (disabled)'}`) + } + ]) }) return true @@ -2040,10 +2061,13 @@ export function App({ gw }: { gw: GatewayClient }) { return sys('no skills installed') } - panel('Installed Skills', Object.entries(sk).map(([cat, names]) => ({ - title: cat, - items: names as string[] - }))) + panel( + 'Installed Skills', + Object.entries(sk).map(([cat, names]) => ({ + title: cat, + items: names as string[] + })) + ) }) return true @@ -2052,17 +2076,29 @@ export function App({ gw }: { gw: GatewayClient }) { if (sub === 'browse') { const pg = parseInt(sArgs[0] ?? '1', 10) || 1 rpc('skills.manage', { action: 'browse', page: pg }).then((r: any) => { - if (!r.items?.length) return sys('no skills found in the hub') + if (!r.items?.length) { + return sys('no skills found in the hub') + } - const sections: PanelSection[] = [{ - rows: r.items.map((s: any) => [ - s.name ?? '', - (s.description ?? '').slice(0, 60) + (s.description?.length > 60 ? '…' : '') - ] as [string, string]) - }] + const sections: PanelSection[] = [ + { + rows: r.items.map( + (s: any) => + [s.name ?? '', (s.description ?? '').slice(0, 60) + (s.description?.length > 60 ? '…' : '')] as [ + string, + string + ] + ) + } + ] - if (r.page < r.total_pages) sections.push({ text: `/skills browse ${r.page + 1} → next page` }) - if (r.page > 1) sections.push({ text: `/skills browse ${r.page - 1} → prev page` }) + if (r.page < r.total_pages) { + sections.push({ text: `/skills browse ${r.page + 1} → next page` }) + } + + if (r.page > 1) { + sections.push({ text: `/skills browse ${r.page - 1} → prev page` }) + } panel(`Skills Hub (page ${r.page}/${r.total_pages}, ${r.total} total)`, sections) }) @@ -2080,47 +2116,57 @@ export function App({ gw }: { gw: GatewayClient }) { case 'agents': case 'tasks': - rpc('agents.list', {}).then((r: any) => { - const procs = r.processes ?? 
[] - const running = procs.filter((p: any) => p.status === 'running') - const finished = procs.filter((p: any) => p.status !== 'running') - const sections: PanelSection[] = [] + rpc('agents.list', {}) + .then((r: any) => { + const procs = r.processes ?? [] + const running = procs.filter((p: any) => p.status === 'running') + const finished = procs.filter((p: any) => p.status !== 'running') + const sections: PanelSection[] = [] - if (running.length) { - sections.push({ - title: `Running (${running.length})`, - rows: running.map((p: any) => [p.session_id.slice(0, 8), p.command]) - }) - } + if (running.length) { + sections.push({ + title: `Running (${running.length})`, + rows: running.map((p: any) => [p.session_id.slice(0, 8), p.command]) + }) + } - if (finished.length) { - sections.push({ - title: `Finished (${finished.length})`, - rows: finished.map((p: any) => [p.session_id.slice(0, 8), p.command]) - }) - } + if (finished.length) { + sections.push({ + title: `Finished (${finished.length})`, + rows: finished.map((p: any) => [p.session_id.slice(0, 8), p.command]) + }) + } - if (!sections.length) sections.push({ text: 'No active processes' }) + if (!sections.length) { + sections.push({ text: 'No active processes' }) + } - panel('Agents', sections) - }).catch(() => sys('agents command failed')) + panel('Agents', sections) + }) + .catch(() => sys('agents command failed')) return true case 'cron': if (!arg || arg === 'list') { - rpc('cron.manage', { action: 'list' }).then((r: any) => { - const jobs = r.jobs ?? [] + rpc('cron.manage', { action: 'list' }) + .then((r: any) => { + const jobs = r.jobs ?? [] - if (!jobs.length) return sys('no scheduled jobs') + if (!jobs.length) { + return sys('no scheduled jobs') + } - panel('Cron', [{ - rows: jobs.map((j: any) => [ - j.name || j.job_id?.slice(0, 12), - `${j.schedule} · ${j.state ?? 
'active'}` - ] as [string, string]) - }]) - }).catch(() => sys('cron command failed')) + panel('Cron', [ + { + rows: jobs.map( + (j: any) => + [j.name || j.job_id?.slice(0, 12), `${j.schedule} · ${j.state ?? 'active'}`] as [string, string] + ) + } + ]) + }) + .catch(() => sys('cron command failed')) } else { gw.request('slash.exec', { command: cmd.slice(1), session_id: sid }) .then((r: any) => sys(r?.output || '(no output)')) @@ -2130,38 +2176,59 @@ export function App({ gw }: { gw: GatewayClient }) { return true case 'config': - rpc('config.show', {}).then((r: any) => { - panel('Config', (r.sections ?? []).map((s: any) => ({ - title: s.title, - rows: s.rows - }))) - }).catch(() => sys('config command failed')) + rpc('config.show', {}) + .then((r: any) => { + panel( + 'Config', + (r.sections ?? []).map((s: any) => ({ + title: s.title, + rows: s.rows + })) + ) + }) + .catch(() => sys('config command failed')) return true case 'tools': - rpc('tools.list', { session_id: sid }).then((r: any) => { - if (!r.toolsets?.length) return sys('no tools') + rpc('tools.list', { session_id: sid }) + .then((r: any) => { + if (!r.toolsets?.length) { + return sys('no tools') + } - panel('Tools', r.toolsets.map((ts: any) => ({ - title: `${ts.enabled ? '*' : ' '} ${ts.name} [${ts.tool_count} tools]`, - items: ts.tools - }))) - }).catch(() => sys('tools command failed')) + panel( + 'Tools', + r.toolsets.map((ts: any) => ({ + title: `${ts.enabled ? '*' : ' '} ${ts.name} [${ts.tool_count} tools]`, + items: ts.tools + })) + ) + }) + .catch(() => sys('tools command failed')) return true case 'toolsets': - rpc('toolsets.list', { session_id: sid }).then((r: any) => { - if (!r.toolsets?.length) return sys('no toolsets') + rpc('toolsets.list', { session_id: sid }) + .then((r: any) => { + if (!r.toolsets?.length) { + return sys('no toolsets') + } - panel('Toolsets', [{ - rows: r.toolsets.map((ts: any) => [ - `${ts.enabled ? 
'(*)' : ' '} ${ts.name}`, - `[${ts.tool_count}] ${ts.description}` - ] as [string, string]) - }]) - }).catch(() => sys('toolsets command failed')) + panel('Toolsets', [ + { + rows: r.toolsets.map( + (ts: any) => + [`${ts.enabled ? '(*)' : ' '} ${ts.name}`, `[${ts.tool_count}] ${ts.description}`] as [ + string, + string + ] + ) + } + ]) + }) + .catch(() => sys('toolsets command failed')) return true @@ -2188,7 +2255,23 @@ export function App({ gw }: { gw: GatewayClient }) { return true } }, - [catalog, compact, gw, lastUserMsg, messages, newSession, page, panel, pastes, pushActivity, rpc, send, sid, statusBar, sys] + [ + catalog, + compact, + gw, + lastUserMsg, + messages, + newSession, + page, + panel, + pastes, + pushActivity, + rpc, + send, + sid, + statusBar, + sys + ] ) slashRef.current = slash diff --git a/ui-tui/src/components/branding.tsx b/ui-tui/src/components/branding.tsx index 429996db70..d37f86f712 100644 --- a/ui-tui/src/components/branding.tsx +++ b/ui-tui/src/components/branding.tsx @@ -179,4 +179,3 @@ export function Panel({ sections, t, title }: { sections: PanelSection[]; t: The ) } - diff --git a/ui-tui/src/components/markdown.tsx b/ui-tui/src/components/markdown.tsx index 64403c2977..8d5cf888fe 100644 --- a/ui-tui/src/components/markdown.tsx +++ b/ui-tui/src/components/markdown.tsx @@ -3,17 +3,104 @@ import type { ReactNode } from 'react' import type { Theme } from '../theme.js' -/** OSC 8 hyperlink — wrap-ansi / Ink keep the link active across soft line wraps. 
*/ -const osc8 = (url: string) => '\x1b]8;;' + url + '\x1b\\' -const OSC8_END = '\x1b]8;;\x1b\\' +const FENCE_RE = /^\s*(`{3,}|~{3,})(.*)$/ +const HR_RE = /^ {0,3}([-*_])(?:\s*\1){2,}\s*$/ +const HEADING_RE = /^\s{0,3}(#{1,6})\s+(.*?)(?:\s+#+\s*)?$/ +const FOOTNOTE_RE = /^\[\^([^\]]+)\]:\s*(.*)$/ +const DEF_RE = /^\s*:\s+(.+)$/ +const TABLE_DIVIDER_CELL_RE = /^:?-{3,}:?$/ +const MD_URL_RE = '((?:[^\\s()]|\\([^\\s()]*\\))+?)' +const INLINE_RE = + new RegExp( + `(!\\[(.*?)\\]\\(${MD_URL_RE}\\)|\\[(.+?)\\]\\(${MD_URL_RE}\\)|<((?:https?:\\/\\/|mailto:)[^>\\s]+|[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,})>|~~(.+?)~~|\`([^\\\`]+)\`|\\*\\*(.+?)\\*\\*|__(.+?)__|\\*(.+?)\\*|_(.+?)_|==(.+?)==|\\[\\^([^\\]]+)\\]|\\^([^^\\s][^^]*?)\\^|~([^~\\s][^~]*?)~|(https?:\\/\\/[^\\s<]+))`, + 'g' + ) + +type Fence = { + char: '`' | '~' + lang: string + len: number +} + +const renderLink = (key: number, t: Theme, label: string) => ( + + {label} + +) + +const trimBareUrl = (value: string) => { + const trimmed = value.replace(/[),.;:!?]+$/g, '') + + return { + tail: value.slice(trimmed.length), + url: trimmed + } +} + +const renderAutolink = (key: number, t: Theme, raw: string) => ( + + {raw.replace(/^mailto:/, '')} + +) + +const indentDepth = (indent: string) => Math.floor(indent.replace(/\t/g, ' ').length / 2) + +const parseFence = (line: string): Fence | null => { + const m = line.match(FENCE_RE) + + if (!m) { + return null + } + + return { + char: m[1]![0] as '`' | '~', + lang: m[2]!.trim().toLowerCase(), + len: m[1]!.length + } +} + +const isFenceClose = (line: string, fence: Fence) => { + const end = line.match(/^\s*(`{3,}|~{3,})\s*$/) + + return Boolean(end && end[1]![0] === fence.char && end[1]!.length >= fence.len) +} + +const isMarkdownFence = (lang: string) => ['md', 'markdown'].includes(lang) + +const splitTableRow = (row: string) => + row + .trim() + .replace(/^\|/, '') + .replace(/\|$/, '') + .split('|') + .map(cell => cell.trim()) + +const isTableDivider = (row: string) => 
{ + const cells = splitTableRow(row) + + return cells.length > 1 && cells.every(cell => TABLE_DIVIDER_CELL_RE.test(cell)) +} + +const renderTable = (key: number, rows: string[][], t: Theme) => { + const widths = rows[0]!.map((_, ci) => Math.max(...rows.map(r => (r[ci] ?? '').length))) + + return ( + + {rows.map((row, ri) => ( + + {row.map((cell, ci) => cell.padEnd(widths[ci] ?? 0)).join(' ')} + + ))} + + ) +} function MdInline({ t, text }: { t: Theme; text: string }) { const parts: ReactNode[] = [] - const re = /(\[(.+?)\]\((https?:\/\/[^\s)]+)\)|\*\*(.+?)\*\*|`([^`]+)`|\*(.+?)\*|(https?:\/\/[^\s]+))/g let last = 0 - for (const m of text.matchAll(re)) { + for (const m of text.matchAll(INLINE_RE)) { const i = m.index ?? 0 if (i > last) { @@ -22,43 +109,74 @@ function MdInline({ t, text }: { t: Theme; text: string }) { if (m[2] && m[3]) { parts.push( - - {osc8(m[3])} - - {m[2]} - - {OSC8_END} + + [image: {m[2]}] {m[3]} ) - } else if (m[4]) { + } else if (m[4] && m[5]) { + parts.push(renderLink(parts.length, t, m[4])) + } else if (m[6]) { + parts.push(renderAutolink(parts.length, t, m[6])) + } else if (m[7]) { parts.push( - - {m[4]} + + {m[7]} ) - } else if (m[5]) { + } else if (m[8]) { parts.push( - {m[5]} + {m[8]} ) - } else if (m[6]) { + } else if (m[9] || m[10]) { + parts.push( + + {m[9] ?? m[10]} + + ) + } else if (m[11] || m[12]) { parts.push( - {m[6]} + {m[11] ?? 
m[12]} ) - } else if (m[7]) { - const u = m[7] + } else if (m[13]) { parts.push( - - {osc8(u)} - - {u} - - {OSC8_END} + + {m[13]} ) + } else if (m[14]) { + parts.push( + + [{m[14]}] + + ) + } else if (m[15]) { + parts.push( + + ^{m[15]} + + ) + } else if (m[16]) { + parts.push( + + _{m[16]} + + ) + } else if (m[17]) { + const { tail, url } = trimBareUrl(m[17]) + + parts.push(renderAutolink(parts.length, t, url)) + + if (tail) { + parts.push( + + {tail} + + ) + } } last = i + m[0].length @@ -75,7 +193,16 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st const lines = text.split('\n') const nodes: ReactNode[] = [] let i = 0 - let prevKind: 'blank' | 'code' | 'heading' | 'list' | 'paragraph' | 'quote' | 'table' | null = null + let prevKind: + | 'blank' + | 'code' + | 'heading' + | 'list' + | 'paragraph' + | 'quote' + | 'rule' + | 'table' + | null = null const gap = () => { if (nodes.length && prevKind !== 'blank') { @@ -109,16 +236,29 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } - if (line.startsWith('```')) { - start('code') - const lang = line.slice(3).trim() - const block: string[] = [] + const fence = parseFence(line) - for (i++; i < lines.length && !lines[i]!.startsWith('```'); i++) { + if (fence) { + const block: string[] = [] + const lang = fence.lang + + for (i++; i < lines.length && !isFenceClose(lines[i]!, fence); i++) { block.push(lines[i]!) 
} - i++ + if (i < lines.length) { + i++ + } + + if (isMarkdownFence(lang)) { + start('paragraph') + nodes.push() + + continue + } + + start('code') + const isDiff = lang === 'diff' nodes.push( @@ -146,13 +286,42 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } - const heading = line.match(/^#{1,3}\s+(.*)/) + if (line.trim().startsWith('$$')) { + start('code') + + const block: string[] = [] + + for (i++; i < lines.length; i++) { + if (lines[i]!.trim().startsWith('$$')) { + i++ + + break + } + + block.push(lines[i]!) + } + + nodes.push( + + ─ math + {block.map((l, j) => ( + + {l} + + ))} + + ) + + continue + } + + const heading = line.match(HEADING_RE) if (heading) { start('heading') nodes.push( - {heading[1]} + {heading[2]} ) i++ @@ -160,14 +329,103 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } - const bullet = line.match(/^\s*[-*]\s(.*)/) + if (i + 1 < lines.length && line.trim()) { + const setext = lines[i + 1]!.match(/^\s{0,3}(=+|-+)\s*$/) + + if (setext) { + start('heading') + nodes.push( + + {line.trim()} + + ) + i += 2 + + continue + } + } + + if (HR_RE.test(line)) { + start('rule') + nodes.push( + + {'─'.repeat(36)} + + ) + i++ + + continue + } + + const footnote = line.match(FOOTNOTE_RE) + + if (footnote) { + start('list') + nodes.push( + + [{footnote[1]}] + + ) + i++ + + while (i < lines.length && /^\s{2,}\S/.test(lines[i]!)) { + nodes.push( + + + + + + ) + i++ + } + + continue + } + + if (i + 1 < lines.length && DEF_RE.test(lines[i + 1]!)) { + start('list') + nodes.push( + + {line.trim()} + + ) + i++ + + while (i < lines.length) { + const def = lines[i]!.match(DEF_RE) + + if (!def) { + break + } + + nodes.push( + + · + + + ) + i++ + } + + continue + } + + const bullet = line.match(/^(\s*)[-+*]\s+(.*)$/) if (bullet) { start('list') + const depth = indentDepth(bullet[1]!) + const task = bullet[2]!.match(/^\[( |x|X)\]\s+(.*)$/) + const marker = task ? 
(task[1]!.toLowerCase() === 'x' ? '☑' : '☐') : '•' + const body = task ? task[2]! : bullet[2]! + nodes.push( - - + + {' '.repeat(depth * 2)} + {marker}{' '} + + ) i++ @@ -175,14 +433,19 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } - const numbered = line.match(/^\s*(\d+)\.\s(.*)/) + const numbered = line.match(/^(\s*)(\d+)[.)]\s+(.*)$/) if (numbered) { start('list') + const depth = indentDepth(numbered[1]!) + nodes.push( - {numbered[1]}. - + + {' '.repeat(depth * 2)} + {numbered[2]}.{' '} + + ) i++ @@ -190,12 +453,18 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } - if (line.match(/^>\s?/)) { + if (/^\s*(?:>\s*)+/.test(line)) { start('quote') - const quoteLines: string[] = [] + const quoteLines: Array<{ depth: number; text: string }> = [] - while (i < lines.length && lines[i]!.match(/^>\s?/)) { - quoteLines.push(lines[i]!.replace(/^>\s?/, '')) + while (i < lines.length && /^\s*(?:>\s*)+/.test(lines[i]!)) { + const raw = lines[i]! + const prefix = raw.match(/^\s*(?:>\s*)+/)?.[0] ?? '' + + quoteLines.push({ + depth: (prefix.match(/>/g) ?? 
[]).length, + text: raw.slice(prefix.length) + }) i++ } @@ -203,8 +472,9 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st {quoteLines.map((ql, qi) => ( - {' │ '} - + {' '.repeat(Math.max(0, ql.depth - 1) * 2)} + {'│ '} + ))} @@ -213,6 +483,55 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st continue } + if (line.includes('|') && i + 1 < lines.length && isTableDivider(lines[i + 1]!)) { + start('table') + const tableRows: string[][] = [] + + tableRows.push(splitTableRow(line)) + i += 2 + + while (i < lines.length && lines[i]!.includes('|') && lines[i]!.trim()) { + tableRows.push(splitTableRow(lines[i]!)) + i++ + } + + nodes.push(renderTable(key, tableRows, t)) + + continue + } + + if (/^/i.test(line)) { + i++ + + continue + } + + const summary = line.match(/^(.*?)<\/summary>$/i) + + if (summary) { + start('paragraph') + nodes.push( + + ▶ {summary[1]} + + ) + i++ + + continue + } + + if (/^<\/?[^>]+>$/.test(line.trim())) { + start('paragraph') + nodes.push( + + {line.trim()} + + ) + i++ + + continue + } + if (line.includes('|') && line.trim().startsWith('|')) { start('table') const tableRows: string[][] = [] @@ -221,29 +540,14 @@ export function Md({ compact, t, text }: { compact?: boolean; t: Theme; text: st const row = lines[i]!.trim() if (!/^[|\s:-]+$/.test(row)) { - tableRows.push( - row - .split('|') - .filter(Boolean) - .map(c => c.trim()) - ) + tableRows.push(splitTableRow(row)) } i++ } if (tableRows.length) { - const widths = tableRows[0]!.map((_, ci) => Math.max(...tableRows.map(r => (r[ci] ?? '').length))) - - nodes.push( - - {tableRows.map((row, ri) => ( - - {row.map((cell, ci) => cell.padEnd(widths[ci] ?? 
0)).join(' ')} - - ))} - - ) + nodes.push(renderTable(key, tableRows, t)) } continue diff --git a/ui-tui/src/components/textInput.tsx b/ui-tui/src/components/textInput.tsx index cb64c42841..ec87ec4f31 100644 --- a/ui-tui/src/components/textInput.tsx +++ b/ui-tui/src/components/textInput.tsx @@ -1,4 +1,5 @@ import * as Ink from '@hermes/ink' +import type { InputEvent, Key } from '@hermes/ink' import { useEffect, useMemo, useRef, useState } from 'react' type InkExt = typeof Ink & { @@ -276,7 +277,7 @@ export function TextInput({ columns = 80, value, onChange, onPaste, onSubmit, pl // ── Input handler ──────────────────────────────────────────────── useInput( - (inp, k, event) => { + (inp: string, k: Key, event: InputEvent) => { // Some terminals normalize Ctrl+V to "v"; others deliver raw ^V (\x16). const ctrlPaste = k.ctrl && (inp.toLowerCase() === 'v' || event.keypress.raw === '\x16') const metaPaste = k.meta && inp.toLowerCase() === 'v' diff --git a/ui-tui/src/gatewayClient.ts b/ui-tui/src/gatewayClient.ts index 5a3eac5e82..fb26d9b5e3 100644 --- a/ui-tui/src/gatewayClient.ts +++ b/ui-tui/src/gatewayClient.ts @@ -24,10 +24,10 @@ export class GatewayClient extends EventEmitter { private pending = new Map() start() { - const root = process.env.HERMES_ROOT ?? resolve(import.meta.dirname, '../../') + const root = process.env.HERMES_PYTHON_SRC_ROOT ?? resolve(import.meta.dirname, '../../') this.proc = spawn(process.env.HERMES_PYTHON ?? 
resolve(root, 'venv/bin/python'), ['-m', 'tui_gateway.entry'], { - cwd: root, + cwd: process.env.HERMES_CWD || root, stdio: ['pipe', 'pipe', 'pipe'] }) diff --git a/ui-tui/src/lib/text.ts b/ui-tui/src/lib/text.ts index fb42943184..461fbc8b00 100644 --- a/ui-tui/src/lib/text.ts +++ b/ui-tui/src/lib/text.ts @@ -19,14 +19,21 @@ const renderEstimateLine = (line: string) => { } return line + .replace(/!\[(.*?)\]\(([^)\s]+)\)/g, '[image: $1]') .replace(/\[(.+?)\]\((https?:\/\/[^\s)]+)\)/g, '$1') .replace(/`([^`]+)`/g, '$1') .replace(/\*\*(.+?)\*\*/g, '$1') + .replace(/__(.+?)__/g, '$1') .replace(/\*(.+?)\*/g, '$1') - .replace(/^#{1,3}\s+/, '') - .replace(/^\s*[-*]\s+/, '• ') + .replace(/_(.+?)_/g, '$1') + .replace(/~~(.+?)~~/g, '$1') + .replace(/==(.+?)==/g, '$1') + .replace(/\[\^([^\]]+)\]/g, '[$1]') + .replace(/^#{1,6}\s+/, '') + .replace(/^\s*[-*+]\s+\[( |x|X)\]\s+/, (_m, checked: string) => `• [${checked.toLowerCase() === 'x' ? 'x' : ' '}] `) + .replace(/^\s*[-*+]\s+/, '• ') .replace(/^\s*(\d+)\.\s+/, '$1. ') - .replace(/^>\s?/, '│ ') + .replace(/^\s*(?:>\s*)+/, '│ ') } export const compactPreview = (s: string, max: number) => { @@ -79,26 +86,34 @@ export const scaleHex = (hex: string, k: number) => { } export const estimateRows = (text: string, w: number, compact = false) => { - let inCode = false + let fence: { char: '`' | '~'; len: number } | null = null let rows = 0 for (const raw of text.split('\n')) { const line = stripAnsi(raw) + const maybeFence = line.match(/^\s*(`{3,}|~{3,})(.*)$/) - if (line.startsWith('```')) { - if (!inCode) { - const lang = line.slice(3).trim() + if (maybeFence) { + const marker = maybeFence[1]! 
+ const lang = maybeFence[2]!.trim() + + if (!fence) { + fence = { + char: marker[0] as '`' | '~', + len: marker.length + } if (lang) { rows += Math.ceil((`─ ${lang}`.length || 1) / w) } + } else if (marker[0] === fence.char && marker.length >= fence.len) { + fence = null } - inCode = !inCode - continue } + const inCode = Boolean(fence) const trimmed = line.trim() if (!inCode && trimmed.startsWith('|') && /^[|\s:-]+$/.test(trimmed)) { diff --git a/ui-tui/src/types/hermes-ink.d.ts b/ui-tui/src/types/hermes-ink.d.ts index db77c9f2a0..7c8a8a7246 100644 --- a/ui-tui/src/types/hermes-ink.d.ts +++ b/ui-tui/src/types/hermes-ink.d.ts @@ -22,7 +22,13 @@ declare module '@hermes/ink' { readonly [key: string]: boolean } - export type InputHandler = (input: string, key: Key) => void + export type InputEvent = { + readonly input: string + readonly key: Key + readonly keypress: { readonly raw?: string } + } + + export type InputHandler = (input: string, key: Key, event: InputEvent) => void export type RenderOptions = { readonly stdin?: NodeJS.ReadStream diff --git a/utils.py b/utils.py index 9a2105d54f..bd2a6b70f5 100644 --- a/utils.py +++ b/utils.py @@ -1,13 +1,16 @@ """Shared utility functions for hermes-agent.""" import json +import logging import os import tempfile from pathlib import Path -from typing import Any, Union +from typing import Any, List, Optional, Union import yaml +logger = logging.getLogger(__name__) + TRUTHY_STRINGS = frozenset({"1", "true", "yes", "on"}) @@ -124,3 +127,88 @@ def atomic_yaml_write( except OSError: pass raise + + +# ─── JSON Helpers ───────────────────────────────────────────────────────────── + + +def safe_json_loads(text: str, default: Any = None) -> Any: + """Parse JSON, returning *default* on any parse error. + + Replaces the ``try: json.loads(x) except (JSONDecodeError, TypeError)`` + pattern duplicated across display.py, anthropic_adapter.py, + auxiliary_client.py, and others. 
+ """ + try: + return json.loads(text) + except (json.JSONDecodeError, TypeError, ValueError): + return default + + +def read_json_file(path: Path, default: Any = None) -> Any: + """Read and parse a JSON file, returning *default* on any error. + + Replaces the repeated ``try: json.loads(path.read_text()) except ...`` + pattern in anthropic_adapter.py, auxiliary_client.py, credential_pool.py, + and skill_utils.py. + """ + try: + return json.loads(Path(path).read_text(encoding="utf-8")) + except (json.JSONDecodeError, OSError, IOError, ValueError) as exc: + logger.debug("Failed to read %s: %s", path, exc) + return default + + +def read_jsonl(path: Path) -> List[dict]: + """Read a JSONL file (one JSON object per line). + + Returns a list of parsed objects, skipping blank lines. + """ + entries = [] + with open(path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if line: + entries.append(json.loads(line)) + return entries + + +def append_jsonl(path: Path, entry: dict) -> None: + """Append a single JSON object as a new line to a JSONL file.""" + path = Path(path) + path.parent.mkdir(parents=True, exist_ok=True) + with open(path, "a", encoding="utf-8") as f: + f.write(json.dumps(entry, ensure_ascii=False) + "\n") + + +# ─── Environment Variable Helpers ───────────────────────────────────────────── + + +def env_str(key: str, default: str = "") -> str: + """Read an environment variable, stripped of whitespace. + + Replaces the ``os.getenv("X", "").strip()`` pattern repeated 50+ times + across runtime_provider.py, anthropic_adapter.py, models.py, etc. 
+ """ + return os.getenv(key, default).strip() + + +def env_lower(key: str, default: str = "") -> str: + """Read an environment variable, stripped and lowercased.""" + return os.getenv(key, default).strip().lower() + + +def env_int(key: str, default: int = 0) -> int: + """Read an environment variable as an integer, with fallback.""" + raw = os.getenv(key, "").strip() + if not raw: + return default + try: + return int(raw) + except (ValueError, TypeError): + return default + + +def env_bool(key: str, default: bool = False) -> bool: + """Read an environment variable as a boolean.""" + return is_truthy_value(os.getenv(key, ""), default=default) diff --git a/website/docs/guides/migrate-from-openclaw.md b/website/docs/guides/migrate-from-openclaw.md index 88dd752d88..6322c725b0 100644 --- a/website/docs/guides/migrate-from-openclaw.md +++ b/website/docs/guides/migrate-from-openclaw.md @@ -11,30 +11,32 @@ description: "Complete guide to migrating your OpenClaw / Clawdbot setup to Herm ## Quick start ```bash -# Preview what would happen (no files changed) -hermes claw migrate --dry-run - -# Run the migration (secrets excluded by default) +# Preview then migrate (always shows a preview first, then asks to confirm) hermes claw migrate -# Full migration including API keys -hermes claw migrate --preset full +# Preview only, no changes +hermes claw migrate --dry-run + +# Full migration including API keys, skip confirmation +hermes claw migrate --preset full --yes ``` -The migration reads from `~/.openclaw/` by default. If you still have a legacy `~/.clawdbot/` or `~/.moldbot/` directory, it's detected automatically. Same for legacy config filenames (`clawdbot.json`, `moldbot.json`). +The migration always shows a full preview of what will be imported before making any changes. Review the list, then confirm to proceed. + +Reads from `~/.openclaw/` by default. Legacy `~/.clawdbot/` or `~/.moldbot/` directories are detected automatically. 
Same for legacy config filenames (`clawdbot.json`, `moldbot.json`). ## Options | Option | Description | |--------|-------------| -| `--dry-run` | Preview what would be migrated without writing anything. | +| `--dry-run` | Preview only — stop after showing what would be migrated. | | `--preset ` | `full` (default, includes secrets) or `user-data` (excludes API keys). | | `--overwrite` | Overwrite existing Hermes files on conflicts (default: skip). | | `--migrate-secrets` | Include API keys (on by default with `--preset full`). | | `--source ` | Custom OpenClaw directory. | | `--workspace-target ` | Where to place `AGENTS.md`. | | `--skill-conflict ` | `skip` (default), `overwrite`, or `rename`. | -| `--yes` | Skip confirmation prompt. | +| `--yes` | Skip the confirmation prompt after preview. | ## What gets migrated @@ -48,7 +50,7 @@ The migration reads from `~/.openclaw/` by default. If you still have a legacy ` | User profile | `workspace/USER.md` | `~/.hermes/memories/USER.md` | Same entry-merge logic as memory. | | Daily memory files | `workspace/memory/*.md` | `~/.hermes/memories/MEMORY.md` | All daily files merged into main memory. | -All workspace files also check `workspace.default/` as a fallback path. +Workspace files are also checked at `workspace.default/` and `workspace-main/` as fallback paths (OpenClaw renamed `workspace/` to `workspace-main/` in recent versions, and uses `workspace-{agentId}` for multi-agent setups). 
### Skills (4 sources) @@ -66,7 +68,7 @@ Skill conflicts are handled by `--skill-conflict`: `skip` leaves the existing He | What | OpenClaw config path | Hermes destination | Notes | |------|---------------------|-------------------|-------| | Default model | `agents.defaults.model` | `config.yaml` → `model` | Can be a string or `{primary, fallbacks}` object | -| Custom providers | `models.providers.*` | `config.yaml` → `custom_providers` | Maps `baseUrl`, `apiType` ("openai"→"chat_completions", "anthropic"→"anthropic_messages") | +| Custom providers | `models.providers.*` | `config.yaml` → `custom_providers` | Maps `baseUrl`, `apiType`/`api` — handles both short ("openai", "anthropic") and hyphenated ("openai-completions", "anthropic-messages", "google-generative-ai") values | | Provider API keys | `models.providers.*.apiKey` | `~/.hermes/.env` | Requires `--migrate-secrets`. See [API key resolution](#api-key-resolution) below. | ### Agent behavior @@ -75,7 +77,7 @@ Skill conflicts are handled by `--skill-conflict`: `skip` leaves the existing He |------|---------------------|-------------------|---------| | Max turns | `agents.defaults.timeoutSeconds` | `agent.max_turns` | `timeoutSeconds / 10`, capped at 200 | | Verbose mode | `agents.defaults.verboseDefault` | `agent.verbose` | "off" / "on" / "full" | -| Reasoning effort | `agents.defaults.thinkingDefault` | `agent.reasoning_effort` | "always"/"high" → "high", "auto"/"medium" → "medium", "off"/"low"/"none"/"minimal" → "low" | +| Reasoning effort | `agents.defaults.thinkingDefault` | `agent.reasoning_effort` | "always"/"high"/"xhigh" → "high", "auto"/"medium"/"adaptive" → "medium", "off"/"low"/"none"/"minimal" → "low" | | Compression | `agents.defaults.compaction.mode` | `compression.enabled` | "off" → false, anything else → true | | Compression model | `agents.defaults.compaction.model` | `compression.summary_model` | Direct string copy | | Human delay | `agents.defaults.humanDelay.mode` | `human_delay.mode` | 
"natural" / "custom" / "off" | @@ -122,26 +124,26 @@ TTS settings are read from **two** OpenClaw config locations with this priority: | ElevenLabs model ID | `config.yaml` → `tts.elevenlabs.model_id` | | OpenAI model | `config.yaml` → `tts.openai.model` | | OpenAI voice | `config.yaml` → `tts.openai.voice` | -| Edge TTS voice | `config.yaml` → `tts.edge.voice` | +| Edge TTS voice | `config.yaml` → `tts.edge.voice` (OpenClaw renamed "edge" to "microsoft" — both are recognized) | | TTS assets | `~/.hermes/tts/` (file copy) | ### Messaging platforms | Platform | OpenClaw config path | Hermes `.env` variable | Notes | |----------|---------------------|----------------------|-------| -| Telegram | `channels.telegram.botToken` | `TELEGRAM_BOT_TOKEN` | Token can be string or [SecretRef](#secretref-handling) | +| Telegram | `channels.telegram.botToken` or `.accounts.default.botToken` | `TELEGRAM_BOT_TOKEN` | Token can be string or [SecretRef](#secretref-handling). Both flat and accounts layout supported. 
| | Telegram | `credentials/telegram-default-allowFrom.json` | `TELEGRAM_ALLOWED_USERS` | Comma-joined from `allowFrom[]` array | -| Discord | `channels.discord.token` | `DISCORD_BOT_TOKEN` | | -| Discord | `channels.discord.allowFrom` | `DISCORD_ALLOWED_USERS` | | -| Slack | `channels.slack.botToken` | `SLACK_BOT_TOKEN` | | -| Slack | `channels.slack.appToken` | `SLACK_APP_TOKEN` | | -| Slack | `channels.slack.allowFrom` | `SLACK_ALLOWED_USERS` | | -| WhatsApp | `channels.whatsapp.allowFrom` | `WHATSAPP_ALLOWED_USERS` | Auth via Baileys QR pairing (not a token) | -| Signal | `channels.signal.account` | `SIGNAL_ACCOUNT` | | -| Signal | `channels.signal.httpUrl` | `SIGNAL_HTTP_URL` | | -| Signal | `channels.signal.allowFrom` | `SIGNAL_ALLOWED_USERS` | | -| Matrix | `channels.matrix.botToken` | `MATRIX_ACCESS_TOKEN` | Via deep-channels migration | -| Mattermost | `channels.mattermost.botToken` | `MATTERMOST_BOT_TOKEN` | Via deep-channels migration | +| Discord | `channels.discord.token` or `.accounts.default.token` | `DISCORD_BOT_TOKEN` | | +| Discord | `channels.discord.allowFrom` or `.accounts.default.allowFrom` | `DISCORD_ALLOWED_USERS` | | +| Slack | `channels.slack.botToken` or `.accounts.default.botToken` | `SLACK_BOT_TOKEN` | | +| Slack | `channels.slack.appToken` or `.accounts.default.appToken` | `SLACK_APP_TOKEN` | | +| Slack | `channels.slack.allowFrom` or `.accounts.default.allowFrom` | `SLACK_ALLOWED_USERS` | | +| WhatsApp | `channels.whatsapp.allowFrom` or `.accounts.default.allowFrom` | `WHATSAPP_ALLOWED_USERS` | Auth via Baileys QR pairing — requires re-pairing after migration | +| Signal | `channels.signal.account` or `.accounts.default.account` | `SIGNAL_ACCOUNT` | | +| Signal | `channels.signal.httpUrl` or `.accounts.default.httpUrl` | `SIGNAL_HTTP_URL` | | +| Signal | `channels.signal.allowFrom` or `.accounts.default.allowFrom` | `SIGNAL_ALLOWED_USERS` | | +| Matrix | `channels.matrix.accessToken` or `.accounts.default.accessToken` | 
`MATRIX_ACCESS_TOKEN` | Uses `accessToken` (not `botToken`) | +| Mattermost | `channels.mattermost.botToken` or `.accounts.default.botToken` | `MATTERMOST_BOT_TOKEN` | | ### Other config @@ -178,13 +180,14 @@ These are saved to `~/.hermes/migration/openclaw//archive/` for manua ## API key resolution -When `--migrate-secrets` is enabled, API keys are collected from **three sources** in priority order: +When `--migrate-secrets` is enabled, API keys are collected from **four sources** in priority order: 1. **Config values** — `models.providers.*.apiKey` and TTS provider keys in `openclaw.json` 2. **Environment file** — `~/.openclaw/.env` (keys like `OPENROUTER_API_KEY`, `ANTHROPIC_API_KEY`, etc.) -3. **Auth profiles** — `~/.openclaw/agents/main/agent/auth-profiles.json` (per-agent credentials) +3. **Config env sub-object** — `openclaw.json` → `"env"` or `"env"."vars"` (some setups store keys here instead of a separate `.env` file) +4. **Auth profiles** — `~/.openclaw/agents/main/agent/auth-profiles.json` (per-agent credentials) -Config values take priority. The `.env` fills any gaps. Auth profiles fill whatever remains. +Config values take priority. Each subsequent source fills any remaining gaps. ### Supported key targets @@ -207,7 +210,7 @@ OpenClaw config values for tokens and API keys can be in three formats: "channels": { "telegram": { "botToken": { "source": "env", "id": "TELEGRAM_BOT_TOKEN" } } } ``` -The migration resolves all three formats. For env templates and SecretRef objects with `source: "env"`, it looks up the value in `~/.openclaw/.env`. SecretRef objects with `source: "file"` or `source: "exec"` can't be resolved automatically — those values must be added to Hermes manually after migration. +The migration resolves all three formats. For env templates and SecretRef objects with `source: "env"`, it looks up the value in `~/.openclaw/.env` and the `openclaw.json` env sub-object. 
SecretRef objects with `source: "file"` or `source: "exec"` can't be resolved automatically — the migration warns about these, and those values must be added to Hermes manually via `hermes config set`. ## After migration @@ -215,13 +218,17 @@ The migration resolves all three formats. For env templates and SecretRef object 2. **Review archived files** — anything in `~/.hermes/migration/openclaw//archive/` needs manual attention. -3. **Verify API keys** — run `hermes status` to check provider authentication. +3. **Start a new session** — imported skills and memory entries take effect in new sessions, not the current one. -4. **Test messaging** — if you migrated platform tokens, restart the gateway: `systemctl --user restart hermes-gateway` +4. **Verify API keys** — run `hermes status` to check provider authentication. -5. **Check session policies** — verify `hermes config get session_reset` matches your expectations. +5. **Test messaging** — if you migrated platform tokens, restart the gateway: `systemctl --user restart hermes-gateway` -6. **Re-pair WhatsApp** — WhatsApp uses QR code pairing (Baileys), not token migration. Run `hermes whatsapp` to pair. +6. **Check session policies** — verify `hermes config get session_reset` matches your expectations. + +7. **Re-pair WhatsApp** — WhatsApp uses QR code pairing (Baileys), not token migration. Run `hermes whatsapp` to pair. + +8. **Archive cleanup** — after confirming everything works, run `hermes claw cleanup` to rename leftover OpenClaw directories to `.pre-migration/` (prevents state confusion). ## Troubleshooting @@ -231,7 +238,7 @@ The migration checks `~/.openclaw/`, then `~/.clawdbot/`, then `~/.moldbot/`. If ### "No provider API keys found" -Keys might be in your `.env` file instead of `openclaw.json`. The migration checks both — make sure `~/.openclaw/.env` exists and has the keys. If keys use `source: "file"` or `source: "exec"` SecretRefs, they can't be resolved automatically. 
+Keys might be stored in several places depending on your OpenClaw version: inline in `openclaw.json` under `models.providers.*.apiKey`, in `~/.openclaw/.env`, in the `openclaw.json` `"env"` sub-object, or in `agents/main/agent/auth-profiles.json`. The migration checks all four. If keys use `source: "file"` or `source: "exec"` SecretRefs, they can't be resolved automatically — add them via `hermes config set`. ### Skills not appearing after migration diff --git a/website/docs/integrations/providers.md b/website/docs/integrations/providers.md index 83ccda05d1..d9bb010d63 100644 --- a/website/docs/integrations/providers.md +++ b/website/docs/integrations/providers.md @@ -27,6 +27,7 @@ You need at least one way to connect to an LLM. Use `hermes model` to switch pro | **MiniMax China** | `MINIMAX_CN_API_KEY` in `~/.hermes/.env` (provider: `minimax-cn`) | | **Alibaba Cloud** | `DASHSCOPE_API_KEY` in `~/.hermes/.env` (provider: `alibaba`, aliases: `dashscope`, `qwen`) | | **Kilo Code** | `KILOCODE_API_KEY` in `~/.hermes/.env` (provider: `kilocode`) | +| **Xiaomi MiMo** | `XIAOMI_API_KEY` in `~/.hermes/.env` (provider: `xiaomi`, aliases: `mimo`, `xiaomi-mimo`) | | **OpenCode Zen** | `OPENCODE_ZEN_API_KEY` in `~/.hermes/.env` (provider: `opencode-zen`) | | **OpenCode Go** | `OPENCODE_GO_API_KEY` in `~/.hermes/.env` (provider: `opencode-go`) | | **DeepSeek** | `DEEPSEEK_API_KEY` in `~/.hermes/.env` (provider: `deepseek`) | @@ -157,16 +158,20 @@ hermes chat --provider minimax-cn --model MiniMax-M2.7 # Alibaba Cloud / DashScope (Qwen models) hermes chat --provider alibaba --model qwen3.5-plus # Requires: DASHSCOPE_API_KEY in ~/.hermes/.env + +# Xiaomi MiMo +hermes chat --provider xiaomi --model mimo-v2-pro +# Requires: XIAOMI_API_KEY in ~/.hermes/.env ``` Or set the provider permanently in `config.yaml`: ```yaml model: - provider: "zai" # or: kimi-coding, minimax, minimax-cn, alibaba + provider: "zai" # or: kimi-coding, minimax, minimax-cn, alibaba, xiaomi default: "glm-5" 
``` -Base URLs can be overridden with `GLM_BASE_URL`, `KIMI_BASE_URL`, `MINIMAX_BASE_URL`, `MINIMAX_CN_BASE_URL`, or `DASHSCOPE_BASE_URL` environment variables. +Base URLs can be overridden with `GLM_BASE_URL`, `KIMI_BASE_URL`, `MINIMAX_BASE_URL`, `MINIMAX_CN_BASE_URL`, `DASHSCOPE_BASE_URL`, or `XIAOMI_BASE_URL` environment variables. :::note Z.AI Endpoint Auto-Detection When using the Z.AI / GLM provider, Hermes automatically probes multiple endpoints (global, China, coding variants) to find one that accepts your API key. You don't need to set `GLM_BASE_URL` manually — the working endpoint is detected and cached automatically. @@ -849,7 +854,7 @@ You can also select named custom providers from the interactive `hermes model` m | **Cost optimization** | ClawRouter or OpenRouter with `sort: "price"` | | **Maximum privacy** | Ollama, vLLM, or llama.cpp (fully local) | | **Enterprise / Azure** | Azure OpenAI with custom endpoint | -| **Chinese AI models** | z.ai (GLM), Kimi/Moonshot, or MiniMax (first-class providers) | +| **Chinese AI models** | z.ai (GLM), Kimi/Moonshot, MiniMax, or Xiaomi MiMo (first-class providers) | :::tip You can switch between providers at any time with `hermes model` — no restart required. Your conversation history, memory, and skills carry over regardless of which provider you use. @@ -924,7 +929,7 @@ fallback_model: When activated, the fallback swaps the model and provider mid-session without losing your conversation. It fires **at most once** per session. -Supported providers: `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `deepseek`, `ai-gateway`, `opencode-zen`, `opencode-go`, `kilocode`, `alibaba`, `custom`. 
+Supported providers: `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `deepseek`, `ai-gateway`, `opencode-zen`, `opencode-go`, `kilocode`, `xiaomi`, `alibaba`, `custom`. :::tip Fallback is configured exclusively through `config.yaml` — there are no environment variables for it. For full details on when it triggers, supported providers, and how it interacts with auxiliary tasks and delegation, see [Fallback Providers](/docs/user-guide/features/fallback-providers). diff --git a/website/docs/reference/cli-commands.md b/website/docs/reference/cli-commands.md index c430d3ba87..12394ea44e 100644 --- a/website/docs/reference/cli-commands.md +++ b/website/docs/reference/cli-commands.md @@ -76,7 +76,7 @@ Common options: | `-q`, `--query "..."` | One-shot, non-interactive prompt. | | `-m`, `--model ` | Override the model for this run. | | `-t`, `--toolsets ` | Enable a comma-separated set of toolsets. | -| `--provider ` | Force a provider: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot-acp`, `copilot`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `deepseek`, `ai-gateway`, `opencode-zen`, `opencode-go`, `kilocode`, `alibaba`. | +| `--provider ` | Force a provider: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot-acp`, `copilot`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `deepseek`, `ai-gateway`, `opencode-zen`, `opencode-go`, `kilocode`, `xiaomi`, `alibaba`. | | `-s`, `--skills ` | Preload one or more skills for the session (can be repeated or comma-separated). | | `-v`, `--verbose` | Verbose output. | | `-Q`, `--quiet` | Programmatic mode: suppress banner/spinner/tool previews. 
| diff --git a/website/docs/reference/environment-variables.md b/website/docs/reference/environment-variables.md index 56511e9139..a548a6ff6d 100644 --- a/website/docs/reference/environment-variables.md +++ b/website/docs/reference/environment-variables.md @@ -37,6 +37,8 @@ All variables go in `~/.hermes/.env`. You can also set them with `hermes config | `MINIMAX_CN_BASE_URL` | Override MiniMax China base URL (default: `https://api.minimaxi.com/v1`) | | `KILOCODE_API_KEY` | Kilo Code API key ([kilo.ai](https://kilo.ai)) | | `KILOCODE_BASE_URL` | Override Kilo Code base URL (default: `https://api.kilo.ai/api/gateway`) | +| `XIAOMI_API_KEY` | Xiaomi MiMo API key ([platform.xiaomimimo.com](https://platform.xiaomimimo.com)) | +| `XIAOMI_BASE_URL` | Override Xiaomi MiMo base URL (default: `https://api.xiaomimimo.com/v1`) | | `HF_TOKEN` | Hugging Face token for Inference Providers ([huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)) | | `HF_BASE_URL` | Override Hugging Face base URL (default: `https://router.huggingface.co/v1`) | | `GOOGLE_API_KEY` | Google AI Studio API key ([aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)) | @@ -65,7 +67,7 @@ For native Anthropic auth, Hermes prefers Claude Code's own credential files whe | Variable | Description | |----------|-------------| -| `HERMES_INFERENCE_PROVIDER` | Override provider selection: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`, `alibaba`, `deepseek`, `opencode-zen`, `opencode-go`, `ai-gateway` (default: `auto`) | +| `HERMES_INFERENCE_PROVIDER` | Override provider selection: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`, `xiaomi`, `alibaba`, `deepseek`, `opencode-zen`, `opencode-go`, `ai-gateway` (default: `auto`) | | `HERMES_PORTAL_BASE_URL` | 
Override Nous Portal URL (for development/testing) | | `NOUS_INFERENCE_BASE_URL` | Override Nous inference API URL | | `HERMES_NOUS_MIN_KEY_TTL_SECONDS` | Min agent key TTL before re-mint (default: 1800 = 30min) | @@ -193,9 +195,12 @@ For cloud sandbox backends, persistence is filesystem-oriented. `TERMINAL_LIFETI | `SIGNAL_IGNORE_STORIES` | Ignore Signal stories/status updates | | `SIGNAL_ALLOW_ALL_USERS` | Allow all Signal users without an allowlist | | `TWILIO_ACCOUNT_SID` | Twilio Account SID (shared with telephony skill) | -| `TWILIO_AUTH_TOKEN` | Twilio Auth Token (shared with telephony skill) | +| `TWILIO_AUTH_TOKEN` | Twilio Auth Token (shared with telephony skill; also used for webhook signature validation) | | `TWILIO_PHONE_NUMBER` | Twilio phone number in E.164 format (shared with telephony skill) | +| `SMS_WEBHOOK_URL` | Public URL for Twilio signature validation — must match the webhook URL in Twilio Console (required) | | `SMS_WEBHOOK_PORT` | Webhook listener port for inbound SMS (default: `8080`) | +| `SMS_WEBHOOK_HOST` | Webhook bind address (default: `0.0.0.0`) | +| `SMS_INSECURE_NO_SIGNATURE` | Set to `true` to disable Twilio signature validation (local dev only — not for production) | | `SMS_ALLOWED_USERS` | Comma-separated E.164 phone numbers allowed to chat | | `SMS_ALLOW_ALL_USERS` | Allow all SMS senders without an allowlist | | `SMS_HOME_CHANNEL` | Phone number for cron job / notification delivery | diff --git a/website/docs/user-guide/features/fallback-providers.md b/website/docs/user-guide/features/fallback-providers.md index 39c907c7ac..b539cb1279 100644 --- a/website/docs/user-guide/features/fallback-providers.md +++ b/website/docs/user-guide/features/fallback-providers.md @@ -50,6 +50,7 @@ Both `provider` and `model` are **required**. 
If either is missing, the fallback | OpenCode Zen | `opencode-zen` | `OPENCODE_ZEN_API_KEY` | | OpenCode Go | `opencode-go` | `OPENCODE_GO_API_KEY` | | Kilo Code | `kilocode` | `KILOCODE_API_KEY` | +| Xiaomi MiMo | `xiaomi` | `XIAOMI_API_KEY` | | Alibaba / DashScope | `alibaba` | `DASHSCOPE_API_KEY` | | Hugging Face | `huggingface` | `HF_TOKEN` | | Custom endpoint | `custom` | `base_url` + `api_key_env` (see below) | @@ -169,7 +170,7 @@ When a task's provider is set to `"auto"` (the default), Hermes tries providers ```text OpenRouter → Nous Portal → Custom endpoint → Codex OAuth → -API-key providers (z.ai, Kimi, MiniMax, Hugging Face, Anthropic) → give up +API-key providers (z.ai, Kimi, MiniMax, Xiaomi MiMo, Hugging Face, Anthropic) → give up ``` **For vision tasks:** diff --git a/website/docs/user-guide/messaging/index.md b/website/docs/user-guide/messaging/index.md index 335c6530bc..41b0314379 100644 --- a/website/docs/user-guide/messaging/index.md +++ b/website/docs/user-guide/messaging/index.md @@ -178,6 +178,8 @@ EMAIL_ALLOWED_USERS=trusted@example.com,colleague@work.com MATTERMOST_ALLOWED_USERS=3uo8dkh1p7g1mfk49ear5fzs5c MATRIX_ALLOWED_USERS=@alice:matrix.org DINGTALK_ALLOWED_USERS=user-id-1 +FEISHU_ALLOWED_USERS=ou_xxxxxxxx,ou_yyyyyyyy +WECOM_ALLOWED_USERS=user-id-1,user-id-2 # Or allow GATEWAY_ALLOWED_USERS=123456789,987654321 diff --git a/website/docs/user-guide/messaging/sms.md b/website/docs/user-guide/messaging/sms.md index 84a3b8fa2f..c5b28cd6fd 100644 --- a/website/docs/user-guide/messaging/sms.md +++ b/website/docs/user-guide/messaging/sms.md @@ -84,6 +84,13 @@ ngrok http 8080 Set the resulting public URL as your Twilio webhook. 
::: +**Set `SMS_WEBHOOK_URL` to the same URL you configured in Twilio.** This is required for Twilio signature validation — the adapter will refuse to start without it: + +```bash +# Must match the webhook URL in your Twilio Console +SMS_WEBHOOK_URL=https://your-server:8080/webhooks/twilio +``` + The webhook port defaults to `8080`. Override with: ```bash @@ -101,9 +108,11 @@ hermes gateway You should see: ``` -[sms] Twilio webhook server listening on port 8080, from: +1555***4567 +[sms] Twilio webhook server listening on 0.0.0.0:8080, from: +1555***4567 ``` +If you see `Refusing to start: SMS_WEBHOOK_URL is required`, set `SMS_WEBHOOK_URL` to the public URL configured in your Twilio Console (see Step 3). + Text your Twilio number — Hermes will respond via SMS. --- @@ -113,9 +122,12 @@ Text your Twilio number — Hermes will respond via SMS. | Variable | Required | Description | |----------|----------|-------------| | `TWILIO_ACCOUNT_SID` | Yes | Twilio Account SID (starts with `AC`) | -| `TWILIO_AUTH_TOKEN` | Yes | Twilio Auth Token | +| `TWILIO_AUTH_TOKEN` | Yes | Twilio Auth Token (also used for webhook signature validation) | | `TWILIO_PHONE_NUMBER` | Yes | Your Twilio phone number (E.164 format) | +| `SMS_WEBHOOK_URL` | Yes | Public URL for Twilio signature validation — must match the webhook URL in your Twilio Console | | `SMS_WEBHOOK_PORT` | No | Webhook listener port (default: `8080`) | +| `SMS_WEBHOOK_HOST` | No | Webhook bind address (default: `0.0.0.0`) | +| `SMS_INSECURE_NO_SIGNATURE` | No | Set to `true` to disable signature validation (local dev only — **not for production**) | | `SMS_ALLOWED_USERS` | No | Comma-separated E.164 phone numbers allowed to chat | | `SMS_ALLOW_ALL_USERS` | No | Set to `true` to allow anyone (not recommended) | | `SMS_HOME_CHANNEL` | No | Phone number for cron job / notification delivery | @@ -134,6 +146,21 @@ Text your Twilio number — Hermes will respond via SMS. 
## Security +### Webhook signature validation + +Hermes validates that inbound webhooks genuinely originate from Twilio by verifying the `X-Twilio-Signature` header (HMAC-SHA1). This prevents attackers from injecting forged messages. + +**`SMS_WEBHOOK_URL` is required.** Set it to the public URL configured in your Twilio Console. The adapter will refuse to start without it. + +For local development without a public URL, you can disable validation: + +```bash +# Local dev only — NOT for production +SMS_INSECURE_NO_SIGNATURE=true +``` + +### User allowlists + **The gateway denies all users by default.** Configure an allowlist: ```bash diff --git a/website/docs/user-guide/messaging/weixin.md b/website/docs/user-guide/messaging/weixin.md index 656081a22c..f658e0e233 100644 --- a/website/docs/user-guide/messaging/weixin.md +++ b/website/docs/user-guide/messaging/weixin.md @@ -66,6 +66,9 @@ WEIXIN_ACCOUNT_ID=your-account-id WEIXIN_DM_POLICY=open WEIXIN_ALLOWED_USERS=user_id_1,user_id_2 +# Optional: restore legacy multiline splitting behavior +# WEIXIN_SPLIT_MULTILINE_MESSAGES=true + # Optional: home channel for cron/notifications WEIXIN_HOME_CHANNEL=chat_id WEIXIN_HOME_CHANNEL_NAME=Home @@ -88,7 +91,7 @@ The adapter will restore saved credentials, connect to the iLink API, and begin - **AES-128-ECB encrypted CDN** — automatic encryption/decryption for all media transfers - **Context token persistence** — disk-backed reply continuity across restarts - **Markdown formatting** — headers, tables, and code blocks are reformatted for WeChat readability -- **Smart message chunking** — long messages are split at logical boundaries (paragraphs, code fences) +- **Smart message chunking** — messages stay as a single bubble when under the limit; only oversized payloads split at logical boundaries - **Typing indicators** — shows "typing…" status in the WeChat client while the agent processes - **SSRF protection** — outbound media URLs are validated before download - **Message 
deduplication** — 5-minute sliding window prevents double-processing @@ -108,6 +111,7 @@ Set these in `config.yaml` under `platforms.weixin.extra`: | `group_policy` | `disabled` | Group access: `open`, `allowlist`, `disabled` | | `allow_from` | `[]` | User IDs allowed for DMs (when dm_policy=allowlist) | | `group_allow_from` | `[]` | Group IDs allowed (when group_policy=allowlist) | +| `split_multiline_messages` | `false` | When `true`, split multi-line replies into multiple chat messages (legacy behavior). When `false`, keep multi-line replies as one message unless they exceed the length limit. | ## Access Policies @@ -211,13 +215,14 @@ WeChat's personal chat does not natively render full Markdown. The adapter refor ## Message Chunking -Long messages are split intelligently for chat delivery: +Messages are delivered as a single chat message whenever they fit within the platform limit. Only oversized payloads are split for delivery: - Maximum message length: **4000 characters** -- Split points prefer paragraph boundaries and blank lines -- Code fences are kept intact (never split mid-block) -- Indented continuation lines (sub-items in reformatted tables/lists) stay with their parent +- Messages under the limit stay intact even when they contain multiple paragraphs or line breaks +- Oversized messages split at logical boundaries (paragraphs, blank lines, code fences) +- Code fences are kept intact whenever possible (never split mid-block unless the fence itself exceeds the limit) - Oversized individual blocks fall back to the base adapter's truncation logic +- A 0.3 s inter-chunk delay prevents WeChat rate-limit drops when multiple chunks are sent ## Typing Indicators