diff --git a/Dockerfile b/Dockerfile index a9624530c0..0eddaba0bc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,8 @@ FROM debian:13.4 +# Disable Python stdout buffering to ensure logs are printed immediately +ENV PYTHONUNBUFFERED=1 + # Install system dependencies in one layer, clear APT cache RUN apt-get update && \ apt-get install -y --no-install-recommends \ diff --git a/README.md b/README.md index fde4cae334..b77cd6202f 100644 --- a/README.md +++ b/README.md @@ -33,8 +33,10 @@ Use any model you want — [Nous Portal](https://portal.nousresearch.com), [Open curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash ``` -Works on Linux, macOS, and WSL2. The installer handles everything — Python, Node.js, dependencies, and the `hermes` command. No prerequisites except git. +Works on Linux, macOS, WSL2, and Android via Termux. The installer handles the platform-specific setup for you. +> **Android / Termux:** The tested manual path is documented in the [Termux guide](https://hermes-agent.nousresearch.com/docs/getting-started/termux). On Termux, Hermes installs a curated `.[termux]` extra because the full `.[all]` extra currently pulls Android-incompatible voice dependencies. +> > **Windows:** Native Windows is not supported. Please install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and run the command above. After installation: diff --git a/agent/anthropic_adapter.py b/agent/anthropic_adapter.py index fa5e391a4f..d5c0c06fbb 100644 --- a/agent/anthropic_adapter.py +++ b/agent/anthropic_adapter.py @@ -1238,10 +1238,27 @@ def build_anthropic_kwargs( ) -> Dict[str, Any]: """Build kwargs for anthropic.messages.create(). - When *max_tokens* is None, the model's native output limit is used - (e.g. 128K for Opus 4.6, 64K for Sonnet 4.6). If *context_length* - is provided, the effective limit is clamped so it doesn't exceed - the context window. 
+ Naming note — two distinct concepts, easily confused: + max_tokens = OUTPUT token cap for a single response. + Anthropic's API calls this "max_tokens" but it only + limits the *output*. Anthropic's own native SDK + renamed it "max_output_tokens" for clarity. + context_length = TOTAL context window (input tokens + output tokens). + The API enforces: input_tokens + max_tokens ≤ context_length. + Stored on the ContextCompressor; reduced on overflow errors. + + When *max_tokens* is None the model's native output ceiling is used + (e.g. 128K for Opus 4.6, 64K for Sonnet 4.6). + + When *context_length* is provided and the model's native output ceiling + exceeds it (e.g. a local endpoint with an 8K window), the output cap is + clamped to context_length − 1. This only kicks in for unusually small + context windows; for full-size models the native output cap is always + smaller than the context window so no clamping happens. + NOTE: this clamping does not account for prompt size — if the prompt is + large, Anthropic may still reject the request. The caller must detect + "max_tokens too large given prompt" errors and retry with a smaller cap + (see parse_available_output_tokens_from_error + _ephemeral_max_output_tokens). When *is_oauth* is True, applies Claude Code compatibility transforms: system prompt prefix, tool name prefixing, and prompt sanitization. @@ -1256,10 +1273,14 @@ def build_anthropic_kwargs( anthropic_tools = convert_tools_to_anthropic(tools) if tools else [] model = normalize_model_name(model, preserve_dots=preserve_dots) + # effective_max_tokens = output cap for this call (≠ total context window) effective_max_tokens = max_tokens or _get_anthropic_max_output(model) - # Clamp to context window if the user set a lower context_length - # (e.g. custom endpoint with limited capacity). + # Clamp output cap to fit inside the total context window. + # Only matters for small custom endpoints where context_length < native + # output ceiling. 
For standard Anthropic models context_length (e.g. + # 200K) is always larger than the output ceiling (e.g. 128K), so this + # branch is not taken. if context_length and effective_max_tokens > context_length: effective_max_tokens = max(context_length - 1, 1) diff --git a/agent/auxiliary_client.py b/agent/auxiliary_client.py index 27c67c10a3..a757f42699 100644 --- a/agent/auxiliary_client.py +++ b/agent/auxiliary_client.py @@ -702,7 +702,7 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]: logger.debug("Auxiliary text client: %s (%s) via pool", pconfig.name, model) extra = {} if "api.kimi.com" in base_url.lower(): - extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"} + extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"} elif "api.githubcopilot.com" in base_url.lower(): from hermes_cli.models import copilot_default_headers @@ -721,7 +721,7 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]: logger.debug("Auxiliary text client: %s (%s)", pconfig.name, model) extra = {} if "api.kimi.com" in base_url.lower(): - extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"} + extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"} elif "api.githubcopilot.com" in base_url.lower(): from hermes_cli.models import copilot_default_headers @@ -1047,6 +1047,32 @@ def _is_payment_error(exc: Exception) -> bool: return False +def _is_connection_error(exc: Exception) -> bool: + """Detect connection/network errors that warrant provider fallback. + + Returns True for errors indicating the provider endpoint is unreachable + (DNS failure, connection refused, TLS errors, timeouts). These are + distinct from API errors (4xx/5xx) which indicate the provider IS + reachable but returned an error. 
+ """ + from openai import APIConnectionError, APITimeoutError + + if isinstance(exc, (APIConnectionError, APITimeoutError)): + return True + # urllib3 / httpx / httpcore connection errors + err_type = type(exc).__name__ + if any(kw in err_type for kw in ("Connection", "Timeout", "DNS", "SSL")): + return True + err_lower = str(exc).lower() + if any(kw in err_lower for kw in ( + "connection refused", "name or service not known", + "no route to host", "network is unreachable", + "timed out", "connection reset", + )): + return True + return False + + def _try_payment_fallback( failed_provider: str, task: str = None, @@ -1111,7 +1137,7 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]: main_model = _read_main_model() if (main_provider and main_model and main_provider not in _AGGREGATOR_PROVIDERS - and main_provider not in ("auto", "custom", "")): + and main_provider not in ("auto", "")): client, resolved = resolve_provider_client(main_provider, main_model) if client is not None: logger.info("Auxiliary auto-detect: using main provider %s (%s)", @@ -1169,7 +1195,7 @@ def _to_async_client(sync_client, model: str): async_kwargs["default_headers"] = copilot_default_headers() elif "api.kimi.com" in base_lower: - async_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.0"} + async_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.3"} return AsyncOpenAI(**async_kwargs), model @@ -1289,7 +1315,13 @@ def resolve_provider_client( ) return None, None final_model = model or _read_main_model() or "gpt-4o-mini" - client = OpenAI(api_key=custom_key, base_url=custom_base) + extra = {} + if "api.kimi.com" in custom_base.lower(): + extra["default_headers"] = {"User-Agent": "KimiCLI/1.3"} + elif "api.githubcopilot.com" in custom_base.lower(): + from hermes_cli.models import copilot_default_headers + extra["default_headers"] = copilot_default_headers() + client = OpenAI(api_key=custom_key, base_url=custom_base, **extra) return (_to_async_client(client, final_model) if 
async_mode else (client, final_model)) # Try custom first, then codex, then API-key providers @@ -1368,7 +1400,7 @@ def resolve_provider_client( # Provider-specific headers headers = {} if "api.kimi.com" in base_url.lower(): - headers["User-Agent"] = "KimiCLI/1.0" + headers["User-Agent"] = "KimiCLI/1.3" elif "api.githubcopilot.com" in base_url.lower(): from hermes_cli.models import copilot_default_headers @@ -2093,7 +2125,18 @@ def call_llm( # try alternative providers instead of giving up. This handles the # common case where a user runs out of OpenRouter credits but has # Codex OAuth or another provider available. - if _is_payment_error(first_err): + # + # ── Connection error fallback ──────────────────────────────── + # When a provider endpoint is unreachable (DNS failure, connection + # refused, timeout), try alternative providers. This handles stale + # Codex/OAuth tokens that authenticate but whose endpoint is down, + # and providers the user never configured that got picked up by + # the auto-detection chain. + should_fallback = _is_payment_error(first_err) or _is_connection_error(first_err) + if should_fallback: + reason = "payment error" if _is_payment_error(first_err) else "connection error" + logger.info("Auxiliary %s: %s on %s (%s), trying fallback", + task or "call", reason, resolved_provider, first_err) fb_client, fb_model, fb_label = _try_payment_fallback( resolved_provider, task) if fb_client is not None: diff --git a/agent/context_compressor.py b/agent/context_compressor.py index c61cf2c5a7..eba2de3f3f 100644 --- a/agent/context_compressor.py +++ b/agent/context_compressor.py @@ -691,33 +691,43 @@ Write only the summary body. 
Do not include any preamble or prefix.""" ) compressed.append(msg) - _merge_summary_into_tail = False - if summary: - last_head_role = messages[compress_start - 1].get("role", "user") if compress_start > 0 else "user" - first_tail_role = messages[compress_end].get("role", "user") if compress_end < n_messages else "user" - # Pick a role that avoids consecutive same-role with both neighbors. - # Priority: avoid colliding with head (already committed), then tail. - if last_head_role in ("assistant", "tool"): - summary_role = "user" - else: - summary_role = "assistant" - # If the chosen role collides with the tail AND flipping wouldn't - # collide with the head, flip it. - if summary_role == first_tail_role: - flipped = "assistant" if summary_role == "user" else "user" - if flipped != last_head_role: - summary_role = flipped - else: - # Both roles would create consecutive same-role messages - # (e.g. head=assistant, tail=user — neither role works). - # Merge the summary into the first tail message instead - # of inserting a standalone message that breaks alternation. - _merge_summary_into_tail = True - if not _merge_summary_into_tail: - compressed.append({"role": summary_role, "content": summary}) - else: + # If LLM summary failed, insert a static fallback so the model + # knows context was lost rather than silently dropping everything. + if not summary: if not self.quiet_mode: - logger.debug("No summary model available — middle turns dropped without summary") + logger.warning("Summary generation failed — inserting static fallback context marker") + n_dropped = compress_end - compress_start + summary = ( + f"{SUMMARY_PREFIX}\n" + f"Summary generation was unavailable. {n_dropped} conversation turns were " + f"removed to free context space but could not be summarized. The removed " + f"turns contained earlier work in this session. Continue based on the " + f"recent messages below and the current state of any files or resources." 
+ ) + + _merge_summary_into_tail = False + last_head_role = messages[compress_start - 1].get("role", "user") if compress_start > 0 else "user" + first_tail_role = messages[compress_end].get("role", "user") if compress_end < n_messages else "user" + # Pick a role that avoids consecutive same-role with both neighbors. + # Priority: avoid colliding with head (already committed), then tail. + if last_head_role in ("assistant", "tool"): + summary_role = "user" + else: + summary_role = "assistant" + # If the chosen role collides with the tail AND flipping wouldn't + # collide with the head, flip it. + if summary_role == first_tail_role: + flipped = "assistant" if summary_role == "user" else "user" + if flipped != last_head_role: + summary_role = flipped + else: + # Both roles would create consecutive same-role messages + # (e.g. head=assistant, tail=user — neither role works). + # Merge the summary into the first tail message instead + # of inserting a standalone message that breaks alternation. 
+ _merge_summary_into_tail = True + if not _merge_summary_into_tail: + compressed.append({"role": summary_role, "content": summary}) for i in range(compress_end, n_messages): msg = messages[i].copy() diff --git a/agent/credential_pool.py b/agent/credential_pool.py index dd2c9abc5e..a17d71ba5e 100644 --- a/agent/credential_pool.py +++ b/agent/credential_pool.py @@ -18,12 +18,14 @@ import hermes_cli.auth as auth_mod from hermes_cli.auth import ( CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS, DEFAULT_AGENT_KEY_MIN_TTL_SECONDS, + KIMI_CODE_BASE_URL, PROVIDER_REGISTRY, _codex_access_token_is_expiring, _decode_jwt_claims, _import_codex_cli_tokens, _load_auth_store, _load_provider_state, + _resolve_kimi_base_url, _resolve_zai_base_url, read_credential_pool, write_credential_pool, @@ -1084,7 +1086,9 @@ def _seed_from_env(provider: str, entries: List[PooledCredential]) -> Tuple[bool active_sources.add(source) auth_type = AUTH_TYPE_OAUTH if provider == "anthropic" and not token.startswith("sk-ant-api") else AUTH_TYPE_API_KEY base_url = env_url or pconfig.inference_base_url - if provider == "zai": + if provider == "kimi-coding": + base_url = _resolve_kimi_base_url(token, pconfig.inference_base_url, env_url) + elif provider == "zai": base_url = _resolve_zai_base_url(token, pconfig.inference_base_url, env_url) changed |= _upsert_entry( entries, diff --git a/agent/error_classifier.py b/agent/error_classifier.py index b227932ad7..1f6b48a095 100644 --- a/agent/error_classifier.py +++ b/agent/error_classifier.py @@ -596,6 +596,9 @@ def _classify_400( err_obj = body.get("error", {}) if isinstance(err_obj, dict): err_body_msg = (err_obj.get("message") or "").strip().lower() + # Responses API (and some providers) use flat body: {"message": "..."} + if not err_body_msg: + err_body_msg = (body.get("message") or "").strip().lower() is_generic = len(err_body_msg) < 30 or err_body_msg in ("error", "") is_large = approx_tokens > context_length * 0.4 or approx_tokens > 80000 or num_messages > 80 
@@ -674,6 +677,27 @@ def _classify_by_message( should_compress=True, ) + # Usage-limit patterns need the same disambiguation as 402: some providers + # surface "usage limit" errors without an HTTP status code. A transient + # signal ("try again", "resets at", …) means it's a periodic quota, not + # billing exhaustion. + has_usage_limit = any(p in error_msg for p in _USAGE_LIMIT_PATTERNS) + if has_usage_limit: + has_transient_signal = any(p in error_msg for p in _USAGE_LIMIT_TRANSIENT_SIGNALS) + if has_transient_signal: + return result_fn( + FailoverReason.rate_limit, + retryable=True, + should_rotate_credential=True, + should_fallback=True, + ) + return result_fn( + FailoverReason.billing, + retryable=False, + should_rotate_credential=True, + should_fallback=True, + ) + # Billing patterns if any(p in error_msg for p in _BILLING_PATTERNS): return result_fn( diff --git a/agent/model_metadata.py b/agent/model_metadata.py index 9282586fea..791f778c22 100644 --- a/agent/model_metadata.py +++ b/agent/model_metadata.py @@ -603,6 +603,49 @@ def parse_context_limit_from_error(error_msg: str) -> Optional[int]: return None +def parse_available_output_tokens_from_error(error_msg: str) -> Optional[int]: + """Detect an "output cap too large" error and return how many output tokens are available. + + Background — two distinct context errors exist: + 1. "Prompt too long" — the INPUT itself exceeds the context window. + Fix: compress history and/or halve context_length. + 2. "max_tokens too large" — input is fine, but input + requested_output > window. + Fix: reduce max_tokens (the output cap) for this call. + Do NOT touch context_length — the window hasn't shrunk. + + Anthropic's API returns errors like: + "max_tokens: 32768 > context_window: 200000 - input_tokens: 190000 = available_tokens: 10000" + + Returns the number of output tokens that would fit (e.g. 10000 above), or None if + the error does not look like a max_tokens-too-large error. 
+ """ + error_lower = error_msg.lower() + + # Must look like an output-cap error, not a prompt-length error. + is_output_cap_error = ( + "max_tokens" in error_lower + and ("available_tokens" in error_lower or "available tokens" in error_lower) + ) + if not is_output_cap_error: + return None + + # Extract the available_tokens figure. + # Anthropic format: "… = available_tokens: 10000" + patterns = [ + r'available_tokens[:\s]+(\d+)', + r'available\s+tokens[:\s]+(\d+)', + # fallback: last number after "=" in expressions like "200000 - 190000 = 10000" + r'=\s*(\d+)\s*$', + ] + for pattern in patterns: + match = re.search(pattern, error_lower) + if match: + tokens = int(match.group(1)) + if tokens >= 1: + return tokens + return None + + def _model_id_matches(candidate_id: str, lookup_model: str) -> bool: """Return True if *candidate_id* (from server) matches *lookup_model* (configured). diff --git a/batch_runner.py b/batch_runner.py index 32cd203b24..195452c0ae 100644 --- a/batch_runner.py +++ b/batch_runner.py @@ -1158,7 +1158,7 @@ def main( providers_order (str): Comma-separated list of OpenRouter providers to try in order (e.g. 
"anthropic,openai,google") provider_sort (str): Sort providers by "price", "throughput", or "latency" (OpenRouter only) max_tokens (int): Maximum tokens for model responses (optional, uses model default if not set) - reasoning_effort (str): OpenRouter reasoning effort level: "xhigh", "high", "medium", "low", "minimal", "none" (default: "medium") + reasoning_effort (str): OpenRouter reasoning effort level: "none", "minimal", "low", "medium", "high", "xhigh" (default: "medium") reasoning_disabled (bool): Completely disable reasoning/thinking tokens (default: False) prefill_messages_file (str): Path to JSON file containing prefill messages (list of {role, content} dicts) max_samples (int): Only process the first N samples from the dataset (optional, processes all if not set) @@ -1227,7 +1227,7 @@ def main( print("🧠 Reasoning: DISABLED (effort=none)") elif reasoning_effort: # Use specified effort level - valid_efforts = ["xhigh", "high", "medium", "low", "minimal", "none"] + valid_efforts = ["none", "minimal", "low", "medium", "high", "xhigh"] if reasoning_effort not in valid_efforts: print(f"❌ Error: --reasoning_effort must be one of: {', '.join(valid_efforts)}") return diff --git a/cli-config.yaml.example b/cli-config.yaml.example index d75284443f..346e6e851f 100644 --- a/cli-config.yaml.example +++ b/cli-config.yaml.example @@ -48,6 +48,25 @@ model: # api_key: "your-key-here" # Uncomment to set here instead of .env base_url: "https://openrouter.ai/api/v1" + # ── Token limits — two settings, easy to confuse ────────────────────────── + # + # context_length: TOTAL context window (input + output tokens combined). + # Controls when Hermes compresses history and validates requests. + # Leave unset — Hermes auto-detects the correct value from the provider. + # Set manually only when auto-detection is wrong (e.g. a local server with + # a custom num_ctx, or a proxy that doesn't expose /v1/models). 
+ # + # context_length: 131072 + # + # max_tokens: OUTPUT cap — maximum tokens the model may generate per response. + # Unrelated to how long your conversation history can be. + # The OpenAI-standard name "max_tokens" is a misnomer; Anthropic's native + # API has since renamed it "max_output_tokens" for clarity. + # Leave unset to use the model's native output ceiling (recommended). + # Set only if you want to deliberately limit individual response length. + # + # max_tokens: 8192 + # ============================================================================= # OpenRouter Provider Routing (only applies when using OpenRouter) # ============================================================================= diff --git a/cli.py b/cli.py index 8b5bfea2d0..6303e54f7c 100644 --- a/cli.py +++ b/cli.py @@ -1046,7 +1046,7 @@ def _cprint(text: str): # --------------------------------------------------------------------------- -# File-drop detection — extracted as a pure function for testability. +# File-drop / local attachment detection — extracted as pure helpers for tests. # --------------------------------------------------------------------------- _IMAGE_EXTENSIONS = frozenset({ @@ -1055,12 +1055,103 @@ _IMAGE_EXTENSIONS = frozenset({ }) -def _detect_file_drop(user_input: str) -> "dict | None": - """Detect if *user_input* is a dragged/pasted file path, not a slash command. +from hermes_constants import is_termux as _is_termux_environment - When a user drags a file into the terminal, macOS pastes the absolute path - (e.g. ``/Users/roland/Desktop/file.png``) which starts with ``/`` and would - otherwise be mistaken for a slash command. 
+ +def _termux_example_image_path(filename: str = "cat.png") -> str: + """Return a realistic example media path for the current Termux setup.""" + candidates = [ + os.path.expanduser("~/storage/shared"), + "/sdcard", + "/storage/emulated/0", + "/storage/self/primary", + ] + for root in candidates: + if os.path.isdir(root): + return os.path.join(root, "Pictures", filename) + return os.path.join("~/storage/shared", "Pictures", filename) + + +def _split_path_input(raw: str) -> tuple[str, str]: + """Split a leading file path token from trailing free-form text. + + Supports quoted paths and backslash-escaped spaces so callers can accept + inputs like: + /tmp/pic.png describe this + ~/storage/shared/My\ Photos/cat.png what is this? + "/storage/emulated/0/DCIM/Camera/cat 1.png" summarize + """ + raw = str(raw or "").strip() + if not raw: + return "", "" + + if raw[0] in {'"', "'"}: + quote = raw[0] + pos = 1 + while pos < len(raw): + ch = raw[pos] + if ch == '\\' and pos + 1 < len(raw): + pos += 2 + continue + if ch == quote: + token = raw[1:pos] + remainder = raw[pos + 1 :].strip() + return token, remainder + pos += 1 + return raw[1:], "" + + pos = 0 + while pos < len(raw): + ch = raw[pos] + if ch == '\\' and pos + 1 < len(raw) and raw[pos + 1] == ' ': + pos += 2 + elif ch == ' ': + break + else: + pos += 1 + + token = raw[:pos].replace('\\ ', ' ') + remainder = raw[pos:].strip() + return token, remainder + + +def _resolve_attachment_path(raw_path: str) -> Path | None: + """Resolve a user-supplied local attachment path. + + Accepts quoted or unquoted paths, expands ``~`` and env vars, and resolves + relative paths from ``TERMINAL_CWD`` when set (matching terminal tool cwd). + Returns ``None`` when the path does not resolve to an existing file. 
+ """ + token = str(raw_path or "").strip() + if not token: + return None + + if (token.startswith('"') and token.endswith('"')) or (token.startswith("'") and token.endswith("'")): + token = token[1:-1].strip() + if not token: + return None + + expanded = os.path.expandvars(os.path.expanduser(token)) + path = Path(expanded) + if not path.is_absolute(): + base_dir = Path(os.getenv("TERMINAL_CWD", os.getcwd())) + path = base_dir / path + + try: + resolved = path.resolve() + except Exception: + resolved = path + + if not resolved.exists() or not resolved.is_file(): + return None + return resolved + + +def _detect_file_drop(user_input: str) -> "dict | None": + """Detect if *user_input* starts with a real local file path. + + This catches dragged/pasted paths before they are mistaken for slash + commands, and also supports Termux-friendly paths like ``~/storage/...``. Returns a dict on match:: @@ -1072,29 +1163,31 @@ def _detect_file_drop(user_input: str) -> "dict | None": Returns ``None`` when the input is not a real file path. """ - if not isinstance(user_input, str) or not user_input.startswith("/"): + if not isinstance(user_input, str): return None - # Walk the string absorbing backslash-escaped spaces ("\ "). 
- raw = user_input - pos = 0 - while pos < len(raw): - ch = raw[pos] - if ch == '\\' and pos + 1 < len(raw) and raw[pos + 1] == ' ': - pos += 2 # skip escaped space - elif ch == ' ': - break - else: - pos += 1 - - first_token_raw = raw[:pos] - first_token = first_token_raw.replace('\\ ', ' ') - drop_path = Path(first_token) - - if not drop_path.exists() or not drop_path.is_file(): + stripped = user_input.strip() + if not stripped: + return None + + starts_like_path = ( + stripped.startswith("/") + or stripped.startswith("~") + or stripped.startswith("./") + or stripped.startswith("../") + or stripped.startswith('"/') + or stripped.startswith('"~') + or stripped.startswith("'/") + or stripped.startswith("'~") + ) + if not starts_like_path: + return None + + first_token, remainder = _split_path_input(stripped) + drop_path = _resolve_attachment_path(first_token) + if drop_path is None: return None - remainder = raw[pos:].strip() return { "path": drop_path, "is_image": drop_path.suffix.lower() in _IMAGE_EXTENSIONS, @@ -1102,6 +1195,69 @@ def _detect_file_drop(user_input: str) -> "dict | None": } +def _format_image_attachment_badges(attached_images: list[Path], image_counter: int, width: int | None = None) -> str: + """Format the attached-image badge row for the interactive CLI. + + Narrow terminals such as Termux should get a compact summary that fits on a + single row, while wider terminals can show the classic per-image badges. + """ + if not attached_images: + return "" + + width = width or shutil.get_terminal_size((80, 24)).columns + + def _trunc(name: str, limit: int) -> str: + return name if len(name) <= limit else name[: max(1, limit - 3)] + "..." 
+ + if width < 52: + if len(attached_images) == 1: + return f"[📎 {_trunc(attached_images[0].name, 20)}]" + return f"[📎 {len(attached_images)} images attached]" + + if width < 80: + if len(attached_images) == 1: + return f"[📎 {_trunc(attached_images[0].name, 32)}]" + first = _trunc(attached_images[0].name, 20) + extra = len(attached_images) - 1 + return f"[📎 {first}] [+{extra}]" + + base = image_counter - len(attached_images) + 1 + return " ".join( + f"[📎 Image #{base + i}]" + for i in range(len(attached_images)) + ) + + +def _collect_query_images(query: str | None, image_arg: str | None = None) -> tuple[str, list[Path]]: + """Collect local image attachments for single-query CLI flows.""" + message = query or "" + images: list[Path] = [] + + if isinstance(message, str): + dropped = _detect_file_drop(message) + if dropped and dropped.get("is_image"): + images.append(dropped["path"]) + message = dropped["remainder"] or f"[User attached image: {dropped['path'].name}]" + + if image_arg: + explicit_path = _resolve_attachment_path(image_arg) + if explicit_path is None: + raise ValueError(f"Image file not found: {image_arg}") + if explicit_path.suffix.lower() not in _IMAGE_EXTENSIONS: + raise ValueError(f"Not a supported image file: {explicit_path}") + images.append(explicit_path) + + deduped: list[Path] = [] + seen: set[str] = set() + for img in images: + key = str(img) + if key in seen: + continue + seen.add(key) + deduped.append(img) + return message, deduped + + class ChatConsole: """Rich Console adapter for prompt_toolkit's patch_stdout context. @@ -1641,7 +1797,12 @@ class HermesCLI: return f"[{('█' * filled) + ('░' * max(0, width - filled))}]" def _get_status_bar_snapshot(self) -> Dict[str, Any]: - model_name = self.model or "unknown" + # Prefer the agent's model name — it updates on fallback. 
+ # self.model reflects the originally configured model and never + # changes mid-session, so the TUI would show a stale name after + # _try_activate_fallback() switches provider/model. + agent = getattr(self, "agent", None) + model_name = (getattr(agent, "model", None) or self.model or "unknown") model_short = model_name.split("/")[-1] if "/" in model_name else model_name if model_short.endswith(".gguf"): model_short = model_short[:-5] @@ -1667,7 +1828,6 @@ class HermesCLI: "compressions": 0, } - agent = getattr(self, "agent", None) if not agent: return snapshot @@ -1735,15 +1895,70 @@ class HermesCLI: width += ch_width return "".join(out).rstrip() + ellipsis + @staticmethod + def _get_tui_terminal_width(default: tuple[int, int] = (80, 24)) -> int: + """Return the live prompt_toolkit width, falling back to ``shutil``. + + The TUI layout can be narrower than ``shutil.get_terminal_size()`` reports, + especially on Termux/mobile shells, so prefer prompt_toolkit's width whenever + an app is active. 
+ """ + try: + from prompt_toolkit.application import get_app + return get_app().output.get_size().columns + except Exception: + return shutil.get_terminal_size(default).columns + + def _use_minimal_tui_chrome(self, width: Optional[int] = None) -> bool: + """Hide low-value chrome on narrow/mobile terminals to preserve rows.""" + if width is None: + width = self._get_tui_terminal_width() + return width < 64 + + def _tui_input_rule_height(self, position: str, width: Optional[int] = None) -> int: + """Return the visible height for the top/bottom input separator rules.""" + if position not in {"top", "bottom"}: + raise ValueError(f"Unknown input rule position: {position}") + if position == "top": + return 1 + return 0 if self._use_minimal_tui_chrome(width=width) else 1 + + def _agent_spacer_height(self, width: Optional[int] = None) -> int: + """Return the spacer height shown above the status bar while the agent runs.""" + if not getattr(self, "_agent_running", False): + return 0 + return 0 if self._use_minimal_tui_chrome(width=width) else 1 + + def _spinner_widget_height(self, width: Optional[int] = None) -> int: + """Return the visible height for the spinner/status text line above the status bar.""" + if not getattr(self, "_spinner_text", ""): + return 0 + return 0 if self._use_minimal_tui_chrome(width=width) else 1 + + def _get_voice_status_fragments(self, width: Optional[int] = None): + """Return the voice status bar fragments for the interactive TUI.""" + width = width or self._get_tui_terminal_width() + compact = self._use_minimal_tui_chrome(width=width) + if self._voice_recording: + if compact: + return [("class:voice-status-recording", " ● REC ")] + return [("class:voice-status-recording", " ● REC Ctrl+B to stop ")] + if self._voice_processing: + if compact: + return [("class:voice-status", " ◉ STT ")] + return [("class:voice-status", " ◉ Transcribing... 
")] + if compact: + return [("class:voice-status", " 🎤 Ctrl+B ")] + tts = " | TTS on" if self._voice_tts else "" + cont = " | Continuous" if self._voice_continuous else "" + return [("class:voice-status", f" 🎤 Voice mode{tts}{cont} — Ctrl+B to record ")] + def _build_status_bar_text(self, width: Optional[int] = None) -> str: + """Return a compact one-line session status string for the TUI footer.""" try: snapshot = self._get_status_bar_snapshot() if width is None: - try: - from prompt_toolkit.application import get_app - width = get_app().output.get_size().columns - except Exception: - width = shutil.get_terminal_size((80, 24)).columns + width = self._get_tui_terminal_width() percent = snapshot["context_percent"] percent_label = f"{percent}%" if percent is not None else "--" duration_label = snapshot["duration"] @@ -1779,11 +1994,7 @@ class HermesCLI: # values (especially on SSH) that differ from what prompt_toolkit # actually renders, causing the fragments to overflow to a second # line and produce duplicated status bar rows over long sessions. - try: - from prompt_toolkit.application import get_app - width = get_app().output.get_size().columns - except Exception: - width = shutil.get_terminal_size((80, 24)).columns + width = self._get_tui_terminal_width() duration_label = snapshot["duration"] if width < 52: @@ -2985,6 +3196,14 @@ class HermesCLI: doesn't fire for image-only clipboard content (e.g., VSCode terminal, Windows Terminal with WSL2). """ + if _is_termux_environment(): + _cprint( + f" {_DIM}Clipboard image paste is not available on Termux — " + f"use /image or paste a local image path like " + f"{_termux_example_image_path()}{_RST}" + ) + return + from hermes_cli.clipboard import has_clipboard_image if has_clipboard_image(): if self._try_attach_clipboard_image(): @@ -2995,6 +3214,7 @@ class HermesCLI: else: _cprint(f" {_DIM}(._.) 
No image found in clipboard{_RST}")
 
     def _write_osc52_clipboard(self, text: str) -> None:
         """Copy *text* to terminal clipboard via OSC 52."""
         payload = base64.b64encode(text.encode("utf-8")).decode("ascii")
@@ -3051,6 +3271,33 @@ class HermesCLI:
             _cprint(f" Clipboard copy failed: {e}")
 
-    def _preprocess_images_with_vision(self, text: str, images: list) -> str:
+    def _handle_image_command(self, cmd_original: str):
+        """Handle /image <path> — attach a local image file for the next prompt."""
+        raw_args = (cmd_original.split(None, 1)[1].strip() if " " in cmd_original else "")
+        if not raw_args:
+            hint = _termux_example_image_path() if _is_termux_environment() else "/path/to/image.png"
+            _cprint(f" {_DIM}Usage: /image <path> e.g. /image {hint}{_RST}")
+            return
+
+        path_token, _remainder = _split_path_input(raw_args)
+        image_path = _resolve_attachment_path(path_token)
+        if image_path is None:
+            _cprint(f" {_DIM}(>_<) File not found: {path_token}{_RST}")
+            return
+        if image_path.suffix.lower() not in _IMAGE_EXTENSIONS:
+            _cprint(f" {_DIM}(._.) Not a supported image file: {image_path.name}{_RST}")
+            return
+
+        self._attached_images.append(image_path)
+        _cprint(f" 📎 Attached image: {image_path.name}")
+        if _remainder:
+            _cprint(f" {_DIM}Now type your prompt (or use --image in single-query mode): {_remainder}{_RST}")
+        elif _is_termux_environment():
+            _cprint(f" {_DIM}Tip: type your next message, or run hermes chat -q --image {_termux_example_image_path(image_path.name)} \"What do you see?\"{_RST}")
+
+    def _preprocess_images_with_vision(self, text: str, images: list, *, announce: bool = True) -> str:
         """Analyze attached images via the vision tool and return enriched text.
Instead of embedding raw base64 ``image_url`` content parts in the @@ -3077,7 +3324,8 @@ class HermesCLI: if not img_path.exists(): continue size_kb = img_path.stat().st_size // 1024 - _cprint(f" {_DIM}👁️ analyzing {img_path.name} ({size_kb}KB)...{_RST}") + if announce: + _cprint(f" {_DIM}👁️ analyzing {img_path.name} ({size_kb}KB)...{_RST}") try: result_json = _asyncio.run( vision_analyze_tool(image_url=str(img_path), user_prompt=analysis_prompt) @@ -3090,21 +3338,24 @@ class HermesCLI: f"[If you need a closer look, use vision_analyze with " f"image_url: {img_path}]" ) - _cprint(f" {_DIM}✓ image analyzed{_RST}") + if announce: + _cprint(f" {_DIM}✓ image analyzed{_RST}") else: enriched_parts.append( f"[The user attached an image but it couldn't be analyzed. " f"You can try examining it with vision_analyze using " f"image_url: {img_path}]" ) - _cprint(f" {_DIM}⚠ vision analysis failed — path included for retry{_RST}") + if announce: + _cprint(f" {_DIM}⚠ vision analysis failed — path included for retry{_RST}") except Exception as e: enriched_parts.append( f"[The user attached an image but analysis failed ({e}). 
" f"You can try examining it with vision_analyze using " f"image_url: {img_path}]" ) - _cprint(f" {_DIM}⚠ vision analysis error — path included for retry{_RST}") + if announce: + _cprint(f" {_DIM}⚠ vision analysis error — path included for retry{_RST}") # Combine: vision descriptions first, then the user's original text user_text = text if isinstance(text, str) and text else "" @@ -3198,7 +3449,10 @@ class HermesCLI: _cprint(f"\n {_DIM}Tip: Just type your message to chat with Hermes!{_RST}") _cprint(f" {_DIM}Multi-line: Alt+Enter for a new line{_RST}") - _cprint(f" {_DIM}Paste image: Alt+V (or /paste){_RST}\n") + if _is_termux_environment(): + _cprint(f" {_DIM}Attach image: /image {_termux_example_image_path()} or start your prompt with a local image path{_RST}\n") + else: + _cprint(f" {_DIM}Paste image: Alt+V (or /paste){_RST}\n") def show_tools(self): """Display available tools with kawaii ASCII art.""" @@ -4102,59 +4356,7 @@ class HermesCLI: print(" To change model or provider, use: hermes model") - def _handle_prompt_command(self, cmd: str): - """Handle the /prompt command to view or set system prompt.""" - parts = cmd.split(maxsplit=1) - - if len(parts) > 1: - # Set new prompt - new_prompt = parts[1].strip() - - if new_prompt.lower() == "clear": - self.system_prompt = "" - self.agent = None # Force re-init - if save_config_value("agent.system_prompt", ""): - print("(^_^)b System prompt cleared (saved to config)") - else: - print("(^_^) System prompt cleared (session only)") - else: - self.system_prompt = new_prompt - self.agent = None # Force re-init - if save_config_value("agent.system_prompt", new_prompt): - print("(^_^)b System prompt set (saved to config)") - else: - print("(^_^) System prompt set (session only)") - print(f" \"{new_prompt[:60]}{'...' 
if len(new_prompt) > 60 else ''}\"") - else: - # Show current prompt - print() - print("+" + "-" * 50 + "+") - print("|" + " " * 15 + "(^_^) System Prompt" + " " * 15 + "|") - print("+" + "-" * 50 + "+") - print() - if self.system_prompt: - # Word wrap the prompt for display - words = self.system_prompt.split() - lines = [] - current_line = "" - for word in words: - if len(current_line) + len(word) + 1 <= 50: - current_line += (" " if current_line else "") + word - else: - lines.append(current_line) - current_line = word - if current_line: - lines.append(current_line) - for line in lines: - print(f" {line}") - else: - print(" (no custom prompt set - using default)") - print() - print(" Usage:") - print(" /prompt - Set a custom system prompt") - print(" /prompt clear - Remove custom prompt") - print(" /personality - Use a predefined personality") - print() + @staticmethod @@ -4654,9 +4856,7 @@ class HermesCLI: self._handle_model_switch(cmd_original) elif canonical == "provider": self._show_model_and_providers() - elif canonical == "prompt": - # Use original case so prompt text isn't lowercased - self._handle_prompt_command(cmd_original) + elif canonical == "personality": # Use original case (handler lowercases the personality name itself) self._handle_personality_command(cmd_original) @@ -4700,6 +4900,8 @@ class HermesCLI: self._handle_copy_command(cmd_original) elif canonical == "paste": self._handle_paste_command() + elif canonical == "image": + self._handle_image_command(cmd_original) elif canonical == "reload-mcp": with self._busy_command(self._slow_command_status(cmd_original)): self._reload_mcp() @@ -5140,6 +5342,9 @@ class HermesCLI: def _try_launch_chrome_debug(port: int, system: str) -> bool: """Try to launch Chrome/Chromium with remote debugging enabled. + Uses a dedicated user-data-dir so the debug instance doesn't conflict + with an already-running Chrome using the default profile. 
+ Returns True if a launch command was executed (doesn't guarantee success). """ import subprocess as _sp @@ -5149,10 +5354,20 @@ class HermesCLI: if not candidates: return False + # Dedicated profile dir so debug Chrome won't collide with normal Chrome + data_dir = str(_hermes_home / "chrome-debug") + os.makedirs(data_dir, exist_ok=True) + chrome = candidates[0] try: _sp.Popen( - [chrome, f"--remote-debugging-port={port}"], + [ + chrome, + f"--remote-debugging-port={port}", + f"--user-data-dir={data_dir}", + "--no-first-run", + "--no-default-browser-check", + ], stdout=_sp.DEVNULL, stderr=_sp.DEVNULL, start_new_session=True, # detach from terminal @@ -5227,18 +5442,33 @@ class HermesCLI: print(f" ✓ Chrome launched and listening on port {_port}") else: print(f" ⚠ Chrome launched but port {_port} isn't responding yet") - print(" You may need to close existing Chrome windows first and retry") + print(" Try again in a few seconds — the debug instance may still be starting") else: print(" ⚠ Could not auto-launch Chrome") # Show manual instructions as fallback + _data_dir = str(_hermes_home / "chrome-debug") sys_name = _plat.system() if sys_name == "Darwin": - chrome_cmd = 'open -a "Google Chrome" --args --remote-debugging-port=9222' + chrome_cmd = ( + 'open -a "Google Chrome" --args' + f" --remote-debugging-port=9222" + f' --user-data-dir="{_data_dir}"' + " --no-first-run --no-default-browser-check" + ) elif sys_name == "Windows": - chrome_cmd = 'chrome.exe --remote-debugging-port=9222' + chrome_cmd = ( + f'chrome.exe --remote-debugging-port=9222' + f' --user-data-dir="{_data_dir}"' + f" --no-first-run --no-default-browser-check" + ) else: - chrome_cmd = "google-chrome --remote-debugging-port=9222" - print(f" Launch Chrome manually: {chrome_cmd}") + chrome_cmd = ( + f"google-chrome --remote-debugging-port=9222" + f' --user-data-dir="{_data_dir}"' + f" --no-first-run --no-default-browser-check" + ) + print(f" Launch Chrome manually:") + print(f" {chrome_cmd}") else: 
print(f" ⚠ Port {_port} is not reachable at {cdp_url}") @@ -5411,7 +5641,7 @@ class HermesCLI: Usage: /reasoning Show current effort level and display state - /reasoning Set reasoning effort (none, low, medium, high, xhigh) + /reasoning Set reasoning effort (none, minimal, low, medium, high, xhigh) /reasoning show|on Show model thinking/reasoning in output /reasoning hide|off Hide model thinking/reasoning from output """ @@ -5429,7 +5659,7 @@ class HermesCLI: display_state = "on ✓" if self.show_reasoning else "off" _cprint(f" {_GOLD}Reasoning effort: {level}{_RST}") _cprint(f" {_GOLD}Reasoning display: {display_state}{_RST}") - _cprint(f" {_DIM}Usage: /reasoning {_RST}") + _cprint(f" {_DIM}Usage: /reasoning {_RST}") return arg = parts[1].strip().lower() @@ -5455,7 +5685,7 @@ class HermesCLI: parsed = _parse_reasoning_config(arg) if parsed is None: _cprint(f" {_DIM}(._.) Unknown argument: {arg}{_RST}") - _cprint(f" {_DIM}Valid levels: none, low, minimal, medium, high, xhigh{_RST}") + _cprint(f" {_DIM}Valid levels: none, minimal, low, medium, high, xhigh{_RST}") _cprint(f" {_DIM}Display: show, hide{_RST}") return @@ -5867,10 +6097,23 @@ class HermesCLI: """Start capturing audio from the microphone.""" if getattr(self, '_should_exit', False): return - from tools.voice_mode import AudioRecorder, check_voice_requirements + from tools.voice_mode import create_audio_recorder, check_voice_requirements reqs = check_voice_requirements() if not reqs["audio_available"]: + if _is_termux_environment(): + details = reqs.get("details", "") + if "Termux:API Android app is not installed" in details: + raise RuntimeError( + "Termux:API command package detected, but the Android app is missing.\n" + "Install/update the Termux:API Android app, then retry /voice on.\n" + "Fallback: pkg install python-numpy portaudio && python -m pip install sounddevice" + ) + raise RuntimeError( + "Voice mode requires either Termux:API microphone access or Python audio libraries.\n" + "Option 1: pkg 
install termux-api and install the Termux:API Android app\n" + "Option 2: pkg install python-numpy portaudio && python -m pip install sounddevice" + ) raise RuntimeError( "Voice mode requires sounddevice and numpy.\n" "Install with: pip install sounddevice numpy\n" @@ -5899,7 +6142,7 @@ class HermesCLI: pass if self._voice_recorder is None: - self._voice_recorder = AudioRecorder() + self._voice_recorder = create_audio_recorder() # Apply config-driven silence params self._voice_recorder._silence_threshold = voice_cfg.get("silence_threshold", 200) @@ -5928,7 +6171,13 @@ class HermesCLI: with self._voice_lock: self._voice_recording = False raise - _cprint(f"\n{_GOLD}● Recording...{_RST} {_DIM}(auto-stops on silence | Ctrl+B to stop & exit continuous){_RST}") + if getattr(self._voice_recorder, "supports_silence_autostop", True): + _recording_hint = "auto-stops on silence | Ctrl+B to stop & exit continuous" + elif _is_termux_environment(): + _recording_hint = "Termux:API capture | Ctrl+B to stop" + else: + _recording_hint = "Ctrl+B to stop" + _cprint(f"\n{_GOLD}● Recording...{_RST} {_DIM}({_recording_hint}){_RST}") # Periodically refresh prompt to update audio level indicator def _refresh_level(): @@ -6136,8 +6385,13 @@ class HermesCLI: for line in reqs["details"].split("\n"): _cprint(f" {_DIM}{line}{_RST}") if reqs["missing_packages"]: - _cprint(f"\n {_BOLD}Install: pip install {' '.join(reqs['missing_packages'])}{_RST}") - _cprint(f" {_DIM}Or: pip install hermes-agent[voice]{_RST}") + if _is_termux_environment(): + _cprint(f"\n {_BOLD}Option 1: pkg install termux-api{_RST}") + _cprint(f" {_DIM}Then install/update the Termux:API Android app for microphone capture{_RST}") + _cprint(f" {_BOLD}Option 2: pkg install python-numpy portaudio && python -m pip install sounddevice{_RST}") + else: + _cprint(f"\n {_BOLD}Install: pip install {' '.join(reqs['missing_packages'])}{_RST}") + _cprint(f" {_DIM}Or: pip install hermes-agent[voice]{_RST}") return with self._voice_lock: @@ 
-7091,27 +7345,39 @@ class HermesCLI: def _get_tui_prompt_fragments(self): """Return the prompt_toolkit fragments for the current interactive state.""" symbol, state_suffix = self._get_tui_prompt_symbols() + compact = self._use_minimal_tui_chrome(width=self._get_tui_terminal_width()) + + def _state_fragment(style: str, icon: str, extra: str = ""): + if compact: + text = icon + if extra: + text = f"{text} {extra.strip()}".rstrip() + return [(style, text + " ")] + if extra: + return [(style, f"{icon} {extra} {state_suffix}")] + return [(style, f"{icon} {state_suffix}")] + if self._voice_recording: bar = self._audio_level_bar() - return [("class:voice-recording", f"● {bar} {state_suffix}")] + return _state_fragment("class:voice-recording", "●", bar) if self._voice_processing: - return [("class:voice-processing", f"◉ {state_suffix}")] + return _state_fragment("class:voice-processing", "◉") if self._sudo_state: - return [("class:sudo-prompt", f"🔐 {state_suffix}")] + return _state_fragment("class:sudo-prompt", "🔐") if self._secret_state: - return [("class:sudo-prompt", f"🔑 {state_suffix}")] + return _state_fragment("class:sudo-prompt", "🔑") if self._approval_state: - return [("class:prompt-working", f"⚠ {state_suffix}")] + return _state_fragment("class:prompt-working", "⚠") if self._clarify_freetext: - return [("class:clarify-selected", f"✎ {state_suffix}")] + return _state_fragment("class:clarify-selected", "✎") if self._clarify_state: - return [("class:prompt-working", f"? 
{state_suffix}")] + return _state_fragment("class:prompt-working", "?") if self._command_running: - return [("class:prompt-working", f"{self._command_spinner_frame()} {state_suffix}")] + return _state_fragment("class:prompt-working", self._command_spinner_frame()) if self._agent_running: - return [("class:prompt-working", f"⚕ {state_suffix}")] + return _state_fragment("class:prompt-working", "⚕") if self._voice_mode: - return [("class:voice-prompt", f"🎤 {state_suffix}")] + return _state_fragment("class:voice-prompt", "🎤") return [("class:prompt", symbol)] def _get_tui_prompt_text(self) -> str: @@ -7967,9 +8233,9 @@ class HermesCLI: def get_hint_height(): if cli_ref._sudo_state or cli_ref._secret_state or cli_ref._approval_state or cli_ref._clarify_state or cli_ref._command_running: return 1 - # Keep a 1-line spacer while agent runs so output doesn't push - # right up against the top rule of the input area - return 1 if cli_ref._agent_running else 0 + # Keep a spacer while the agent runs on roomy terminals, but reclaim + # the row on narrow/mobile screens where every line matters. + return cli_ref._agent_spacer_height() def get_spinner_text(): txt = cli_ref._spinner_text @@ -7978,7 +8244,7 @@ class HermesCLI: return [('class:hint', f' {txt}')] def get_spinner_height(): - return 1 if cli_ref._spinner_text else 0 + return cli_ref._spinner_widget_height() spinner_widget = Window( content=FormattedTextControl(get_spinner_text), @@ -8169,18 +8435,17 @@ class HermesCLI: filter=Condition(lambda: cli_ref._approval_state is not None), ) - # Horizontal rules above and below the input (bronze, 1 line each). - # The bottom rule moves down as the TextArea grows with newlines. - # Using char='─' instead of hardcoded repetition so the rule - # always spans the full terminal width on any screen size. + # Horizontal rules above and below the input. 
+ # On narrow/mobile terminals we keep the top separator for structure but + # hide the bottom one to recover a full row for conversation content. input_rule_top = Window( char='─', - height=1, + height=lambda: cli_ref._tui_input_rule_height("top"), style='class:input-rule', ) input_rule_bot = Window( char='─', - height=1, + height=lambda: cli_ref._tui_input_rule_height("bottom"), style='class:input-rule', ) @@ -8190,10 +8455,9 @@ class HermesCLI: def _get_image_bar(): if not cli_ref._attached_images: return [] - base = cli_ref._image_counter - len(cli_ref._attached_images) + 1 - badges = " ".join( - f"[📎 Image #{base + i}]" - for i in range(len(cli_ref._attached_images)) + badges = _format_image_attachment_badges( + cli_ref._attached_images, + cli_ref._image_counter, ) return [("class:image-badge", f" {badges} ")] @@ -8204,13 +8468,7 @@ class HermesCLI: # Persistent voice mode status bar (visible only when voice mode is on) def _get_voice_status(): - if cli_ref._voice_recording: - return [('class:voice-status-recording', ' ● REC Ctrl+B to stop ')] - if cli_ref._voice_processing: - return [('class:voice-status', ' ◉ Transcribing... ')] - tts = " | TTS on" if cli_ref._voice_tts else "" - cont = " | Continuous" if cli_ref._voice_continuous else "" - return [('class:voice-status', f' 🎤 Voice mode{tts}{cont} — Ctrl+B to record ')] + return cli_ref._get_voice_status_fragments() voice_status_bar = ConditionalContainer( Window( @@ -8666,6 +8924,7 @@ class HermesCLI: def main( query: str = None, q: str = None, + image: str = None, toolsets: str = None, skills: str | list[str] | tuple[str, ...] = None, model: str = None, @@ -8691,6 +8950,7 @@ def main( Args: query: Single query to execute (then exit). 
Alias: -q q: Shorthand for --query + image: Optional local image path to attach to a single query toolsets: Comma-separated list of toolsets to enable (e.g., "web,terminal") skills: Comma-separated or repeated list of skills to preload for the session model: Model to use (default: anthropic/claude-opus-4-20250514) @@ -8711,6 +8971,7 @@ def main( python cli.py --toolsets web,terminal # Use specific toolsets python cli.py --skills hermes-agent-dev,github-auth python cli.py -q "What is Python?" # Single query mode + python cli.py -q "Describe this" --image ~/storage/shared/Pictures/cat.png python cli.py --list-tools # List tools and exit python cli.py --resume 20260225_143052_a1b2c3 # Resume session python cli.py -w # Start in isolated git worktree @@ -8833,13 +9094,21 @@ def main( atexit.register(_run_cleanup) # Handle single query mode - if query: + if query or image: + query, single_query_images = _collect_query_images(query, image) if quiet: # Quiet mode: suppress banner, spinner, tool previews. # Only print the final response and parseable session info. 
cli.tool_progress_mode = "off" if cli._ensure_runtime_credentials(): - turn_route = cli._resolve_turn_agent_config(query) + effective_query = query + if single_query_images: + effective_query = cli._preprocess_images_with_vision( + query, + single_query_images, + announce=False, + ) + turn_route = cli._resolve_turn_agent_config(effective_query) if turn_route["signature"] != cli._active_agent_route_signature: cli.agent = None if cli._init_agent( @@ -8848,8 +9117,9 @@ def main( route_label=turn_route["label"], ): cli.agent.quiet_mode = True + cli.agent.suppress_status_output = True result = cli.agent.run_conversation( - user_message=query, + user_message=effective_query, conversation_history=cli.conversation_history, ) response = result.get("final_response", "") if isinstance(result, dict) else str(result) @@ -8864,8 +9134,10 @@ def main( sys.exit(1) else: cli.show_banner() - cli.console.print(f"[bold blue]Query:[/] {query}") - cli.chat(query) + _query_label = query or ("[image attached]" if single_query_images else "") + if _query_label: + cli.console.print(f"[bold blue]Query:[/] {_query_label}") + cli.chat(query, images=single_query_images or None) cli._print_exit_summary() return diff --git a/constraints-termux.txt b/constraints-termux.txt new file mode 100644 index 0000000000..dcc1becf64 --- /dev/null +++ b/constraints-termux.txt @@ -0,0 +1,15 @@ +# Termux / Android dependency constraints for Hermes Agent. +# +# Usage: +# python -m pip install -e '.[termux]' -c constraints-termux.txt +# +# These pins keep the tested Android install path stable when upstream packages +# move faster than Termux-compatible wheels / sdists. 
+ +ipython<10 +jedi>=0.18.1,<0.20 +parso>=0.8.4,<0.9 +stack-data>=0.6,<0.7 +pexpect>4.3,<5 +matplotlib-inline>=0.1.7,<0.2 +asttokens>=2.1,<3 diff --git a/gateway/config.py b/gateway/config.py index 96ee831701..a50c9331ca 100644 --- a/gateway/config.py +++ b/gateway/config.py @@ -532,6 +532,8 @@ def load_gateway_config() -> GatewayConfig: bridged["reply_prefix"] = platform_cfg["reply_prefix"] if "require_mention" in platform_cfg: bridged["require_mention"] = platform_cfg["require_mention"] + if "free_response_channels" in platform_cfg: + bridged["free_response_channels"] = platform_cfg["free_response_channels"] if "mention_patterns" in platform_cfg: bridged["mention_patterns"] = platform_cfg["mention_patterns"] if not bridged: @@ -546,6 +548,19 @@ def load_gateway_config() -> GatewayConfig: plat_data["extra"] = extra extra.update(bridged) + # Slack settings → env vars (env vars take precedence) + slack_cfg = yaml_cfg.get("slack", {}) + if isinstance(slack_cfg, dict): + if "require_mention" in slack_cfg and not os.getenv("SLACK_REQUIRE_MENTION"): + os.environ["SLACK_REQUIRE_MENTION"] = str(slack_cfg["require_mention"]).lower() + if "allow_bots" in slack_cfg and not os.getenv("SLACK_ALLOW_BOTS"): + os.environ["SLACK_ALLOW_BOTS"] = str(slack_cfg["allow_bots"]).lower() + frc = slack_cfg.get("free_response_channels") + if frc is not None and not os.getenv("SLACK_FREE_RESPONSE_CHANNELS"): + if isinstance(frc, list): + frc = ",".join(str(v) for v in frc) + os.environ["SLACK_FREE_RESPONSE_CHANNELS"] = str(frc) + # Discord settings → env vars (env vars take precedence) discord_cfg = yaml_cfg.get("discord", {}) if isinstance(discord_cfg, dict): diff --git a/gateway/platforms/base.py b/gateway/platforms/base.py index aa40ece6d8..2831eb98fa 100644 --- a/gateway/platforms/base.py +++ b/gateway/platforms/base.py @@ -10,18 +10,142 @@ import logging import os import random import re +import subprocess +import sys import uuid from abc import ABC, abstractmethod from urllib.parse 
import urlsplit logger = logging.getLogger(__name__) + + +def _detect_macos_system_proxy() -> str | None: + """Read the macOS system HTTP(S) proxy via ``scutil --proxy``. + + Returns an ``http://host:port`` URL string if an HTTP or HTTPS proxy is + enabled, otherwise *None*. Falls back silently on non-macOS or on any + subprocess error. + """ + if sys.platform != "darwin": + return None + try: + out = subprocess.check_output( + ["scutil", "--proxy"], timeout=3, text=True, stderr=subprocess.DEVNULL, + ) + except Exception: + return None + + props: dict[str, str] = {} + for line in out.splitlines(): + line = line.strip() + if " : " in line: + key, _, val = line.partition(" : ") + props[key.strip()] = val.strip() + + # Prefer HTTPS, fall back to HTTP + for enable_key, host_key, port_key in ( + ("HTTPSEnable", "HTTPSProxy", "HTTPSPort"), + ("HTTPEnable", "HTTPProxy", "HTTPPort"), + ): + if props.get(enable_key) == "1": + host = props.get(host_key) + port = props.get(port_key) + if host and port: + return f"http://{host}:{port}" + return None + + +def resolve_proxy_url(platform_env_var: str | None = None) -> str | None: + """Return a proxy URL from env vars, or macOS system proxy. + + Check order: + 0. *platform_env_var* (e.g. ``DISCORD_PROXY``) — highest priority + 1. HTTPS_PROXY / HTTP_PROXY / ALL_PROXY (and lowercase variants) + 2. macOS system proxy via ``scutil --proxy`` (auto-detect) + + Returns *None* if no proxy is found. + """ + if platform_env_var: + value = (os.environ.get(platform_env_var) or "").strip() + if value: + return value + for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY", + "https_proxy", "http_proxy", "all_proxy"): + value = (os.environ.get(key) or "").strip() + if value: + return value + return _detect_macos_system_proxy() + + +def proxy_kwargs_for_bot(proxy_url: str | None) -> dict: + """Build kwargs for ``commands.Bot()`` / ``discord.Client()`` with proxy. 
+ + Returns: + - SOCKS URL → ``{"connector": ProxyConnector(..., rdns=True)}`` + - HTTP URL → ``{"proxy": url}`` + - *None* → ``{}`` + + ``rdns=True`` forces remote DNS resolution through the proxy — required + by many SOCKS implementations (Shadowrocket, Clash) and essential for + bypassing DNS pollution behind the GFW. + """ + if not proxy_url: + return {} + if proxy_url.lower().startswith("socks"): + try: + from aiohttp_socks import ProxyConnector + + connector = ProxyConnector.from_url(proxy_url, rdns=True) + return {"connector": connector} + except ImportError: + logger.warning( + "aiohttp_socks not installed — SOCKS proxy %s ignored. " + "Run: pip install aiohttp-socks", + proxy_url, + ) + return {} + return {"proxy": proxy_url} + + +def proxy_kwargs_for_aiohttp(proxy_url: str | None) -> tuple[dict, dict]: + """Build kwargs for standalone ``aiohttp.ClientSession`` with proxy. + + Returns ``(session_kwargs, request_kwargs)`` where: + - SOCKS → ``({"connector": ProxyConnector(...)}, {})`` + - HTTP → ``({}, {"proxy": url})`` + - None → ``({}, {})`` + + Usage:: + + sess_kw, req_kw = proxy_kwargs_for_aiohttp(proxy_url) + async with aiohttp.ClientSession(**sess_kw) as session: + async with session.get(url, **req_kw) as resp: + ... + """ + if not proxy_url: + return {}, {} + if proxy_url.lower().startswith("socks"): + try: + from aiohttp_socks import ProxyConnector + + connector = ProxyConnector.from_url(proxy_url, rdns=True) + return {"connector": connector}, {} + except ImportError: + logger.warning( + "aiohttp_socks not installed — SOCKS proxy %s ignored. 
" + "Run: pip install aiohttp-socks", + proxy_url, + ) + return {}, {} + return {}, {"proxy": proxy_url} + + from dataclasses import dataclass, field from datetime import datetime from pathlib import Path from typing import Dict, List, Optional, Any, Callable, Awaitable, Tuple from enum import Enum -import sys from pathlib import Path as _Path sys.path.insert(0, str(_Path(__file__).resolve().parents[2])) diff --git a/gateway/platforms/discord.py b/gateway/platforms/discord.py index 2ace06e779..a19b6d6663 100644 --- a/gateway/platforms/discord.py +++ b/gateway/platforms/discord.py @@ -529,10 +529,17 @@ class DiscordAdapter(BasePlatformAdapter): intents.members = any(not entry.isdigit() for entry in self._allowed_user_ids) intents.voice_states = True - # Create bot + # Resolve proxy (DISCORD_PROXY > generic env vars > macOS system proxy) + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_bot + proxy_url = resolve_proxy_url(platform_env_var="DISCORD_PROXY") + if proxy_url: + logger.info("[%s] Using proxy for Discord: %s", self.name, proxy_url) + + # Create bot — proxy= for HTTP, connector= for SOCKS self._client = commands.Bot( command_prefix="!", # Not really used, we handle raw messages intents=intents, + **proxy_kwargs_for_bot(proxy_url), ) adapter_self = self # capture for closure @@ -1307,8 +1314,11 @@ class DiscordAdapter(BasePlatformAdapter): # Download the image and send as a Discord file attachment # (Discord renders attachments inline, unlike plain URLs) - async with aiohttp.ClientSession() as session: - async with session.get(image_url, timeout=aiohttp.ClientTimeout(total=30)) as resp: + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp + _proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY") + _sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy) + async with aiohttp.ClientSession(**_sess_kw) as session: + async with session.get(image_url, timeout=aiohttp.ClientTimeout(total=30), **_req_kw) as resp: 
if resp.status != 200: raise Exception(f"Failed to download image: HTTP {resp.status}") @@ -1585,7 +1595,7 @@ class DiscordAdapter(BasePlatformAdapter): await self._run_simple_slash(interaction, f"/model {name}".strip()) @tree.command(name="reasoning", description="Show or change reasoning effort") - @discord.app_commands.describe(effort="Reasoning effort: xhigh, high, medium, low, minimal, or none.") + @discord.app_commands.describe(effort="Reasoning effort: none, minimal, low, medium, high, or xhigh.") async def slash_reasoning(interaction: discord.Interaction, effort: str = ""): await self._run_simple_slash(interaction, f"/reasoning {effort}".strip()) @@ -2391,10 +2401,14 @@ class DiscordAdapter(BasePlatformAdapter): else: try: import aiohttp - async with aiohttp.ClientSession() as session: + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp + _proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY") + _sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy) + async with aiohttp.ClientSession(**_sess_kw) as session: async with session.get( att.url, timeout=aiohttp.ClientTimeout(total=30), + **_req_kw, ) as resp: if resp.status != 200: raise Exception(f"HTTP {resp.status}") diff --git a/gateway/platforms/slack.py b/gateway/platforms/slack.py index 26184b7eb5..b4973bbbdd 100644 --- a/gateway/platforms/slack.py +++ b/gateway/platforms/slack.py @@ -14,6 +14,7 @@ import logging import os import re import time +from dataclasses import dataclass, field from typing import Dict, Optional, Any, Tuple try: @@ -45,6 +46,14 @@ from gateway.platforms.base import ( logger = logging.getLogger(__name__) +@dataclass +class _ThreadContextCache: + """Cache entry for fetched thread context.""" + content: str + fetched_at: float = field(default_factory=time.monotonic) + message_count: int = 0 + + def check_slack_requirements() -> bool: """Check if Slack dependencies are available.""" return SLACK_AVAILABLE @@ -101,6 +110,9 @@ class 
SlackAdapter(BasePlatformAdapter): # session + memory scoping. self._assistant_threads: Dict[Tuple[str, str], Dict[str, str]] = {} self._ASSISTANT_THREADS_MAX = 5000 + # Cache for _fetch_thread_context results: cache_key → _ThreadContextCache + self._thread_context_cache: Dict[str, _ThreadContextCache] = {} + self._THREAD_CACHE_TTL = 60.0 async def connect(self) -> bool: """Connect to Slack via Socket Mode.""" @@ -281,6 +293,7 @@ class SlackAdapter(BasePlatformAdapter): kwargs = { "channel": chat_id, "text": chunk, + "mrkdwn": True, } if thread_ts: kwargs["thread_ts"] = thread_ts @@ -323,9 +336,7 @@ class SlackAdapter(BasePlatformAdapter): if not self._app: return SendResult(success=False, error="Not connected") try: - # Convert standard markdown → Slack mrkdwn formatted = self.format_message(content) - await self._get_client(chat_id).chat_update( channel=chat_id, ts=message_id, @@ -457,13 +468,36 @@ class SlackAdapter(BasePlatformAdapter): text = re.sub(r'(`[^`]+`)', lambda m: _ph(m.group(0)), text) # 3) Convert markdown links [text](url) → + def _convert_markdown_link(m): + label = m.group(1) + url = m.group(2).strip() + if url.startswith('<') and url.endswith('>'): + url = url[1:-1].strip() + return _ph(f'<{url}|{label}>') + text = re.sub( - r'\[([^\]]+)\]\(([^)]+)\)', - lambda m: _ph(f'<{m.group(2)}|{m.group(1)}>'), + r'\[([^\]]+)\]\(([^()]*(?:\([^()]*\)[^()]*)*)\)', + _convert_markdown_link, text, ) - # 4) Convert headers (## Title) → *Title* (bold) + # 4) Protect existing Slack entities/manual links so escaping and later + # formatting passes don't break them. + text = re.sub( + r'(<(?:[@#!]|(?:https?|mailto|tel):)[^>\n]+>)', + lambda m: _ph(m.group(1)), + text, + ) + + # 5) Protect blockquote markers before escaping + text = re.sub(r'^(>+\s)', lambda m: _ph(m.group(0)), text, flags=re.MULTILINE) + + # 6) Escape Slack control characters in remaining plain text. + # Unescape first so already-escaped input doesn't get double-escaped. 
+        text = text.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
+        text = text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
+
+        # 7) Convert headers (## Title) → *Title* (bold)
         def _convert_header(m):
             inner = m.group(1).strip()
             # Strip redundant bold markers inside a header
@@ -474,34 +508,39 @@ class SlackAdapter(BasePlatformAdapter):
             r'^#{1,6}\s+(.+)$', _convert_header, text, flags=re.MULTILINE
         )
 
-        # 5) Convert bold: **text** → *text* (Slack bold)
+        # 8) Convert bold+italic: ***text*** → *_text_* (Slack bold wrapping italic)
+        text = re.sub(
+            r'\*\*\*(.+?)\*\*\*',
+            lambda m: _ph(f'*_{m.group(1)}_*'),
+            text,
+        )
+
+        # 9) Convert bold: **text** → *text* (Slack bold)
         text = re.sub(
             r'\*\*(.+?)\*\*',
             lambda m: _ph(f'*{m.group(1)}*'),
             text,
         )
 
-        # 6) Convert italic: _text_ stays as _text_ (already Slack italic)
-        # Single *text* → _text_ (Slack italic)
+        # 10) Convert italic: _text_ stays as _text_ (already Slack italic)
+        # Single *text* → _text_ (Slack italic)
         text = re.sub(
             r'(?<!\*)\*(.+?)\*(?!\*)',
             lambda m: _ph(f'_{m.group(1)}_'),
             text,
         )
 
-        # 7) Blockquotes: > text → > text (same syntax, just ensure
-        # no extra escaping happens to the > character)
-        # Slack uses the same > prefix, so this is a no-op for content.
+        # 12) Blockquotes: > prefix is already protected by step 5 above.
- # 9) Restore placeholders in reverse order - for key in reversed(list(placeholders.keys())): + # 13) Restore placeholders in reverse order + for key in reversed(placeholders): text = text.replace(key, placeholders[key]) return text @@ -914,9 +953,26 @@ class SlackAdapter(BasePlatformAdapter): if v > cutoff } - # Ignore bot messages (including our own) + # Bot message filtering (SLACK_ALLOW_BOTS / config allow_bots): + # "none" — ignore all bot messages (default, backward-compatible) + # "mentions" — accept bot messages only when they @mention us + # "all" — accept all bot messages (except our own) if event.get("bot_id") or event.get("subtype") == "bot_message": - return + allow_bots = self.config.extra.get("allow_bots", "") + if not allow_bots: + allow_bots = os.getenv("SLACK_ALLOW_BOTS", "none") + allow_bots = str(allow_bots).lower().strip() + if allow_bots == "none": + return + elif allow_bots == "mentions": + text_check = event.get("text", "") + if self._bot_user_id and f"<@{self._bot_user_id}>" not in text_check: + return + # "all" falls through to process the message + # Always ignore our own messages to prevent echo loops + msg_user = event.get("user", "") + if msg_user and self._bot_user_id and msg_user == self._bot_user_id: + return # Ignore message edits and deletions subtype = event.get("subtype") @@ -948,7 +1004,7 @@ class SlackAdapter(BasePlatformAdapter): channel_type = event.get("channel_type", "") if not channel_type and channel_id.startswith("D"): channel_type = "im" - is_dm = channel_type == "im" + is_dm = channel_type in ("im", "mpim") # Both 1:1 and group DMs # Build thread_ts for session keying. # In channels: fall back to ts so each top-level @mention starts a @@ -961,6 +1017,8 @@ class SlackAdapter(BasePlatformAdapter): thread_ts = event.get("thread_ts") or ts # ts fallback for channels # In channels, respond if: + # 0. Channel is in free_response_channels, OR require_mention is + # disabled — always process regardless of mention. # 1. 
The bot is @mentioned in this message, OR # 2. The message is a reply in a thread the bot started/participated in, OR # 3. The message is in a thread where the bot was previously @mentioned, OR @@ -970,24 +1028,29 @@ class SlackAdapter(BasePlatformAdapter): event_thread_ts = event.get("thread_ts") is_thread_reply = bool(event_thread_ts and event_thread_ts != ts) - if not is_dm and bot_uid and not is_mentioned: - reply_to_bot_thread = ( - is_thread_reply and event_thread_ts in self._bot_message_ts - ) - in_mentioned_thread = ( - event_thread_ts is not None - and event_thread_ts in self._mentioned_threads - ) - has_session = ( - is_thread_reply - and self._has_active_session_for_thread( - channel_id=channel_id, - thread_ts=event_thread_ts, - user_id=user_id, + if not is_dm and bot_uid: + if channel_id in self._slack_free_response_channels(): + pass # Free-response channel — always process + elif not self._slack_require_mention(): + pass # Mention requirement disabled globally for Slack + elif not is_mentioned: + reply_to_bot_thread = ( + is_thread_reply and event_thread_ts in self._bot_message_ts ) - ) - if not reply_to_bot_thread and not in_mentioned_thread and not has_session: - return + in_mentioned_thread = ( + event_thread_ts is not None + and event_thread_ts in self._mentioned_threads + ) + has_session = ( + is_thread_reply + and self._has_active_session_for_thread( + channel_id=channel_id, + thread_ts=event_thread_ts, + user_id=user_id, + ) + ) + if not reply_to_bot_thread and not in_mentioned_thread and not has_session: + return if is_mentioned: # Strip the bot mention from the text @@ -1128,14 +1191,19 @@ class SlackAdapter(BasePlatformAdapter): reply_to_message_id=thread_ts if thread_ts != ts else None, ) - # Add 👀 reaction to acknowledge receipt - await self._add_reaction(channel_id, ts, "eyes") + # Only react when bot is directly addressed (DM or @mention). 
+ # In listen-all channels (require_mention=false), reacting to every + # casual message would be noisy. + _should_react = is_dm or is_mentioned + + if _should_react: + await self._add_reaction(channel_id, ts, "eyes") await self.handle_message(msg_event) - # Replace 👀 with ✅ when done - await self._remove_reaction(channel_id, ts, "eyes") - await self._add_reaction(channel_id, ts, "white_check_mark") + if _should_react: + await self._remove_reaction(channel_id, ts, "eyes") + await self._add_reaction(channel_id, ts, "white_check_mark") # ----- Approval button support (Block Kit) ----- @@ -1229,6 +1297,20 @@ class SlackAdapter(BasePlatformAdapter): msg_ts = message.get("ts", "") channel_id = body.get("channel", {}).get("id", "") user_name = body.get("user", {}).get("name", "unknown") + user_id = body.get("user", {}).get("id", "") + + # Only authorized users may click approval buttons. Button clicks + # bypass the normal message auth flow in gateway/run.py, so we must + # check here as well. 
+ allowed_csv = os.getenv("SLACK_ALLOWED_USERS", "").strip() + if allowed_csv: + allowed_ids = {uid.strip() for uid in allowed_csv.split(",") if uid.strip()} + if "*" not in allowed_ids and user_id not in allowed_ids: + logger.warning( + "[Slack] Unauthorized approval click by %s (%s) — ignoring", + user_name, user_id, + ) + return # Map action_id to approval choice choice_map = { @@ -1239,10 +1321,9 @@ class SlackAdapter(BasePlatformAdapter): } choice = choice_map.get(action_id, "deny") - # Prevent double-clicks - if self._approval_resolved.get(msg_ts, False): + # Prevent double-clicks — atomic pop; first caller gets False, others get True (default) + if self._approval_resolved.pop(msg_ts, True): return - self._approval_resolved[msg_ts] = True # Update the message to show the decision and remove buttons label_map = { @@ -1297,8 +1378,7 @@ class SlackAdapter(BasePlatformAdapter): except Exception as exc: logger.error("Failed to resolve gateway approval from Slack button: %s", exc) - # Clean up stale approval state - self._approval_resolved.pop(msg_ts, None) + # (approval state already consumed by atomic pop above) # ----- Thread context fetching ----- @@ -1309,57 +1389,104 @@ class SlackAdapter(BasePlatformAdapter): """Fetch recent thread messages to provide context when the bot is mentioned mid-thread for the first time. - Returns a formatted string with thread history, or empty string on - failure or if the thread is empty (just the parent message). + This method is only called when there is NO active session for the + thread (guarded at the call site by _has_active_session_for_thread). + That guard ensures thread messages are prepended only on the very + first turn — after that the session history already holds them, so + there is no duplication across subsequent turns. + + Results are cached for _THREAD_CACHE_TTL seconds per thread to avoid + hammering conversations.replies (Tier 3, ~50 req/min). 
+ + Returns a formatted string with prior thread history, or empty string + on failure or if the thread has no prior messages. """ + cache_key = f"{channel_id}:{thread_ts}" + now = time.monotonic() + cached = self._thread_context_cache.get(cache_key) + if cached and (now - cached.fetched_at) < self._THREAD_CACHE_TTL: + return cached.content + try: client = self._get_client(channel_id) - result = await client.conversations_replies( - channel=channel_id, - ts=thread_ts, - limit=limit + 1, # +1 because it includes the current message - inclusive=True, - ) + + # Retry with exponential backoff for Tier-3 rate limits (429). + result = None + for attempt in range(3): + try: + result = await client.conversations_replies( + channel=channel_id, + ts=thread_ts, + limit=limit + 1, # +1 because it includes the current message + inclusive=True, + ) + break + except Exception as exc: + # Check for rate-limit error from slack_sdk + err_str = str(exc).lower() + is_rate_limit = ( + "ratelimited" in err_str + or "429" in err_str + or "rate_limited" in err_str + ) + if is_rate_limit and attempt < 2: + retry_after = 1.0 * (2 ** attempt) # 1s, 2s + logger.warning( + "[Slack] conversations.replies rate limited; retrying in %.1fs (attempt %d/3)", + retry_after, attempt + 1, + ) + await asyncio.sleep(retry_after) + continue + raise + + if result is None: + return "" + messages = result.get("messages", []) if not messages: return "" + bot_uid = self._team_bot_user_ids.get(team_id, self._bot_user_id) context_parts = [] for msg in messages: msg_ts = msg.get("ts", "") - # Skip the current message (the one that triggered this fetch) + # Exclude the current triggering message — it will be delivered + # as the user message itself, so including it here would duplicate it. if msg_ts == current_ts: continue - # Skip bot messages from ourselves + # Exclude our own bot messages to avoid circular context. 
if msg.get("bot_id") or msg.get("subtype") == "bot_message": continue - msg_user = msg.get("user", "unknown") msg_text = msg.get("text", "").strip() if not msg_text: continue # Strip bot mentions from context messages - bot_uid = self._team_bot_user_ids.get(team_id, self._bot_user_id) if bot_uid: msg_text = msg_text.replace(f"<@{bot_uid}>", "").strip() - # Mark the thread parent + msg_user = msg.get("user", "unknown") is_parent = msg_ts == thread_ts prefix = "[thread parent] " if is_parent else "" - - # Resolve user name (cached) name = await self._resolve_user_name(msg_user, chat_id=channel_id) context_parts.append(f"{prefix}{name}: {msg_text}") - if not context_parts: - return "" + content = "" + if context_parts: + content = ( + "[Thread context — prior messages in this thread (not yet in conversation history):]\n" + + "\n".join(context_parts) + + "\n[End of thread context]\n\n" + ) - return ( - "[Thread context — previous messages in this thread:]\n" - + "\n".join(context_parts) - + "\n[End of thread context]\n\n" + self._thread_context_cache[cache_key] = _ThreadContextCache( + content=content, + fetched_at=now, + message_count=len(context_parts), ) + return content + except Exception as e: logger.warning("[Slack] Failed to fetch thread context: %s", e) return "" @@ -1515,3 +1642,30 @@ class SlackAdapter(BasePlatformAdapter): continue raise raise last_exc + + # ── Channel mention gating ───────────────────────────────────────────── + + def _slack_require_mention(self) -> bool: + """Return whether channel messages require an explicit bot mention. + + Uses explicit-false parsing (like Discord/Matrix) rather than + truthy parsing, since the safe default is True (gating on). + Unrecognised or empty values keep gating enabled. 
+ """ + configured = self.config.extra.get("require_mention") + if configured is not None: + if isinstance(configured, str): + return configured.lower() not in ("false", "0", "no", "off") + return bool(configured) + return os.getenv("SLACK_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no", "off") + + def _slack_free_response_channels(self) -> set: + """Return channel IDs where no @mention is required.""" + raw = self.config.extra.get("free_response_channels") + if raw is None: + raw = os.getenv("SLACK_FREE_RESPONSE_CHANNELS", "") + if isinstance(raw, list): + return {str(part).strip() for part in raw if str(part).strip()} + if isinstance(raw, str) and raw.strip(): + return {part.strip() for part in raw.split(",") if part.strip()} + return set() diff --git a/gateway/platforms/telegram.py b/gateway/platforms/telegram.py index 85b8afc97d..e127841b5d 100644 --- a/gateway/platforms/telegram.py +++ b/gateway/platforms/telegram.py @@ -1398,6 +1398,15 @@ class TelegramAdapter(BasePlatformAdapter): await query.answer(text="Invalid approval data.") return + # Only authorized users may click approval buttons. 
+ caller_id = str(getattr(query.from_user, "id", "")) + allowed_csv = os.getenv("TELEGRAM_ALLOWED_USERS", "").strip() + if allowed_csv: + allowed_ids = {uid.strip() for uid in allowed_csv.split(",") if uid.strip()} + if "*" not in allowed_ids and caller_id not in allowed_ids: + await query.answer(text="⛔ You are not authorized to approve commands.") + return + session_key = self._approval_state.pop(approval_id, None) if not session_key: await query.answer(text="This approval has already been resolved.") diff --git a/gateway/platforms/telegram_network.py b/gateway/platforms/telegram_network.py index 9f6d8bb460..2b26ab9163 100644 --- a/gateway/platforms/telegram_network.py +++ b/gateway/platforms/telegram_network.py @@ -45,11 +45,9 @@ _SEED_FALLBACK_IPS: list[str] = ["149.154.167.220"] def _resolve_proxy_url() -> str | None: - for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY", "https_proxy", "http_proxy", "all_proxy"): - value = (os.environ.get(key) or "").strip() - if value: - return value - return None + # Delegate to shared implementation (env vars + macOS system proxy detection) + from gateway.platforms.base import resolve_proxy_url + return resolve_proxy_url() class TelegramFallbackTransport(httpx.AsyncBaseTransport): diff --git a/gateway/run.py b/gateway/run.py index 91e4a7d567..b184b74d4a 100644 --- a/gateway/run.py +++ b/gateway/run.py @@ -925,8 +925,8 @@ class GatewayRunner: def _load_reasoning_config() -> dict | None: """Load reasoning effort from config.yaml. - Reads agent.reasoning_effort from config.yaml. Valid: "xhigh", - "high", "medium", "low", "minimal", "none". Returns None to use + Reads agent.reasoning_effort from config.yaml. Valid: "none", + "minimal", "low", "medium", "high", "xhigh". Returns None to use default (medium). 
""" from hermes_constants import parse_reasoning_effort @@ -4937,7 +4937,7 @@ class GatewayRunner: Usage: /reasoning Show current effort level and display state - /reasoning Set reasoning effort (none, low, medium, high, xhigh) + /reasoning Set reasoning effort (none, minimal, low, medium, high, xhigh) /reasoning show|on Show model reasoning in responses /reasoning hide|off Hide model reasoning from responses """ @@ -4982,7 +4982,7 @@ class GatewayRunner: "🧠 **Reasoning Settings**\n\n" f"**Effort:** `{level}`\n" f"**Display:** {display_state}\n\n" - "_Usage:_ `/reasoning `" + "_Usage:_ `/reasoning `" ) # Display toggle @@ -5000,12 +5000,12 @@ class GatewayRunner: effort = args.strip() if effort == "none": parsed = {"enabled": False} - elif effort in ("xhigh", "high", "medium", "low", "minimal"): + elif effort in ("minimal", "low", "medium", "high", "xhigh"): parsed = {"enabled": True, "effort": effort} else: return ( f"⚠️ Unknown argument: `{effort}`\n\n" - "**Valid levels:** none, low, minimal, medium, high, xhigh\n" + "**Valid levels:** none, minimal, low, medium, high, xhigh\n" "**Display:** show, hide" ) diff --git a/gateway/stream_consumer.py b/gateway/stream_consumer.py index cc3d64d136..ce6820abca 100644 --- a/gateway/stream_consumer.py +++ b/gateway/stream_consumer.py @@ -136,7 +136,34 @@ class GatewayStreamConsumer: if should_edit and self._accumulated: # Split overflow: if accumulated text exceeds the platform - # limit, finalize the current message and start a new one. + # limit, split into properly sized chunks. + if ( + len(self._accumulated) > _safe_limit + and self._message_id is None + ): + # No existing message to edit (first message or after a + # segment break). Use truncate_message — the same + # helper the non-streaming path uses — to split with + # proper word/code-fence boundaries and chunk + # indicators like "(1/2)". 
+ chunks = self.adapter.truncate_message( + self._accumulated, _safe_limit + ) + for chunk in chunks: + await self._send_new_chunk(chunk, self._message_id) + self._accumulated = "" + self._last_sent_text = "" + self._last_edit_time = time.monotonic() + if got_done: + return + if got_segment_break: + self._message_id = None + self._fallback_final_send = False + self._fallback_prefix = "" + continue + + # Existing message: edit it with the first chunk, then + # start a new message for the overflow remainder. while ( len(self._accumulated) > _safe_limit and self._message_id is not None @@ -226,6 +253,34 @@ class GatewayStreamConsumer: # Strip trailing whitespace/newlines but preserve leading content return cleaned.rstrip() + async def _send_new_chunk(self, text: str, reply_to_id: Optional[str]) -> Optional[str]: + """Send a new message chunk, optionally threaded to a previous message. + + Returns the message_id so callers can thread subsequent chunks. + """ + text = self._clean_for_display(text) + if not text.strip(): + return reply_to_id + try: + meta = dict(self.metadata) if self.metadata else {} + result = await self.adapter.send( + chat_id=self.chat_id, + content=text, + reply_to=reply_to_id, + metadata=meta, + ) + if result.success and result.message_id: + self._message_id = str(result.message_id) + self._already_sent = True + self._last_sent_text = text + return str(result.message_id) + else: + self._edit_supported = False + return reply_to_id + except Exception as e: + logger.error("Stream send chunk error: %s", e) + return reply_to_id + def _visible_prefix(self) -> str: """Return the visible text already shown in the streamed message.""" prefix = self._last_sent_text or "" diff --git a/hermes_cli/auth.py b/hermes_cli/auth.py index b7360fdd32..4d59f7dbf9 100644 --- a/hermes_cli/auth.py +++ b/hermes_cli/auth.py @@ -250,7 +250,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = { # Kimi Code Endpoint Detection # 
============================================================================= -# Kimi Code (platform.kimi.ai) issues keys prefixed "sk-kimi-" that only work +# Kimi Code (kimi.com/code) issues keys prefixed "sk-kimi-" that only work # on api.kimi.com/coding/v1. Legacy keys from platform.moonshot.ai work on # api.moonshot.ai/v1 (the default). Auto-detect when user hasn't set # KIMI_BASE_URL explicitly. @@ -3017,12 +3017,15 @@ def _login_nous(args, pconfig: ProviderConfig) -> None: _save_provider_state(auth_store, "nous", auth_state) saved_to = _save_auth_store(auth_store) - config_path = _update_config_for_provider("nous", inference_base_url) print() print("Login successful!") print(f" Auth state: {saved_to}") - print(f" Config updated: {config_path} (model.provider=nous)") + # Resolve model BEFORE writing provider to config.yaml so we never + # leave the config in a half-updated state (provider=nous but model + # still set to the previous provider's model, e.g. opus from + # OpenRouter). The auth.json active_provider was already set above. + selected_model = None try: runtime_key = auth_state.get("agent_key") or auth_state.get("access_token") if not isinstance(runtime_key, str) or not runtime_key: @@ -3056,9 +3059,6 @@ def _login_nous(args, pconfig: ProviderConfig) -> None: unavailable_models=unavailable_models, portal_url=_portal, ) - if selected_model: - _save_model_choice(selected_model) - print(f"Default model set to: {selected_model}") elif unavailable_models: _url = (_portal or DEFAULT_NOUS_PORTAL_URL).rstrip("/") print("No free models currently available.") @@ -3070,6 +3070,15 @@ def _login_nous(args, pconfig: ProviderConfig) -> None: print() print(f"Login succeeded, but could not fetch available models. Reason: {message}") + # Write provider + model atomically so config is never mismatched. 
+ config_path = _update_config_for_provider( + "nous", inference_base_url, default_model=selected_model, + ) + if selected_model: + _save_model_choice(selected_model) + print(f"Default model set to: {selected_model}") + print(f" Config updated: {config_path} (model.provider=nous)") + except KeyboardInterrupt: print("\nLogin cancelled.") raise SystemExit(130) diff --git a/hermes_cli/commands.py b/hermes_cli/commands.py index a679817b38..5230839ef5 100644 --- a/hermes_cli/commands.py +++ b/hermes_cli/commands.py @@ -89,8 +89,7 @@ COMMAND_REGISTRY: list[CommandDef] = [ CommandDef("model", "Switch model for this session", "Configuration", args_hint="[model] [--global]"), CommandDef("provider", "Show available providers and current provider", "Configuration"), - CommandDef("prompt", "View/set custom system prompt", "Configuration", - cli_only=True, args_hint="[text]", subcommands=("clear",)), + CommandDef("personality", "Set a predefined personality", "Configuration", args_hint="[name]"), CommandDef("statusbar", "Toggle the context/model status bar", "Configuration", @@ -102,7 +101,7 @@ COMMAND_REGISTRY: list[CommandDef] = [ "Configuration"), CommandDef("reasoning", "Manage reasoning effort and display", "Configuration", args_hint="[level|show|hide]", - subcommands=("none", "low", "minimal", "medium", "high", "xhigh", "show", "hide", "on", "off")), + subcommands=("none", "minimal", "low", "medium", "high", "xhigh", "show", "hide", "on", "off")), CommandDef("skin", "Show or change the display skin/theme", "Configuration", args_hint="[name]"), CommandDef("voice", "Toggle voice mode", "Configuration", @@ -140,6 +139,8 @@ COMMAND_REGISTRY: list[CommandDef] = [ cli_only=True, args_hint="[number]"), CommandDef("paste", "Check clipboard for an image and attach it", "Info", cli_only=True), + CommandDef("image", "Attach a local image file for your next prompt", "Info", + cli_only=True, args_hint=""), CommandDef("update", "Update Hermes Agent to the latest version", "Info", 
gateway_only=True), diff --git a/hermes_cli/config.py b/hermes_cli/config.py index a981b1bbbf..80dce6c048 100644 --- a/hermes_cli/config.py +++ b/hermes_cli/config.py @@ -197,14 +197,44 @@ def _ensure_default_soul_md(home: Path) -> None: def ensure_hermes_home(): - """Ensure ~/.hermes directory structure exists with secure permissions.""" + """Ensure ~/.hermes directory structure exists with secure permissions. + + In managed mode (NixOS), dirs are created by the activation script with + setgid + group-writable (2770). We skip mkdir and set umask(0o007) so + any files created (e.g. SOUL.md) are group-writable (0660). + """ home = get_hermes_home() - home.mkdir(parents=True, exist_ok=True) - _secure_dir(home) + if is_managed(): + old_umask = os.umask(0o007) + try: + _ensure_hermes_home_managed(home) + finally: + os.umask(old_umask) + else: + home.mkdir(parents=True, exist_ok=True) + _secure_dir(home) + for subdir in ("cron", "sessions", "logs", "memories"): + d = home / subdir + d.mkdir(parents=True, exist_ok=True) + _secure_dir(d) + _ensure_default_soul_md(home) + + +def _ensure_hermes_home_managed(home: Path): + """Managed-mode variant: verify dirs exist (activation creates them), seed SOUL.md.""" + if not home.is_dir(): + raise RuntimeError( + f"HERMES_HOME {home} does not exist. " + "Run 'sudo nixos-rebuild switch' first." + ) for subdir in ("cron", "sessions", "logs", "memories"): d = home / subdir - d.mkdir(parents=True, exist_ok=True) - _secure_dir(d) + if not d.is_dir(): + raise RuntimeError( + f"{d} does not exist. " + "Run 'sudo nixos-rebuild switch' first." 
+ ) + # Inside umask(0o007) scope — SOUL.md will be created as 0660 _ensure_default_soul_md(home) diff --git a/hermes_cli/doctor.py b/hermes_cli/doctor.py index 361e81d214..fb629e0f18 100644 --- a/hermes_cli/doctor.py +++ b/hermes_cli/doctor.py @@ -54,6 +54,32 @@ _PROVIDER_ENV_HINTS = ( ) +from hermes_constants import is_termux as _is_termux + + +def _python_install_cmd() -> str: + return "python -m pip install" if _is_termux() else "uv pip install" + + +def _system_package_install_cmd(pkg: str) -> str: + if _is_termux(): + return f"pkg install {pkg}" + if sys.platform == "darwin": + return f"brew install {pkg}" + return f"sudo apt install {pkg}" + + +def _termux_browser_setup_steps(node_installed: bool) -> list[str]: + steps: list[str] = [] + step = 1 + if not node_installed: + steps.append(f"{step}) pkg install nodejs") + step += 1 + steps.append(f"{step}) npm install -g agent-browser") + steps.append(f"{step + 1}) agent-browser install") + return steps + + def _has_provider_env_config(content: str) -> bool: """Return True when ~/.hermes/.env contains provider auth/base URL settings.""" return any(key in content for key in _PROVIDER_ENV_HINTS) @@ -200,7 +226,7 @@ def run_doctor(args): check_ok(name) except ImportError: check_fail(name, "(missing)") - issues.append(f"Install {name}: uv pip install {module}") + issues.append(f"Install {name}: {_python_install_cmd()} {module}") for module, name in optional_packages: try: @@ -503,7 +529,7 @@ def run_doctor(args): check_ok("ripgrep (rg)", "(faster file search)") else: check_warn("ripgrep (rg) not found", "(file search uses grep fallback)") - check_info("Install for faster search: sudo apt install ripgrep") + check_info(f"Install for faster search: {_system_package_install_cmd('ripgrep')}") # Docker (optional) terminal_env = os.getenv("TERMINAL_ENV", "local") @@ -526,7 +552,10 @@ def run_doctor(args): if shutil.which("docker"): check_ok("docker", "(optional)") else: - check_warn("docker not found", "(optional)") + if 
_is_termux(): + check_info("Docker backend is not available inside Termux (expected on Android)") + else: + check_warn("docker not found", "(optional)") # SSH (if using ssh backend) if terminal_env == "ssh": @@ -574,9 +603,23 @@ def run_doctor(args): if agent_browser_path.exists(): check_ok("agent-browser (Node.js)", "(browser automation)") else: - check_warn("agent-browser not installed", "(run: npm install)") + if _is_termux(): + check_info("agent-browser is not installed (expected in the tested Termux path)") + check_info("Install it manually later with: npm install -g agent-browser && agent-browser install") + check_info("Termux browser setup:") + for step in _termux_browser_setup_steps(node_installed=True): + check_info(step) + else: + check_warn("agent-browser not installed", "(run: npm install)") else: - check_warn("Node.js not found", "(optional, needed for browser tools)") + if _is_termux(): + check_info("Node.js not found (browser tools are optional in the tested Termux path)") + check_info("Install Node.js on Termux with: pkg install nodejs") + check_info("Termux browser setup:") + for step in _termux_browser_setup_steps(node_installed=False): + check_info(step) + else: + check_warn("Node.js not found", "(optional, needed for browser tools)") # npm audit for all Node.js packages if shutil.which("npm"): @@ -739,8 +782,9 @@ def run_doctor(args): __import__("tinker_atropos") check_ok("tinker-atropos", "(RL training backend)") except ImportError: - check_warn("tinker-atropos found but not installed", "(run: uv pip install -e ./tinker-atropos)") - issues.append("Install tinker-atropos: uv pip install -e ./tinker-atropos") + install_cmd = f"{_python_install_cmd()} -e ./tinker-atropos" + check_warn("tinker-atropos found but not installed", f"(run: {install_cmd})") + issues.append(f"Install tinker-atropos: {install_cmd}") else: check_warn("tinker-atropos requires Python 3.11+", f"(current: {py_version.major}.{py_version.minor})") else: diff --git 
a/hermes_cli/gateway.py b/hermes_cli/gateway.py index 82689f8fff..b19ceaac9a 100644 --- a/hermes_cli/gateway.py +++ b/hermes_cli/gateway.py @@ -39,7 +39,7 @@ def _get_service_pids() -> set: pids: set = set() # --- systemd (Linux): user and system scopes --- - if is_linux(): + if supports_systemd_services(): for scope_args in [["systemctl", "--user"], ["systemctl"]]: try: result = subprocess.run( @@ -225,6 +225,14 @@ def stop_profile_gateway() -> bool: def is_linux() -> bool: return sys.platform.startswith('linux') + +from hermes_constants import is_termux + + +def supports_systemd_services() -> bool: + return is_linux() and not is_termux() + + def is_macos() -> bool: return sys.platform == 'darwin' @@ -477,13 +485,15 @@ def install_linux_gateway_from_setup(force: bool = False) -> tuple[str | None, b def get_systemd_linger_status() -> tuple[bool | None, str]: - """Return whether systemd user lingering is enabled for the current user. + """Return systemd linger status for the current user. Returns: (True, "") when linger is enabled. (False, "") when linger is disabled. (None, detail) when the status could not be determined. 
""" + if is_termux(): + return None, "not supported in Termux" if not is_linux(): return None, "not supported on this platform" @@ -766,7 +776,7 @@ def _print_linger_enable_warning(username: str, detail: str | None = None) -> No def _ensure_linger_enabled() -> None: """Enable linger when possible so the user gateway survives logout.""" - if not is_linux(): + if is_termux() or not is_linux(): return import getpass @@ -1801,7 +1811,7 @@ def _setup_whatsapp(): def _is_service_installed() -> bool: """Check if the gateway is installed as a system service.""" - if is_linux(): + if supports_systemd_services(): return get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists() elif is_macos(): return get_launchd_plist_path().exists() @@ -1810,7 +1820,7 @@ def _is_service_installed() -> bool: def _is_service_running() -> bool: """Check if the gateway service is currently running.""" - if is_linux(): + if supports_systemd_services(): user_unit_exists = get_systemd_unit_path(system=False).exists() system_unit_exists = get_systemd_unit_path(system=True).exists() @@ -1983,7 +1993,7 @@ def gateway_setup(): service_installed = _is_service_installed() service_running = _is_service_running() - if is_linux() and has_conflicting_systemd_units(): + if supports_systemd_services() and has_conflicting_systemd_units(): print_systemd_scope_conflict_warning() print() @@ -1993,7 +2003,7 @@ def gateway_setup(): print_warning("Gateway service is installed but not running.") if prompt_yes_no(" Start it now?", True): try: - if is_linux(): + if supports_systemd_services(): systemd_start() elif is_macos(): launchd_start() @@ -2044,7 +2054,7 @@ def gateway_setup(): if service_running: if prompt_yes_no(" Restart the gateway to pick up changes?", True): try: - if is_linux(): + if supports_systemd_services(): systemd_restart() elif is_macos(): launchd_restart() @@ -2056,7 +2066,7 @@ def gateway_setup(): elif service_installed: if prompt_yes_no(" Start the gateway 
service?", True): try: - if is_linux(): + if supports_systemd_services(): systemd_start() elif is_macos(): launchd_start() @@ -2064,13 +2074,13 @@ def gateway_setup(): print_error(f" Start failed: {e}") else: print() - if is_linux() or is_macos(): - platform_name = "systemd" if is_linux() else "launchd" + if supports_systemd_services() or is_macos(): + platform_name = "systemd" if supports_systemd_services() else "launchd" if prompt_yes_no(f" Install the gateway as a {platform_name} service? (runs in background, starts on boot)", True): try: installed_scope = None did_install = False - if is_linux(): + if supports_systemd_services(): installed_scope, did_install = install_linux_gateway_from_setup(force=False) else: launchd_install(force=False) @@ -2078,7 +2088,7 @@ def gateway_setup(): print() if did_install and prompt_yes_no(" Start the service now?", True): try: - if is_linux(): + if supports_systemd_services(): systemd_start(system=installed_scope == "system") else: launchd_start() @@ -2089,12 +2099,18 @@ def gateway_setup(): print_info(" You can try manually: hermes gateway install") else: print_info(" You can install later: hermes gateway install") - if is_linux(): + if supports_systemd_services(): print_info(" Or as a boot-time service: sudo hermes gateway install --system") print_info(" Or run in foreground: hermes gateway") else: - print_info(" Service install not supported on this platform.") - print_info(" Run in foreground: hermes gateway") + if is_termux(): + from hermes_constants import display_hermes_home as _dhh + print_info(" Termux does not use systemd/launchd services.") + print_info(" Run in foreground: hermes gateway") + print_info(f" Or start it manually in the background (best effort): nohup hermes gateway >{_dhh()}/logs/gateway.log 2>&1 &") + else: + print_info(" Service install not supported on this platform.") + print_info(" Run in foreground: hermes gateway") else: print() print_info("No platforms configured. 
Run 'hermes gateway setup' when ready.") @@ -2130,7 +2146,11 @@ def gateway_command(args): force = getattr(args, 'force', False) system = getattr(args, 'system', False) run_as_user = getattr(args, 'run_as_user', None) - if is_linux(): + if is_termux(): + print("Gateway service installation is not supported on Termux.") + print("Run manually: hermes gateway") + sys.exit(1) + if supports_systemd_services(): systemd_install(force=force, system=system, run_as_user=run_as_user) elif is_macos(): launchd_install(force) @@ -2144,7 +2164,11 @@ def gateway_command(args): managed_error("uninstall gateway service (managed by NixOS)") return system = getattr(args, 'system', False) - if is_linux(): + if is_termux(): + print("Gateway service uninstall is not supported on Termux because there is no managed service to remove.") + print("Stop manual runs with: hermes gateway stop") + sys.exit(1) + if supports_systemd_services(): systemd_uninstall(system=system) elif is_macos(): launchd_uninstall() @@ -2154,7 +2178,11 @@ def gateway_command(args): elif subcmd == "start": system = getattr(args, 'system', False) - if is_linux(): + if is_termux(): + print("Gateway service start is not supported on Termux because there is no system service manager.") + print("Run manually: hermes gateway") + sys.exit(1) + if supports_systemd_services(): systemd_start(system=system) elif is_macos(): launchd_start() @@ -2169,7 +2197,7 @@ def gateway_command(args): if stop_all: # --all: kill every gateway process on the machine service_available = False - if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): + if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): try: systemd_stop(system=system) service_available = True @@ -2190,7 +2218,7 @@ def gateway_command(args): else: # Default: stop only the current profile's gateway service_available = False - if is_linux() and 
(get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): + if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): try: systemd_stop(system=system) service_available = True @@ -2218,7 +2246,7 @@ def gateway_command(args): system = getattr(args, 'system', False) service_configured = False - if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): + if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): service_configured = True try: systemd_restart(system=system) @@ -2235,7 +2263,7 @@ def gateway_command(args): if not service_available: # systemd/launchd restart failed — check if linger is the issue - if is_linux(): + if supports_systemd_services(): linger_ok, _detail = get_systemd_linger_status() if linger_ok is not True: import getpass @@ -2272,7 +2300,7 @@ def gateway_command(args): system = getattr(args, 'system', False) # Check for service first - if is_linux() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): + if supports_systemd_services() and (get_systemd_unit_path(system=False).exists() or get_systemd_unit_path(system=True).exists()): systemd_status(deep, system=system) elif is_macos() and get_launchd_plist_path().exists(): launchd_status(deep) @@ -2289,9 +2317,13 @@ def gateway_command(args): for line in runtime_lines: print(f" {line}") print() - print("To install as a service:") - print(" hermes gateway install") - print(" sudo hermes gateway install --system") + if is_termux(): + print("Termux note:") + print(" Android may stop background jobs when Termux is suspended") + else: + print("To install as a service:") + print(" hermes gateway install") + print(" sudo hermes gateway install --system") else: print("✗ Gateway is not running") runtime_lines = 
_runtime_health_lines() @@ -2303,5 +2335,8 @@ def gateway_command(args): print() print("To start:") print(" hermes gateway # Run in foreground") - print(" hermes gateway install # Install as user service") - print(" sudo hermes gateway install --system # Install as boot-time system service") + if is_termux(): + print(" nohup hermes gateway > ~/.hermes/logs/gateway.log 2>&1 & # Best-effort background start") + else: + print(" hermes gateway install # Install as user service") + print(" sudo hermes gateway install --system # Install as boot-time system service") diff --git a/hermes_cli/main.py b/hermes_cli/main.py index 58761dcb0c..e7455608cf 100644 --- a/hermes_cli/main.py +++ b/hermes_cli/main.py @@ -781,6 +781,7 @@ def cmd_chat(args): "verbose": args.verbose, "quiet": getattr(args, "quiet", False), "query": args.query, + "image": getattr(args, "image", None), "resume": getattr(args, "resume", None), "worktree": getattr(args, "worktree", False), "checkpoints": getattr(args, "checkpoints", False), @@ -1946,7 +1947,10 @@ def _set_reasoning_effort(config, effort: str) -> None: def _prompt_reasoning_effort_selection(efforts, current_effort=""): """Prompt for a reasoning effort. Returns effort, 'none', or None to keep current.""" - ordered = list(dict.fromkeys(str(effort).strip().lower() for effort in efforts if str(effort).strip())) + deduped = list(dict.fromkeys(str(effort).strip().lower() for effort in efforts if str(effort).strip())) + canonical_order = ("minimal", "low", "medium", "high", "xhigh") + ordered = [effort for effort in canonical_order if effort in deduped] + ordered.extend(effort for effort in deduped if effort not in canonical_order) if not ordered: return None @@ -3895,7 +3899,7 @@ def cmd_update(args): # running gateway needs restarting to pick up the new code. 
try: from hermes_cli.gateway import ( - is_macos, is_linux, _ensure_user_systemd_env, + is_macos, supports_systemd_services, _ensure_user_systemd_env, find_gateway_pids, _get_service_pids, ) @@ -3906,7 +3910,7 @@ def cmd_update(args): # --- Systemd services (Linux) --- # Discover all hermes-gateway* units (default + profiles) - if is_linux(): + if supports_systemd_services(): try: _ensure_user_systemd_env() except Exception: @@ -4429,6 +4433,10 @@ For more help on a command: "-q", "--query", help="Single query (non-interactive mode)" ) + chat_parser.add_argument( + "--image", + help="Optional local image path to attach to a single query" + ) chat_parser.add_argument( "-m", "--model", help="Model to use (e.g., anthropic/claude-sonnet-4)" diff --git a/hermes_cli/model_switch.py b/hermes_cli/model_switch.py index 7d120d94f1..ef35108df0 100644 --- a/hermes_cli/model_switch.py +++ b/hermes_cli/model_switch.py @@ -733,6 +733,7 @@ def list_authenticated_providers( fetch_models_dev, get_provider_info as _mdev_pinfo, ) + from hermes_cli.auth import PROVIDER_REGISTRY from hermes_cli.models import OPENROUTER_MODELS, _PROVIDER_MODELS results: List[dict] = [] @@ -753,9 +754,16 @@ def list_authenticated_providers( if not isinstance(pdata, dict): continue - env_vars = pdata.get("env", []) - if not isinstance(env_vars, list): - continue + # Prefer auth.py PROVIDER_REGISTRY for env var names — it's our + # source of truth. models.dev can have wrong mappings (e.g. + # minimax-cn → MINIMAX_API_KEY instead of MINIMAX_CN_API_KEY). 
+ pconfig = PROVIDER_REGISTRY.get(hermes_id) + if pconfig and pconfig.api_key_env_vars: + env_vars = list(pconfig.api_key_env_vars) + else: + env_vars = pdata.get("env", []) + if not isinstance(env_vars, list): + continue # Check if any env var is set has_creds = any(os.environ.get(ev) for ev in env_vars) diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py index 95c9fa6228..72b8aab18e 100644 --- a/hermes_cli/setup.py +++ b/hermes_cli/setup.py @@ -2572,9 +2572,120 @@ _OPENCLAW_SCRIPT = ( ) +def _load_openclaw_migration_module(): + """Load the openclaw_to_hermes migration script as a module. + + Returns the loaded module, or None if the script can't be loaded. + """ + if not _OPENCLAW_SCRIPT.exists(): + return None + + spec = importlib.util.spec_from_file_location( + "openclaw_to_hermes", _OPENCLAW_SCRIPT + ) + if spec is None or spec.loader is None: + return None + + mod = importlib.util.module_from_spec(spec) + # Register in sys.modules so @dataclass can resolve the module + # (Python 3.11+ requires this for dynamically loaded modules) + import sys as _sys + _sys.modules[spec.name] = mod + try: + spec.loader.exec_module(mod) + except Exception: + _sys.modules.pop(spec.name, None) + raise + return mod + + +# Item kinds that represent high-impact changes warranting explicit warnings. +# Gateway tokens/channels can hijack messaging platforms from the old agent. +# Config values may have different semantics between OpenClaw and Hermes. +# Instruction/context files (.md) can contain incompatible setup procedures. 
+_HIGH_IMPACT_KIND_KEYWORDS = { + "gateway": "⚠ Gateway/messaging — this will configure Hermes to use your OpenClaw messaging channels", + "telegram": "⚠ Telegram — this will point Hermes at your OpenClaw Telegram bot", + "slack": "⚠ Slack — this will point Hermes at your OpenClaw Slack workspace", + "discord": "⚠ Discord — this will point Hermes at your OpenClaw Discord bot", + "whatsapp": "⚠ WhatsApp — this will point Hermes at your OpenClaw WhatsApp connection", + "config": "⚠ Config values — OpenClaw settings may not map 1:1 to Hermes equivalents", + "soul": "⚠ Instruction file — may contain OpenClaw-specific setup/restart procedures", + "memory": "⚠ Memory/context file — may reference OpenClaw-specific infrastructure", + "context": "⚠ Context file — may contain OpenClaw-specific instructions", +} + + +def _print_migration_preview(report: dict): + """Print a detailed dry-run preview of what migration would do. + + Groups items by category and adds explicit warnings for high-impact + changes like gateway token takeover and config value differences. 
+ """ + items = report.get("items", []) + if not items: + print_info("Nothing to migrate.") + return + + migrated_items = [i for i in items if i.get("status") == "migrated"] + conflict_items = [i for i in items if i.get("status") == "conflict"] + skipped_items = [i for i in items if i.get("status") == "skipped"] + + warnings_shown = set() + + if migrated_items: + print(color(" Would import:", Colors.GREEN)) + for item in migrated_items: + kind = item.get("kind", "unknown") + dest = item.get("destination", "") + if dest: + dest_short = str(dest).replace(str(Path.home()), "~") + print(f" {kind:<22s} → {dest_short}") + else: + print(f" {kind}") + + # Check for high-impact items and collect warnings + kind_lower = kind.lower() + dest_lower = str(dest).lower() + for keyword, warning in _HIGH_IMPACT_KIND_KEYWORDS.items(): + if keyword in kind_lower or keyword in dest_lower: + warnings_shown.add(warning) + print() + + if conflict_items: + print(color(" Would overwrite (conflicts with existing Hermes config):", Colors.YELLOW)) + for item in conflict_items: + kind = item.get("kind", "unknown") + reason = item.get("reason", "already exists") + print(f" {kind:<22s} {reason}") + print() + + if skipped_items: + print(color(" Would skip:", Colors.DIM)) + for item in skipped_items: + kind = item.get("kind", "unknown") + reason = item.get("reason", "") + print(f" {kind:<22s} {reason}") + print() + + # Print collected warnings + if warnings_shown: + print(color(" ── Warnings ──", Colors.YELLOW)) + for warning in sorted(warnings_shown): + print(color(f" {warning}", Colors.YELLOW)) + print() + print(color(" Note: OpenClaw config values may have different semantics in Hermes.", Colors.YELLOW)) + print(color(" For example, OpenClaw's tool_call_execution: \"auto\" ≠ Hermes's yolo mode.", Colors.YELLOW)) + print(color(" Instruction files (.md) from OpenClaw may contain incompatible procedures.", Colors.YELLOW)) + print() + + def _offer_openclaw_migration(hermes_home: Path) -> bool: 
"""Detect ~/.openclaw and offer to migrate during first-time setup. + Runs a dry-run first to show the user exactly what would be imported, + overwritten, or taken over. Only executes after explicit confirmation. + Returns True if migration ran successfully, False otherwise. """ openclaw_dir = Path.home() / ".openclaw" @@ -2587,12 +2698,12 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool: print() print_header("OpenClaw Installation Detected") print_info(f"Found OpenClaw data at {openclaw_dir}") - print_info("Hermes can import your settings, memories, skills, and API keys.") + print_info("Hermes can preview what would be imported before making any changes.") print() - if not prompt_yes_no("Would you like to import from OpenClaw?", default=True): + if not prompt_yes_no("Would you like to see what can be imported?", default=True): print_info( - "Skipping migration. You can run it later via the openclaw-migration skill." + "Skipping migration. You can run it later with: hermes claw migrate --dry-run" ) return False @@ -2601,34 +2712,71 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool: if not config_path.exists(): save_config(load_config()) - # Dynamically load the migration script + # Load the migration module try: - spec = importlib.util.spec_from_file_location( - "openclaw_to_hermes", _OPENCLAW_SCRIPT - ) - if spec is None or spec.loader is None: + mod = _load_openclaw_migration_module() + if mod is None: print_warning("Could not load migration script.") return False + except Exception as e: + print_warning(f"Could not load migration script: {e}") + logger.debug("OpenClaw migration module load error", exc_info=True) + return False - mod = importlib.util.module_from_spec(spec) - # Register in sys.modules so @dataclass can resolve the module - # (Python 3.11+ requires this for dynamically loaded modules) - import sys as _sys - _sys.modules[spec.name] = mod - try: - spec.loader.exec_module(mod) - except Exception: - _sys.modules.pop(spec.name, 
None) - raise - - # Run migration with the "full" preset, execute mode, no overwrite + # ── Phase 1: Dry-run preview ── + try: selected = mod.resolve_selected_options(None, None, preset="full") + dry_migrator = mod.Migrator( + source_root=openclaw_dir.resolve(), + target_root=hermes_home.resolve(), + execute=False, # dry-run — no files modified + workspace_target=None, + overwrite=True, # show everything including conflicts + migrate_secrets=True, + output_dir=None, + selected_options=selected, + preset_name="full", + ) + preview_report = dry_migrator.migrate() + except Exception as e: + print_warning(f"Migration preview failed: {e}") + logger.debug("OpenClaw migration preview error", exc_info=True) + return False + + # Display the full preview + preview_summary = preview_report.get("summary", {}) + preview_count = preview_summary.get("migrated", 0) + + if preview_count == 0: + print() + print_info("Nothing to import from OpenClaw.") + return False + + print() + print_header(f"Migration Preview — {preview_count} item(s) would be imported") + print_info("No changes have been made yet. Review the list below:") + print() + _print_migration_preview(preview_report) + + # ── Phase 2: Confirm and execute ── + if not prompt_yes_no("Proceed with migration?", default=False): + print_info( + "Migration cancelled. You can run it later with: hermes claw migrate" + ) + print_info( + "Use --dry-run to preview again, or --preset minimal for a lighter import." + ) + return False + + # Execute the migration — overwrite=False so existing Hermes configs are + # preserved. The user saw the preview; conflicts are skipped by default. 
+ try: migrator = mod.Migrator( source_root=openclaw_dir.resolve(), target_root=hermes_home.resolve(), execute=True, workspace_target=None, - overwrite=True, + overwrite=False, # preserve existing Hermes config migrate_secrets=True, output_dir=None, selected_options=selected, @@ -2640,7 +2788,7 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool: logger.debug("OpenClaw migration error", exc_info=True) return False - # Print summary + # Print final summary summary = report.get("summary", {}) migrated = summary.get("migrated", 0) skipped = summary.get("skipped", 0) @@ -2651,7 +2799,7 @@ def _offer_openclaw_migration(hermes_home: Path) -> bool: if migrated: print_success(f"Imported {migrated} item(s) from OpenClaw.") if conflicts: - print_info(f"Skipped {conflicts} item(s) that already exist in Hermes.") + print_info(f"Skipped {conflicts} item(s) that already exist in Hermes (use hermes claw migrate --overwrite to force).") if skipped: print_info(f"Skipped {skipped} item(s) (not found or unchanged).") if errors: diff --git a/hermes_cli/status.py b/hermes_cli/status.py index eed89885d2..11f4371b63 100644 --- a/hermes_cli/status.py +++ b/hermes_cli/status.py @@ -79,6 +79,9 @@ def _effective_provider_label() -> str: return provider_label(effective) +from hermes_constants import is_termux as _is_termux + + def show_status(args): """Show status of all Hermes Agent components.""" show_all = getattr(args, 'all', False) @@ -325,7 +328,25 @@ def show_status(args): print() print(color("◆ Gateway Service", Colors.CYAN, Colors.BOLD)) - if sys.platform.startswith('linux'): + if _is_termux(): + try: + from hermes_cli.gateway import find_gateway_pids + gateway_pids = find_gateway_pids() + except Exception: + gateway_pids = [] + is_running = bool(gateway_pids) + print(f" Status: {check_mark(is_running)} {'running' if is_running else 'stopped'}") + print(" Manager: Termux / manual process") + if gateway_pids: + rendered = ", ".join(str(pid) for pid in gateway_pids[:3]) + if 
len(gateway_pids) > 3: + rendered += ", ..." + print(f" PID(s): {rendered}") + else: + print(" Start with: hermes gateway") + print(" Note: Android may stop background jobs when Termux is suspended") + + elif sys.platform.startswith('linux'): try: from hermes_cli.gateway import get_service_name _gw_svc = get_service_name() @@ -339,7 +360,7 @@ def show_status(args): timeout=5 ) is_active = result.stdout.strip() == "active" - except subprocess.TimeoutExpired: + except (FileNotFoundError, subprocess.TimeoutExpired): is_active = False print(f" Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}") print(" Manager: systemd (user)") diff --git a/hermes_cli/uninstall.py b/hermes_cli/uninstall.py index fa49e3c928..7ab154afed 100644 --- a/hermes_cli/uninstall.py +++ b/hermes_cli/uninstall.py @@ -122,6 +122,10 @@ def uninstall_gateway_service(): if platform.system() != "Linux": return False + + prefix = os.getenv("PREFIX", "") + if os.getenv("TERMUX_VERSION") or "com.termux/files/usr" in prefix: + return False try: from hermes_cli.gateway import get_service_name diff --git a/hermes_constants.py b/hermes_constants.py index c28f6dc8fa..09005227ac 100644 --- a/hermes_constants.py +++ b/hermes_constants.py @@ -72,13 +72,13 @@ def display_hermes_home() -> str: return str(home) -VALID_REASONING_EFFORTS = ("xhigh", "high", "medium", "low", "minimal") +VALID_REASONING_EFFORTS = ("minimal", "low", "medium", "high", "xhigh") def parse_reasoning_effort(effort: str) -> dict | None: """Parse a reasoning effort level into a config dict. - Valid levels: "xhigh", "high", "medium", "low", "minimal", "none". + Valid levels: "none", "minimal", "low", "medium", "high", "xhigh". Returns None when the input is empty or unrecognized (caller uses default). Returns {"enabled": False} for "none". Returns {"enabled": True, "effort": } for valid effort levels. 
@@ -93,6 +93,16 @@ def parse_reasoning_effort(effort: str) -> dict | None: return None +def is_termux() -> bool: + """Return True when running inside a Termux (Android) environment. + + Checks ``TERMUX_VERSION`` (set by Termux) or the Termux-specific + ``PREFIX`` path. Import-safe — no heavy deps. + """ + prefix = os.getenv("PREFIX", "") + return bool(os.getenv("TERMUX_VERSION") or "com.termux/files/usr" in prefix) + + OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1" OPENROUTER_MODELS_URL = f"{OPENROUTER_BASE_URL}/models" OPENROUTER_CHAT_URL = f"{OPENROUTER_BASE_URL}/chat/completions" diff --git a/hermes_logging.py b/hermes_logging.py index 6d8f4fa7b5..5d71590c3f 100644 --- a/hermes_logging.py +++ b/hermes_logging.py @@ -13,6 +13,7 @@ secrets are never written to disk. """ import logging +import os from logging.handlers import RotatingFileHandler from pathlib import Path from typing import Optional @@ -177,6 +178,38 @@ def setup_verbose_logging() -> None: # Internal helpers # --------------------------------------------------------------------------- +class _ManagedRotatingFileHandler(RotatingFileHandler): + """RotatingFileHandler that ensures group-writable perms in managed mode. + + In managed mode (NixOS), the stateDir uses setgid (2770) so new files + inherit the hermes group. However, both _open() (initial creation) and + doRollover() create files via open(), which uses the process umask — + typically 0022, producing 0644. This subclass applies chmod 0660 after + both operations so the gateway and interactive users can share log files. 
+ """ + + def __init__(self, *args, **kwargs): + from hermes_cli.config import is_managed + self._managed = is_managed() + super().__init__(*args, **kwargs) + + def _chmod_if_managed(self): + if self._managed: + try: + os.chmod(self.baseFilename, 0o660) + except OSError: + pass + + def _open(self): + stream = super()._open() + self._chmod_if_managed() + return stream + + def doRollover(self): + super().doRollover() + self._chmod_if_managed() + + def _add_rotating_handler( logger: logging.Logger, path: Path, @@ -198,7 +231,7 @@ def _add_rotating_handler( return # already attached path.parent.mkdir(parents=True, exist_ok=True) - handler = RotatingFileHandler( + handler = _ManagedRotatingFileHandler( str(path), maxBytes=max_bytes, backupCount=backup_count, ) handler.setLevel(level) diff --git a/hermes_state.py b/hermes_state.py index a845dbb9f9..c6825a3e66 100644 --- a/hermes_state.py +++ b/hermes_state.py @@ -944,7 +944,8 @@ class SessionDB: try: msg["tool_calls"] = json.loads(msg["tool_calls"]) except (json.JSONDecodeError, TypeError): - pass + logger.warning("Failed to deserialize tool_calls in get_messages, falling back to []") + msg["tool_calls"] = [] result.append(msg) return result @@ -972,7 +973,8 @@ class SessionDB: try: msg["tool_calls"] = json.loads(row["tool_calls"]) except (json.JSONDecodeError, TypeError): - pass + logger.warning("Failed to deserialize tool_calls in conversation replay, falling back to []") + msg["tool_calls"] = [] # Restore reasoning fields on assistant messages so providers # that replay reasoning (OpenRouter, OpenAI, Nous) receive # coherent multi-turn reasoning context. 
@@ -983,12 +985,14 @@ class SessionDB: try: msg["reasoning_details"] = json.loads(row["reasoning_details"]) except (json.JSONDecodeError, TypeError): - pass + logger.warning("Failed to deserialize reasoning_details, falling back to None") + msg["reasoning_details"] = None if row["codex_reasoning_items"]: try: msg["codex_reasoning_items"] = json.loads(row["codex_reasoning_items"]) except (json.JSONDecodeError, TypeError): - pass + logger.warning("Failed to deserialize codex_reasoning_items, falling back to None") + msg["codex_reasoning_items"] = None messages.append(msg) return messages diff --git a/nix/nixosModules.nix b/nix/nixosModules.nix index 948f7df8c5..b1be031df2 100644 --- a/nix/nixosModules.nix +++ b/nix/nixosModules.nix @@ -560,10 +560,14 @@ # ── Directories ─────────────────────────────────────────────────── { systemd.tmpfiles.rules = [ - "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -" - "d ${cfg.stateDir}/.hermes 0750 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir} 2770 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir}/.hermes 2770 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir}/.hermes/cron 2770 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir}/.hermes/sessions 2770 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir}/.hermes/logs 2770 ${cfg.user} ${cfg.group} - -" + "d ${cfg.stateDir}/.hermes/memories 2770 ${cfg.user} ${cfg.group} - -" "d ${cfg.stateDir}/home 0750 ${cfg.user} ${cfg.group} - -" - "d ${cfg.workingDirectory} 0750 ${cfg.user} ${cfg.group} - -" + "d ${cfg.workingDirectory} 2770 ${cfg.user} ${cfg.group} - -" ]; } @@ -575,7 +579,21 @@ mkdir -p ${cfg.stateDir}/home mkdir -p ${cfg.workingDirectory} chown ${cfg.user}:${cfg.group} ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.stateDir}/home ${cfg.workingDirectory} - chmod 0750 ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.stateDir}/home ${cfg.workingDirectory} + chmod 2770 ${cfg.stateDir} ${cfg.stateDir}/.hermes ${cfg.workingDirectory} + chmod 0750 ${cfg.stateDir}/home + + # Create 
subdirs, set setgid + group-writable, migrate existing files. + # Nix-managed files (config.yaml, .env, .managed) stay 0640/0644. + find ${cfg.stateDir}/.hermes -maxdepth 1 \ + \( -name "*.db" -o -name "*.db-wal" -o -name "*.db-shm" -o -name "SOUL.md" \) \ + -exec chmod g+rw {} + 2>/dev/null || true + for _subdir in cron sessions logs memories; do + mkdir -p "${cfg.stateDir}/.hermes/$_subdir" + chown ${cfg.user}:${cfg.group} "${cfg.stateDir}/.hermes/$_subdir" + chmod 2770 "${cfg.stateDir}/.hermes/$_subdir" + find "${cfg.stateDir}/.hermes/$_subdir" -type f \ + -exec chmod g+rw {} + 2>/dev/null || true + done # Merge Nix settings into existing config.yaml. # Preserves user-added keys (skills, streaming, etc.); Nix keys win. @@ -662,6 +680,10 @@ HERMES_NIX_ENV_EOF Restart = cfg.restart; RestartSec = cfg.restartSec; + # Shared-state: files created by the gateway should be group-writable + # so interactive users in the hermes group can read/write them. + UMask = "0007"; + # Hardening NoNewPrivileges = true; ProtectSystem = "strict"; diff --git a/pyproject.toml b/pyproject.toml index fbe7910496..43567c4eca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,6 +63,17 @@ homeassistant = ["aiohttp>=3.9.0,<4"] sms = ["aiohttp>=3.9.0,<4"] acp = ["agent-client-protocol>=0.9.0,<1.0"] mistral = ["mistralai>=2.3.0,<3"] +termux = [ + # Tested Android / Termux path: keeps the core CLI feature-rich while + # avoiding extras that currently depend on non-Android wheels (notably + # faster-whisper -> ctranslate2 via the voice extra). 
+ "hermes-agent[cron]", + "hermes-agent[cli]", + "hermes-agent[pty]", + "hermes-agent[mcp]", + "hermes-agent[honcho]", + "hermes-agent[acp]", +] dingtalk = ["dingtalk-stream>=0.1.0,<1"] feishu = ["lark-oapi>=1.5.3,<2"] rl = [ diff --git a/run_agent.py b/run_agent.py index 499843585c..751f7b398d 100644 --- a/run_agent.py +++ b/run_agent.py @@ -87,6 +87,7 @@ from agent.model_metadata import ( fetch_model_metadata, estimate_tokens_rough, estimate_messages_tokens_rough, estimate_request_tokens_rough, get_next_probe_tier, parse_context_limit_from_error, + parse_available_output_tokens_from_error, save_context_length, is_local_endpoint, query_ollama_num_ctx, ) @@ -621,6 +622,7 @@ class AIAgent: self.tool_progress_callback = tool_progress_callback self.tool_start_callback = tool_start_callback self.tool_complete_callback = tool_complete_callback + self.suppress_status_output = False self.thinking_callback = thinking_callback self.reasoning_callback = reasoning_callback self._reasoning_deltas_fired = False # Set by _fire_reasoning_delta, reset per API call @@ -1459,7 +1461,14 @@ class AIAgent: After the main response has been delivered and the remaining tool calls are post-response housekeeping (``_mute_post_response``), all non-forced output is suppressed. + + ``suppress_status_output`` is a stricter CLI automation mode used by + parseable single-query flows such as ``hermes chat -q``. In that mode, + all status/diagnostic prints routed through ``_vprint`` are suppressed + so stdout stays machine-readable. """ + if getattr(self, "suppress_status_output", False): + return if not force and getattr(self, "_mute_post_response", False): return if not force and self._has_stream_consumers() and not self._executing_tools: @@ -1485,6 +1494,17 @@ class AIAgent: except (AttributeError, ValueError, OSError): return False + def _should_emit_quiet_tool_messages(self) -> bool: + """Return True when quiet-mode tool summaries should print directly. 
+ + When the caller provides ``tool_progress_callback`` (for example the CLI + TUI or a gateway progress renderer), that callback owns progress display. + Emitting quiet-mode summary lines here duplicates progress and leaks tool + previews into flows that are expected to stay silent, such as + ``hermes chat -q``. + """ + return self.quiet_mode and not self.tool_progress_callback + def _emit_status(self, message: str) -> None: """Emit a lifecycle status message to both CLI and gateway channels. @@ -4969,9 +4989,21 @@ class AIAgent: # Swap OpenAI client and config in-place self.api_key = fb_client.api_key self.client = fb_client + # Preserve provider-specific headers that + # resolve_provider_client() may have baked into + # fb_client via the default_headers kwarg. The OpenAI + # SDK stores these in _custom_headers. Without this, + # subsequent request-client rebuilds (via + # _create_request_openai_client) drop the headers, + # causing 403s from providers like Kimi Coding that + # require a User-Agent sentinel. + fb_headers = getattr(fb_client, "_custom_headers", None) + if not fb_headers: + fb_headers = getattr(fb_client, "default_headers", None) self._client_kwargs = { "api_key": fb_client.api_key, "base_url": fb_base_url, + **({"default_headers": dict(fb_headers)} if fb_headers else {}), } # Re-evaluate prompt caching for the new provider/model @@ -5386,15 +5418,22 @@ class AIAgent: if self.api_mode == "anthropic_messages": from agent.anthropic_adapter import build_anthropic_kwargs anthropic_messages = self._prepare_anthropic_messages_for_api(api_messages) - # Pass context_length so the adapter can clamp max_tokens if the - # user configured a smaller context window than the model's output limit. + # Pass context_length (total input+output window) so the adapter can + # clamp max_tokens (output cap) when the user configured a smaller + # context window than the model's native output limit. 
ctx_len = getattr(self, "context_compressor", None) ctx_len = ctx_len.context_length if ctx_len else None + # _ephemeral_max_output_tokens is set for one call when the API + # returns "max_tokens too large given prompt" — it caps output to + # the available window space without touching context_length. + ephemeral_out = getattr(self, "_ephemeral_max_output_tokens", None) + if ephemeral_out is not None: + self._ephemeral_max_output_tokens = None # consume immediately return build_anthropic_kwargs( model=self.model, messages=anthropic_messages, tools=self.tools, - max_tokens=self.max_tokens, + max_tokens=ephemeral_out if ephemeral_out is not None else self.max_tokens, reasoning_config=self.reasoning_config, is_oauth=self._is_anthropic_oauth, preserve_dots=self._anthropic_preserve_dots(), @@ -6328,7 +6367,7 @@ class AIAgent: # Start spinner for CLI mode (skip when TUI handles tool progress) spinner = None - if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner(): + if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.KAWAII_WAITING) spinner = KawaiiSpinner(f"{face} ⚡ running {num_tools} tools concurrently", spinner_type='dots', print_fn=self._print_fn) spinner.start() @@ -6378,7 +6417,7 @@ class AIAgent: logging.debug(f"Tool result ({len(function_result)} chars): {function_result}") # Print cute message per tool - if self.quiet_mode: + if self._should_emit_quiet_tool_messages(): cute_msg = _get_cute_tool_message_impl(name, args, tool_duration, result=function_result) self._safe_print(f" {cute_msg}") elif not self.quiet_mode: @@ -6535,7 +6574,7 @@ class AIAgent: store=self._todo_store, ) tool_duration = time.time() - tool_start_time - if self.quiet_mode: + if self._should_emit_quiet_tool_messages(): self._vprint(f" {_get_cute_tool_message_impl('todo', function_args, tool_duration, result=function_result)}") elif function_name == "session_search": if not 
self._session_db: @@ -6550,7 +6589,7 @@ class AIAgent: current_session_id=self.session_id, ) tool_duration = time.time() - tool_start_time - if self.quiet_mode: + if self._should_emit_quiet_tool_messages(): self._vprint(f" {_get_cute_tool_message_impl('session_search', function_args, tool_duration, result=function_result)}") elif function_name == "memory": target = function_args.get("target", "memory") @@ -6563,7 +6602,7 @@ class AIAgent: store=self._memory_store, ) tool_duration = time.time() - tool_start_time - if self.quiet_mode: + if self._should_emit_quiet_tool_messages(): self._vprint(f" {_get_cute_tool_message_impl('memory', function_args, tool_duration, result=function_result)}") elif function_name == "clarify": from tools.clarify_tool import clarify_tool as _clarify_tool @@ -6573,7 +6612,7 @@ class AIAgent: callback=self.clarify_callback, ) tool_duration = time.time() - tool_start_time - if self.quiet_mode: + if self._should_emit_quiet_tool_messages(): self._vprint(f" {_get_cute_tool_message_impl('clarify', function_args, tool_duration, result=function_result)}") elif function_name == "delegate_task": from tools.delegate_tool import delegate_task as _delegate_task @@ -6584,7 +6623,7 @@ class AIAgent: goal_preview = (function_args.get("goal") or "")[:30] spinner_label = f"🔀 {goal_preview}" if goal_preview else "🔀 delegating" spinner = None - if self.quiet_mode and not self.tool_progress_callback and self._should_start_quiet_spinner(): + if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.KAWAII_WAITING) spinner = KawaiiSpinner(f"{face} {spinner_label}", spinner_type='dots', print_fn=self._print_fn) spinner.start() @@ -6606,13 +6645,13 @@ class AIAgent: cute_msg = _get_cute_tool_message_impl('delegate_task', function_args, tool_duration, result=_delegate_result) if spinner: spinner.stop(cute_msg) - elif self.quiet_mode: + elif self._should_emit_quiet_tool_messages(): self._vprint(f" 
{cute_msg}") elif self._memory_manager and self._memory_manager.has_tool(function_name): # Memory provider tools (hindsight_retain, honcho_search, etc.) # These are not in the tool registry — route through MemoryManager. spinner = None - if self.quiet_mode and not self.tool_progress_callback: + if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.KAWAII_WAITING) emoji = _get_tool_emoji(function_name) preview = _build_tool_preview(function_name, function_args) or function_name @@ -6630,11 +6669,11 @@ class AIAgent: cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_mem_result) if spinner: spinner.stop(cute_msg) - elif self.quiet_mode: + elif self._should_emit_quiet_tool_messages(): self._vprint(f" {cute_msg}") elif self.quiet_mode: spinner = None - if not self.tool_progress_callback: + if self._should_emit_quiet_tool_messages() and self._should_start_quiet_spinner(): face = random.choice(KawaiiSpinner.KAWAII_WAITING) emoji = _get_tool_emoji(function_name) preview = _build_tool_preview(function_name, function_args) or function_name @@ -6657,7 +6696,7 @@ class AIAgent: cute_msg = _get_cute_tool_message_impl(function_name, function_args, tool_duration, result=_spinner_result) if spinner: spinner.stop(cute_msg) - else: + elif self._should_emit_quiet_tool_messages(): self._vprint(f" {cute_msg}") else: try: @@ -8295,6 +8334,48 @@ class AIAgent: compressor = self.context_compressor old_ctx = compressor.context_length + # ── Distinguish two very different errors ─────────── + # 1. "Prompt too long": the INPUT exceeds the context window. + # Fix: reduce context_length + compress history. + # 2. "max_tokens too large": input is fine, but + # input_tokens + requested max_tokens > context_window. + # Fix: reduce max_tokens (the OUTPUT cap) for this call. + # Do NOT shrink context_length — the window is unchanged. + # + # Note: max_tokens = output token cap (one response). 
+ # context_length = total window (input + output combined). + available_out = parse_available_output_tokens_from_error(error_msg) + if available_out is not None: + # Error is purely about the output cap being too large. + # Cap output to the available space and retry without + # touching context_length or triggering compression. + safe_out = max(1, available_out - 64) # small safety margin + self._ephemeral_max_output_tokens = safe_out + self._vprint( + f"{self.log_prefix}⚠️ Output cap too large for current prompt — " + f"retrying with max_tokens={safe_out:,} " + f"(available_tokens={available_out:,}; context_length unchanged at {old_ctx:,})", + force=True, + ) + # Still count against compression_attempts so we don't + # loop forever if the error keeps recurring. + compression_attempts += 1 + if compression_attempts > max_compression_attempts: + self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached.", force=True) + self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) + logging.error(f"{self.log_prefix}Context compression failed after {max_compression_attempts} attempts.") + self._persist_session(messages, conversation_history) + return { + "messages": messages, + "completed": False, + "api_calls": api_call_count, + "error": f"Context length exceeded: max compression attempts ({max_compression_attempts}) reached.", + "partial": True + } + restart_with_compressed_messages = True + break + + # Error is about the INPUT being too large — reduce context_length. 
# Try to parse the actual limit from the error message parsed_limit = parse_context_limit_from_error(error_msg) if parsed_limit and parsed_limit < old_ctx: diff --git a/scripts/install.sh b/scripts/install.sh index b44f538fa1..e157153343 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -2,8 +2,8 @@ # ============================================================================ # Hermes Agent Installer # ============================================================================ -# Installation script for Linux and macOS. -# Uses uv for fast Python provisioning and package management. +# Installation script for Linux, macOS, and Android/Termux. +# Uses uv for desktop/server installs and Python's stdlib venv + pip on Termux. # # Usage: # curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash @@ -117,6 +117,36 @@ log_error() { echo -e "${RED}✗${NC} $1" } +is_termux() { + [ -n "${TERMUX_VERSION:-}" ] || [[ "${PREFIX:-}" == *"com.termux/files/usr"* ]] +} + +get_command_link_dir() { + if is_termux && [ -n "${PREFIX:-}" ]; then + echo "$PREFIX/bin" + else + echo "$HOME/.local/bin" + fi +} + +get_command_link_display_dir() { + if is_termux && [ -n "${PREFIX:-}" ]; then + echo '$PREFIX/bin' + else + echo '~/.local/bin' + fi +} + +get_hermes_command_path() { + local link_dir + link_dir="$(get_command_link_dir)" + if [ -x "$link_dir/hermes" ]; then + echo "$link_dir/hermes" + else + echo "hermes" + fi +} + # ============================================================================ # System detection # ============================================================================ @@ -124,12 +154,17 @@ log_error() { detect_os() { case "$(uname -s)" in Linux*) - OS="linux" - if [ -f /etc/os-release ]; then - . /etc/os-release - DISTRO="$ID" + if is_termux; then + OS="android" + DISTRO="termux" else - DISTRO="unknown" + OS="linux" + if [ -f /etc/os-release ]; then + . 
/etc/os-release + DISTRO="$ID" + else + DISTRO="unknown" + fi fi ;; Darwin*) @@ -158,6 +193,12 @@ detect_os() { # ============================================================================ install_uv() { + if [ "$DISTRO" = "termux" ]; then + log_info "Termux detected — using Python's stdlib venv + pip instead of uv" + UV_CMD="" + return 0 + fi + log_info "Checking for uv package manager..." # Check common locations for uv @@ -209,6 +250,25 @@ install_uv() { } check_python() { + if [ "$DISTRO" = "termux" ]; then + log_info "Checking Termux Python..." + if command -v python >/dev/null 2>&1; then + PYTHON_PATH="$(command -v python)" + if "$PYTHON_PATH" -c 'import sys; raise SystemExit(0 if sys.version_info >= (3, 11) else 1)' 2>/dev/null; then + PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) + log_success "Python found: $PYTHON_FOUND_VERSION" + return 0 + fi + fi + + log_info "Installing Python via pkg..." + pkg install -y python >/dev/null + PYTHON_PATH="$(command -v python)" + PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) + log_success "Python installed: $PYTHON_FOUND_VERSION" + return 0 + fi + log_info "Checking Python $PYTHON_VERSION..." # Let uv handle Python — it can download and manage Python versions @@ -243,6 +303,17 @@ check_git() { fi log_error "Git not found" + + if [ "$DISTRO" = "termux" ]; then + log_info "Installing Git via pkg..." + pkg install -y git >/dev/null + if command -v git >/dev/null 2>&1; then + GIT_VERSION=$(git --version | awk '{print $3}') + log_success "Git $GIT_VERSION installed" + return 0 + fi + fi + log_info "Please install Git:" case "$OS" in @@ -262,6 +333,9 @@ check_git() { ;; esac ;; + android) + log_info " pkg install git" + ;; macos) log_info " xcode-select --install" log_info " Or: brew install git" @@ -290,11 +364,29 @@ check_node() { return 0 fi - log_info "Node.js not found — installing Node.js $NODE_VERSION LTS..." 
+ if [ "$DISTRO" = "termux" ]; then + log_info "Node.js not found — installing Node.js via pkg..." + else + log_info "Node.js not found — installing Node.js $NODE_VERSION LTS..." + fi install_node } install_node() { + if [ "$DISTRO" = "termux" ]; then + log_info "Installing Node.js via pkg..." + if pkg install -y nodejs >/dev/null; then + local installed_ver + installed_ver=$(node --version 2>/dev/null) + log_success "Node.js $installed_ver installed via pkg" + HAS_NODE=true + else + log_warn "Failed to install Node.js via pkg" + HAS_NODE=false + fi + return 0 + fi + local arch=$(uname -m) local node_arch case "$arch" in @@ -413,6 +505,30 @@ install_system_packages() { need_ffmpeg=true fi + # Termux always needs the Android build toolchain for the tested pip path, + # even when ripgrep/ffmpeg are already present. + if [ "$DISTRO" = "termux" ]; then + local termux_pkgs=(clang rust make pkg-config libffi openssl) + if [ "$need_ripgrep" = true ]; then + termux_pkgs+=("ripgrep") + fi + if [ "$need_ffmpeg" = true ]; then + termux_pkgs+=("ffmpeg") + fi + + log_info "Installing Termux packages: ${termux_pkgs[*]}" + if pkg install -y "${termux_pkgs[@]}" >/dev/null; then + [ "$need_ripgrep" = true ] && HAS_RIPGREP=true && log_success "ripgrep installed" + [ "$need_ffmpeg" = true ] && HAS_FFMPEG=true && log_success "ffmpeg installed" + log_success "Termux build dependencies installed" + return 0 + fi + + log_warn "Could not auto-install all Termux packages" + log_info "Install manually: pkg install ${termux_pkgs[*]}" + return 0 + fi + # Nothing to install — done if [ "$need_ripgrep" = false ] && [ "$need_ffmpeg" = false ]; then return 0 @@ -550,6 +666,9 @@ show_manual_install_hint() { *) log_info " Use your package manager or visit the project homepage" ;; esac ;; + android) + log_info " pkg install $pkg" + ;; macos) log_info " brew install $pkg" ;; esac } @@ -646,6 +765,19 @@ setup_venv() { return 0 fi + if [ "$DISTRO" = "termux" ]; then + log_info "Creating virtual 
environment with Termux Python..." + + if [ -d "venv" ]; then + log_info "Virtual environment already exists, recreating..." + rm -rf venv + fi + + "$PYTHON_PATH" -m venv venv + log_success "Virtual environment ready ($(./venv/bin/python --version 2>/dev/null))" + return 0 + fi + log_info "Creating virtual environment with Python $PYTHON_VERSION..." if [ -d "venv" ]; then @@ -662,6 +794,46 @@ setup_venv() { install_deps() { log_info "Installing dependencies..." + if [ "$DISTRO" = "termux" ]; then + if [ "$USE_VENV" = true ]; then + export VIRTUAL_ENV="$INSTALL_DIR/venv" + PIP_PYTHON="$INSTALL_DIR/venv/bin/python" + else + PIP_PYTHON="$PYTHON_PATH" + fi + + if [ -z "${ANDROID_API_LEVEL:-}" ]; then + ANDROID_API_LEVEL="$(getprop ro.build.version.sdk 2>/dev/null || true)" + if [ -z "$ANDROID_API_LEVEL" ]; then + ANDROID_API_LEVEL=24 + fi + export ANDROID_API_LEVEL + log_info "Using ANDROID_API_LEVEL=$ANDROID_API_LEVEL for Android wheel builds" + fi + + "$PIP_PYTHON" -m pip install --upgrade pip setuptools wheel >/dev/null + if ! "$PIP_PYTHON" -m pip install -e '.[termux]' -c constraints-termux.txt; then + log_warn "Termux feature install (.[termux]) failed, trying base install..." + if ! "$PIP_PYTHON" -m pip install -e '.' -c constraints-termux.txt; then + log_error "Package installation failed on Termux." + log_info "Ensure these packages are installed: pkg install clang rust make pkg-config libffi openssl" + log_info "Then re-run: cd $INSTALL_DIR && python -m pip install -e '.[termux]' -c constraints-termux.txt" + exit 1 + fi + fi + + log_success "Main package installed" + log_info "Termux note: browser/WhatsApp tooling is not installed by default; see the Termux guide for optional follow-up steps." 
+ + if [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then + log_info "tinker-atropos submodule found — skipping install (optional, for RL training)" + log_info " To install later: $PIP_PYTHON -m pip install -e \"./tinker-atropos\"" + fi + + log_success "All dependencies installed" + return 0 + fi + if [ "$USE_VENV" = true ]; then # Tell uv to install into our venv (no need to activate) export VIRTUAL_ENV="$INSTALL_DIR/venv" @@ -743,19 +915,35 @@ setup_path() { if [ ! -x "$HERMES_BIN" ]; then log_warn "hermes entry point not found at $HERMES_BIN" log_info "This usually means the pip install didn't complete successfully." - log_info "Try: cd $INSTALL_DIR && uv pip install -e '.[all]'" + if [ "$DISTRO" = "termux" ]; then + log_info "Try: cd $INSTALL_DIR && python -m pip install -e '.[termux]' -c constraints-termux.txt" + else + log_info "Try: cd $INSTALL_DIR && uv pip install -e '.[all]'" + fi return 0 fi - # Create symlink in ~/.local/bin (standard user binary location, usually on PATH) - mkdir -p "$HOME/.local/bin" - ln -sf "$HERMES_BIN" "$HOME/.local/bin/hermes" - log_success "Symlinked hermes → ~/.local/bin/hermes" + local command_link_dir + local command_link_display_dir + command_link_dir="$(get_command_link_dir)" + command_link_display_dir="$(get_command_link_display_dir)" + + # Create a user-facing shim for the hermes command. + mkdir -p "$command_link_dir" + ln -sf "$HERMES_BIN" "$command_link_dir/hermes" + log_success "Symlinked hermes → $command_link_display_dir/hermes" + + if [ "$DISTRO" = "termux" ]; then + export PATH="$command_link_dir:$PATH" + log_info "$command_link_display_dir is the native Termux command path" + log_success "hermes command ready" + return 0 + fi # Check if ~/.local/bin is on PATH; if not, add it to shell config. # Detect the user's actual login shell (not the shell running this script, # which is always bash when piped from curl). - if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then + if ! 
echo "$PATH" | tr ':' '\n' | grep -q "^$command_link_dir$"; then SHELL_CONFIGS=() LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")" case "$LOGIN_SHELL" in @@ -801,7 +989,7 @@ setup_path() { fi # Export for current session so hermes works immediately - export PATH="$HOME/.local/bin:$PATH" + export PATH="$command_link_dir:$PATH" log_success "hermes command ready" } @@ -878,6 +1066,13 @@ install_node_deps() { return 0 fi + if [ "$DISTRO" = "termux" ]; then + log_info "Skipping automatic Node/browser dependency setup on Termux" + log_info "Browser automation and WhatsApp bridge are not part of the tested Termux install path yet." + log_info "If you want to experiment manually later, run: cd $INSTALL_DIR && npm install" + return 0 + fi + if [ -f "$INSTALL_DIR/package.json" ]; then log_info "Installing Node.js dependencies (browser tools)..." cd "$INSTALL_DIR" @@ -1002,8 +1197,7 @@ maybe_start_gateway() { read -p "Pair WhatsApp now? [Y/n] " -n 1 -r echo if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then - HERMES_CMD="$HOME/.local/bin/hermes" - [ ! -x "$HERMES_CMD" ] && HERMES_CMD="hermes" + HERMES_CMD="$(get_hermes_command_path)" $HERMES_CMD whatsapp || true fi else @@ -1017,16 +1211,17 @@ maybe_start_gateway() { fi echo "" - read -p "Would you like to install the gateway as a background service? [Y/n] " -n 1 -r < /dev/tty + if [ "$DISTRO" = "termux" ]; then + read -p "Would you like to start the gateway in the background? [Y/n] " -n 1 -r < /dev/tty + else + read -p "Would you like to install the gateway as a background service? [Y/n] " -n 1 -r < /dev/tty + fi echo if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then - HERMES_CMD="$HOME/.local/bin/hermes" - if [ ! -x "$HERMES_CMD" ]; then - HERMES_CMD="hermes" - fi + HERMES_CMD="$(get_hermes_command_path)" - if command -v systemctl &> /dev/null; then + if [ "$DISTRO" != "termux" ] && command -v systemctl &> /dev/null; then log_info "Installing systemd service..." 
if $HERMES_CMD gateway install 2>/dev/null; then log_success "Gateway service installed" @@ -1039,12 +1234,19 @@ maybe_start_gateway() { log_warn "Systemd install failed. You can start manually: hermes gateway" fi else - log_info "systemd not available — starting gateway in background..." + if [ "$DISTRO" = "termux" ]; then + log_info "Termux detected — starting gateway in best-effort background mode..." + else + log_info "systemd not available — starting gateway in background..." + fi nohup $HERMES_CMD gateway > "$HERMES_HOME/logs/gateway.log" 2>&1 & GATEWAY_PID=$! log_success "Gateway started (PID $GATEWAY_PID). Logs: ~/.hermes/logs/gateway.log" log_info "To stop: kill $GATEWAY_PID" log_info "To restart later: hermes gateway" + if [ "$DISTRO" = "termux" ]; then + log_warn "Android may stop background processes when Termux is suspended or the system reclaims resources." + fi fi else log_info "Skipped. Start the gateway later with: hermes gateway" @@ -1083,24 +1285,33 @@ print_success() { echo -e "${CYAN}─────────────────────────────────────────────────────────${NC}" echo "" - echo -e "${YELLOW}⚡ Reload your shell to use 'hermes' command:${NC}" - echo "" - LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")" - if [ "$LOGIN_SHELL" = "zsh" ]; then - echo " source ~/.zshrc" - elif [ "$LOGIN_SHELL" = "bash" ]; then - echo " source ~/.bashrc" + if [ "$DISTRO" = "termux" ]; then + echo -e "${YELLOW}⚡ 'hermes' was linked into $(get_command_link_display_dir), which is already on PATH in Termux.${NC}" + echo "" else - echo " source ~/.bashrc # or ~/.zshrc" + echo -e "${YELLOW}⚡ Reload your shell to use 'hermes' command:${NC}" + echo "" + LOGIN_SHELL="$(basename "${SHELL:-/bin/bash}")" + if [ "$LOGIN_SHELL" = "zsh" ]; then + echo " source ~/.zshrc" + elif [ "$LOGIN_SHELL" = "bash" ]; then + echo " source ~/.bashrc" + else + echo " source ~/.bashrc # or ~/.zshrc" + fi + echo "" fi - echo "" # Show Node.js warning if auto-install failed if [ "$HAS_NODE" = false ]; then echo -e 
"${YELLOW}" echo "Note: Node.js could not be installed automatically." echo "Browser tools need Node.js. Install manually:" - echo " https://nodejs.org/en/download/" + if [ "$DISTRO" = "termux" ]; then + echo " pkg install nodejs" + else + echo " https://nodejs.org/en/download/" + fi echo -e "${NC}" fi @@ -1109,7 +1320,11 @@ print_success() { echo -e "${YELLOW}" echo "Note: ripgrep (rg) was not found. File search will use" echo "grep as a fallback. For faster search in large codebases," - echo "install ripgrep: sudo apt install ripgrep (or brew install ripgrep)" + if [ "$DISTRO" = "termux" ]; then + echo "install ripgrep: pkg install ripgrep" + else + echo "install ripgrep: sudo apt install ripgrep (or brew install ripgrep)" + fi echo -e "${NC}" fi } diff --git a/setup-hermes.sh b/setup-hermes.sh index d2a1b12ea3..5d0f2928ab 100755 --- a/setup-hermes.sh +++ b/setup-hermes.sh @@ -3,17 +3,17 @@ # Hermes Agent Setup Script # ============================================================================ # Quick setup for developers who cloned the repo manually. -# Uses uv for fast Python provisioning and package management. +# Uses uv for desktop/server setup and Python's stdlib venv + pip on Termux. # # Usage: # ./setup-hermes.sh # # This script: -# 1. Installs uv if not present -# 2. Creates a virtual environment with Python 3.11 via uv -# 3. Installs all dependencies (main package + submodules) +# 1. Detects desktop/server vs Android/Termux setup path +# 2. Creates a Python 3.11 virtual environment +# 3. Installs the appropriate dependency set for the platform # 4. Creates .env from template (if not exists) -# 5. Symlinks the 'hermes' CLI command into ~/.local/bin +# 5. Symlinks the 'hermes' CLI command into a user-facing bin dir # 6. 
Runs the setup wizard (optional) # ============================================================================ @@ -31,6 +31,26 @@ cd "$SCRIPT_DIR" PYTHON_VERSION="3.11" +is_termux() { + [ -n "${TERMUX_VERSION:-}" ] || [[ "${PREFIX:-}" == *"com.termux/files/usr"* ]] +} + +get_command_link_dir() { + if is_termux && [ -n "${PREFIX:-}" ]; then + echo "$PREFIX/bin" + else + echo "$HOME/.local/bin" + fi +} + +get_command_link_display_dir() { + if is_termux && [ -n "${PREFIX:-}" ]; then + echo '$PREFIX/bin' + else + echo '~/.local/bin' + fi +} + echo "" echo -e "${CYAN}⚕ Hermes Agent Setup${NC}" echo "" @@ -42,36 +62,40 @@ echo "" echo -e "${CYAN}→${NC} Checking for uv..." UV_CMD="" -if command -v uv &> /dev/null; then - UV_CMD="uv" -elif [ -x "$HOME/.local/bin/uv" ]; then - UV_CMD="$HOME/.local/bin/uv" -elif [ -x "$HOME/.cargo/bin/uv" ]; then - UV_CMD="$HOME/.cargo/bin/uv" -fi - -if [ -n "$UV_CMD" ]; then - UV_VERSION=$($UV_CMD --version 2>/dev/null) - echo -e "${GREEN}✓${NC} uv found ($UV_VERSION)" +if is_termux; then + echo -e "${CYAN}→${NC} Termux detected — using Python's stdlib venv + pip instead of uv" else - echo -e "${CYAN}→${NC} Installing uv..." - if curl -LsSf https://astral.sh/uv/install.sh | sh 2>/dev/null; then - if [ -x "$HOME/.local/bin/uv" ]; then - UV_CMD="$HOME/.local/bin/uv" - elif [ -x "$HOME/.cargo/bin/uv" ]; then - UV_CMD="$HOME/.cargo/bin/uv" - fi - - if [ -n "$UV_CMD" ]; then - UV_VERSION=$($UV_CMD --version 2>/dev/null) - echo -e "${GREEN}✓${NC} uv installed ($UV_VERSION)" + if command -v uv &> /dev/null; then + UV_CMD="uv" + elif [ -x "$HOME/.local/bin/uv" ]; then + UV_CMD="$HOME/.local/bin/uv" + elif [ -x "$HOME/.cargo/bin/uv" ]; then + UV_CMD="$HOME/.cargo/bin/uv" + fi + + if [ -n "$UV_CMD" ]; then + UV_VERSION=$($UV_CMD --version 2>/dev/null) + echo -e "${GREEN}✓${NC} uv found ($UV_VERSION)" + else + echo -e "${CYAN}→${NC} Installing uv..." 
+ if curl -LsSf https://astral.sh/uv/install.sh | sh 2>/dev/null; then + if [ -x "$HOME/.local/bin/uv" ]; then + UV_CMD="$HOME/.local/bin/uv" + elif [ -x "$HOME/.cargo/bin/uv" ]; then + UV_CMD="$HOME/.cargo/bin/uv" + fi + + if [ -n "$UV_CMD" ]; then + UV_VERSION=$($UV_CMD --version 2>/dev/null) + echo -e "${GREEN}✓${NC} uv installed ($UV_VERSION)" + else + echo -e "${RED}✗${NC} uv installed but not found. Add ~/.local/bin to PATH and retry." + exit 1 + fi else - echo -e "${RED}✗${NC} uv installed but not found. Add ~/.local/bin to PATH and retry." + echo -e "${RED}✗${NC} Failed to install uv. Visit https://docs.astral.sh/uv/" exit 1 fi - else - echo -e "${RED}✗${NC} Failed to install uv. Visit https://docs.astral.sh/uv/" - exit 1 fi fi @@ -81,16 +105,34 @@ fi echo -e "${CYAN}→${NC} Checking Python $PYTHON_VERSION..." -if $UV_CMD python find "$PYTHON_VERSION" &> /dev/null; then - PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION") - PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) - echo -e "${GREEN}✓${NC} $PYTHON_FOUND_VERSION found" +if is_termux; then + if command -v python >/dev/null 2>&1; then + PYTHON_PATH="$(command -v python)" + if "$PYTHON_PATH" -c 'import sys; raise SystemExit(0 if sys.version_info >= (3, 11) else 1)' 2>/dev/null; then + PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) + echo -e "${GREEN}✓${NC} $PYTHON_FOUND_VERSION found" + else + echo -e "${RED}✗${NC} Termux Python must be 3.11+" + echo " Run: pkg install python" + exit 1 + fi + else + echo -e "${RED}✗${NC} Python not found in Termux" + echo " Run: pkg install python" + exit 1 + fi else - echo -e "${CYAN}→${NC} Python $PYTHON_VERSION not found, installing via uv..." 
- $UV_CMD python install "$PYTHON_VERSION" - PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION") - PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) - echo -e "${GREEN}✓${NC} $PYTHON_FOUND_VERSION installed" + if $UV_CMD python find "$PYTHON_VERSION" &> /dev/null; then + PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION") + PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) + echo -e "${GREEN}✓${NC} $PYTHON_FOUND_VERSION found" + else + echo -e "${CYAN}→${NC} Python $PYTHON_VERSION not found, installing via uv..." + $UV_CMD python install "$PYTHON_VERSION" + PYTHON_PATH=$($UV_CMD python find "$PYTHON_VERSION") + PYTHON_FOUND_VERSION=$($PYTHON_PATH --version 2>/dev/null) + echo -e "${GREEN}✓${NC} $PYTHON_FOUND_VERSION installed" + fi fi # ============================================================================ @@ -104,11 +146,16 @@ if [ -d "venv" ]; then rm -rf venv fi -$UV_CMD venv venv --python "$PYTHON_VERSION" -echo -e "${GREEN}✓${NC} venv created (Python $PYTHON_VERSION)" +if is_termux; then + "$PYTHON_PATH" -m venv venv + echo -e "${GREEN}✓${NC} venv created with stdlib venv" +else + $UV_CMD venv venv --python "$PYTHON_VERSION" + echo -e "${GREEN}✓${NC} venv created (Python $PYTHON_VERSION)" +fi -# Tell uv to install into this venv (no activation needed for uv) export VIRTUAL_ENV="$SCRIPT_DIR/venv" +SETUP_PYTHON="$SCRIPT_DIR/venv/bin/python" # ============================================================================ # Dependencies @@ -116,19 +163,34 @@ export VIRTUAL_ENV="$SCRIPT_DIR/venv" echo -e "${CYAN}→${NC} Installing dependencies..." -# Prefer uv sync with lockfile (hash-verified installs) when available, -# fall back to pip install for compatibility or when lockfile is stale. -if [ -f "uv.lock" ]; then - echo -e "${CYAN}→${NC} Using uv.lock for hash-verified installation..." 
- UV_PROJECT_ENVIRONMENT="$SCRIPT_DIR/venv" $UV_CMD sync --all-extras --locked 2>/dev/null && \ - echo -e "${GREEN}✓${NC} Dependencies installed (lockfile verified)" || { - echo -e "${YELLOW}⚠${NC} Lockfile install failed (may be outdated), falling back to pip install..." +if is_termux; then + export ANDROID_API_LEVEL="$(getprop ro.build.version.sdk 2>/dev/null || printf '%s' "${ANDROID_API_LEVEL:-}")" + echo -e "${CYAN}→${NC} Termux detected — installing the tested Android bundle" + "$SETUP_PYTHON" -m pip install --upgrade pip setuptools wheel + if [ -f "constraints-termux.txt" ]; then + "$SETUP_PYTHON" -m pip install -e ".[termux]" -c constraints-termux.txt || { + echo -e "${YELLOW}⚠${NC} Termux bundle install failed, falling back to base install..." + "$SETUP_PYTHON" -m pip install -e "." -c constraints-termux.txt + } + else + "$SETUP_PYTHON" -m pip install -e ".[termux]" || "$SETUP_PYTHON" -m pip install -e "." + fi + echo -e "${GREEN}✓${NC} Dependencies installed" +else + # Prefer uv sync with lockfile (hash-verified installs) when available, + # fall back to pip install for compatibility or when lockfile is stale. + if [ -f "uv.lock" ]; then + echo -e "${CYAN}→${NC} Using uv.lock for hash-verified installation..." + UV_PROJECT_ENVIRONMENT="$SCRIPT_DIR/venv" $UV_CMD sync --all-extras --locked 2>/dev/null && \ + echo -e "${GREEN}✓${NC} Dependencies installed (lockfile verified)" || { + echo -e "${YELLOW}⚠${NC} Lockfile install failed (may be outdated), falling back to pip install..." + $UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "." + echo -e "${GREEN}✓${NC} Dependencies installed" + } + else $UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "." echo -e "${GREEN}✓${NC} Dependencies installed" - } -else - $UV_CMD pip install -e ".[all]" || $UV_CMD pip install -e "." 
- echo -e "${GREEN}✓${NC} Dependencies installed" + fi fi # ============================================================================ @@ -138,7 +200,9 @@ fi echo -e "${CYAN}→${NC} Installing optional submodules..." # tinker-atropos (RL training backend) -if [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then +if is_termux; then + echo -e "${CYAN}→${NC} Skipping tinker-atropos on Termux (not part of the tested Android path)" +elif [ -d "tinker-atropos" ] && [ -f "tinker-atropos/pyproject.toml" ]; then $UV_CMD pip install -e "./tinker-atropos" && \ echo -e "${GREEN}✓${NC} tinker-atropos installed" || \ echo -e "${YELLOW}⚠${NC} tinker-atropos install failed (RL tools may not work)" @@ -160,34 +224,42 @@ else echo if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then INSTALLED=false - - # Check if sudo is available - if command -v sudo &> /dev/null && sudo -n true 2>/dev/null; then - if command -v apt &> /dev/null; then - sudo apt install -y ripgrep && INSTALLED=true - elif command -v dnf &> /dev/null; then - sudo dnf install -y ripgrep && INSTALLED=true + + if is_termux; then + pkg install -y ripgrep && INSTALLED=true + else + # Check if sudo is available + if command -v sudo &> /dev/null && sudo -n true 2>/dev/null; then + if command -v apt &> /dev/null; then + sudo apt install -y ripgrep && INSTALLED=true + elif command -v dnf &> /dev/null; then + sudo dnf install -y ripgrep && INSTALLED=true + fi + fi + + # Try brew (no sudo needed) + if [ "$INSTALLED" = false ] && command -v brew &> /dev/null; then + brew install ripgrep && INSTALLED=true + fi + + # Try cargo (no sudo needed) + if [ "$INSTALLED" = false ] && command -v cargo &> /dev/null; then + echo -e "${CYAN}→${NC} Trying cargo install (no sudo required)..." 
+ cargo install ripgrep && INSTALLED=true fi fi - - # Try brew (no sudo needed) - if [ "$INSTALLED" = false ] && command -v brew &> /dev/null; then - brew install ripgrep && INSTALLED=true - fi - - # Try cargo (no sudo needed) - if [ "$INSTALLED" = false ] && command -v cargo &> /dev/null; then - echo -e "${CYAN}→${NC} Trying cargo install (no sudo required)..." - cargo install ripgrep && INSTALLED=true - fi - + if [ "$INSTALLED" = true ]; then echo -e "${GREEN}✓${NC} ripgrep installed" else echo -e "${YELLOW}⚠${NC} Auto-install failed. Install options:" - echo " sudo apt install ripgrep # Debian/Ubuntu" - echo " brew install ripgrep # macOS" - echo " cargo install ripgrep # With Rust (no sudo)" + if is_termux; then + echo " pkg install ripgrep # Termux / Android" + else + echo " sudo apt install ripgrep # Debian/Ubuntu" + echo " brew install ripgrep # macOS" + echo " cargo install ripgrep # With Rust (no sudo)" + fi echo " https://github.com/BurntSushi/ripgrep#installation" fi fi @@ -207,49 +279,56 @@ else fi # ============================================================================ -# PATH setup — symlink hermes into ~/.local/bin +# PATH setup — symlink hermes into a user-facing bin dir # ============================================================================ echo -e "${CYAN}→${NC} Setting up hermes command..." HERMES_BIN="$SCRIPT_DIR/venv/bin/hermes" -mkdir -p "$HOME/.local/bin" -ln -sf "$HERMES_BIN" "$HOME/.local/bin/hermes" -echo -e "${GREEN}✓${NC} Symlinked hermes → ~/.local/bin/hermes" +COMMAND_LINK_DIR="$(get_command_link_dir)" +COMMAND_LINK_DISPLAY_DIR="$(get_command_link_display_dir)" +mkdir -p "$COMMAND_LINK_DIR" +ln -sf "$HERMES_BIN" "$COMMAND_LINK_DIR/hermes" +echo -e "${GREEN}✓${NC} Symlinked hermes → $COMMAND_LINK_DISPLAY_DIR/hermes" -# Determine the appropriate shell config file -SHELL_CONFIG="" -if [[ "$SHELL" == *"zsh"* ]]; then - SHELL_CONFIG="$HOME/.zshrc" -elif [[ "$SHELL" == *"bash"* ]]; then - SHELL_CONFIG="$HOME/.bashrc" - [ ! 
-f "$SHELL_CONFIG" ] && SHELL_CONFIG="$HOME/.bash_profile" +if is_termux; then + export PATH="$COMMAND_LINK_DIR:$PATH" + echo -e "${GREEN}✓${NC} $COMMAND_LINK_DISPLAY_DIR is already on PATH in Termux" else - # Fallback to checking existing files - if [ -f "$HOME/.zshrc" ]; then + # Determine the appropriate shell config file + SHELL_CONFIG="" + if [[ "$SHELL" == *"zsh"* ]]; then SHELL_CONFIG="$HOME/.zshrc" - elif [ -f "$HOME/.bashrc" ]; then + elif [[ "$SHELL" == *"bash"* ]]; then SHELL_CONFIG="$HOME/.bashrc" - elif [ -f "$HOME/.bash_profile" ]; then - SHELL_CONFIG="$HOME/.bash_profile" - fi -fi - -if [ -n "$SHELL_CONFIG" ]; then - # Touch the file just in case it doesn't exist yet but was selected - touch "$SHELL_CONFIG" 2>/dev/null || true - - if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then - if ! grep -q '\.local/bin' "$SHELL_CONFIG" 2>/dev/null; then - echo "" >> "$SHELL_CONFIG" - echo "# Hermes Agent — ensure ~/.local/bin is on PATH" >> "$SHELL_CONFIG" - echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$SHELL_CONFIG" - echo -e "${GREEN}✓${NC} Added ~/.local/bin to PATH in $SHELL_CONFIG" - else - echo -e "${GREEN}✓${NC} ~/.local/bin already in $SHELL_CONFIG" - fi + [ ! -f "$SHELL_CONFIG" ] && SHELL_CONFIG="$HOME/.bash_profile" else - echo -e "${GREEN}✓${NC} ~/.local/bin already on PATH" + # Fallback to checking existing files + if [ -f "$HOME/.zshrc" ]; then + SHELL_CONFIG="$HOME/.zshrc" + elif [ -f "$HOME/.bashrc" ]; then + SHELL_CONFIG="$HOME/.bashrc" + elif [ -f "$HOME/.bash_profile" ]; then + SHELL_CONFIG="$HOME/.bash_profile" + fi + fi + + if [ -n "$SHELL_CONFIG" ]; then + # Touch the file just in case it doesn't exist yet but was selected + touch "$SHELL_CONFIG" 2>/dev/null || true + + if ! echo "$PATH" | tr ':' '\n' | grep -q "^$HOME/.local/bin$"; then + if ! 
grep -q '\.local/bin' "$SHELL_CONFIG" 2>/dev/null; then + echo "" >> "$SHELL_CONFIG" + echo "# Hermes Agent — ensure ~/.local/bin is on PATH" >> "$SHELL_CONFIG" + echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$SHELL_CONFIG" + echo -e "${GREEN}✓${NC} Added ~/.local/bin to PATH in $SHELL_CONFIG" + else + echo -e "${GREEN}✓${NC} ~/.local/bin already in $SHELL_CONFIG" + fi + else + echo -e "${GREEN}✓${NC} ~/.local/bin already on PATH" + fi fi fi @@ -281,18 +360,31 @@ echo -e "${GREEN}✓ Setup complete!${NC}" echo "" echo "Next steps:" echo "" -echo " 1. Reload your shell:" -echo " source $SHELL_CONFIG" -echo "" -echo " 2. Run the setup wizard to configure API keys:" -echo " hermes setup" -echo "" -echo " 3. Start chatting:" -echo " hermes" -echo "" +if is_termux; then + echo " 1. Run the setup wizard to configure API keys:" + echo " hermes setup" + echo "" + echo " 2. Start chatting:" + echo " hermes" + echo "" +else + echo " 1. Reload your shell:" + echo " source $SHELL_CONFIG" + echo "" + echo " 2. Run the setup wizard to configure API keys:" + echo " hermes setup" + echo "" + echo " 3. Start chatting:" + echo " hermes" + echo "" +fi echo "Other commands:" echo " hermes status # Check configuration" -echo " hermes gateway install # Install gateway service (messaging + cron)" +if is_termux; then + echo " hermes gateway # Run gateway in foreground" +else + echo " hermes gateway install # Install gateway service (messaging + cron)" +fi echo " hermes cron list # View scheduled jobs" echo " hermes doctor # Diagnose issues" echo "" diff --git a/skills/autonomous-ai-agents/hermes-agent/SKILL.md b/skills/autonomous-ai-agents/hermes-agent/SKILL.md index 8d93e3fb79..6d8cd1c617 100644 --- a/skills/autonomous-ai-agents/hermes-agent/SKILL.md +++ b/skills/autonomous-ai-agents/hermes-agent/SKILL.md @@ -249,9 +249,8 @@ Type these during an interactive chat session. 
/config Show config (CLI) /model [name] Show or change model /provider Show provider info -/prompt [text] View/set system prompt (CLI) /personality [name] Set personality -/reasoning [level] Set reasoning (none|low|medium|high|xhigh|show|hide) +/reasoning [level] Set reasoning (none|minimal|low|medium|high|xhigh|show|hide) /verbose Cycle: off → new → all → verbose /voice [on|off|tts] Voice mode /yolo Toggle approval bypass diff --git a/skills/productivity/google-workspace/SKILL.md b/skills/productivity/google-workspace/SKILL.md index 60b9693d17..e4553e4256 100644 --- a/skills/productivity/google-workspace/SKILL.md +++ b/skills/productivity/google-workspace/SKILL.md @@ -1,7 +1,7 @@ --- name: google-workspace -description: Gmail, Calendar, Drive, Contacts, Sheets, and Docs integration via Python. Uses OAuth2 with automatic token refresh. No external binaries needed — runs entirely with Google's Python client libraries in the Hermes venv. -version: 1.0.0 +description: Gmail, Calendar, Drive, Contacts, Sheets, and Docs integration via gws CLI (googleworkspace/cli). Uses OAuth2 with automatic token refresh via bridge script. Requires gws binary. +version: 2.0.0 author: Nous Research license: MIT required_credential_files: @@ -11,14 +11,25 @@ required_credential_files: description: Google OAuth2 client credentials (downloaded from Google Cloud Console) metadata: hermes: - tags: [Google, Gmail, Calendar, Drive, Sheets, Docs, Contacts, Email, OAuth] + tags: [Google, Gmail, Calendar, Drive, Sheets, Docs, Contacts, Email, OAuth, gws] homepage: https://github.com/NousResearch/hermes-agent related_skills: [himalaya] --- # Google Workspace -Gmail, Calendar, Drive, Contacts, Sheets, and Docs — all through Python scripts in this skill. No external binaries to install. +Gmail, Calendar, Drive, Contacts, Sheets, and Docs — powered by `gws` (Google's official Rust CLI). The skill provides a backward-compatible Python wrapper that handles OAuth token refresh and delegates to `gws`. 
+ +## Architecture + +``` +google_api.py → gws_bridge.py → gws CLI +(argparse compat) (token refresh) (Google APIs) +``` + +- `setup.py` handles OAuth2 (headless-compatible, works on CLI/Telegram/Discord) +- `gws_bridge.py` refreshes the Hermes token and injects it into `gws` via `GOOGLE_WORKSPACE_CLI_TOKEN` +- `google_api.py` provides the same CLI interface as v1 but delegates to `gws` ## References @@ -27,7 +38,22 @@ Gmail, Calendar, Drive, Contacts, Sheets, and Docs — all through Python script ## Scripts - `scripts/setup.py` — OAuth2 setup (run once to authorize) -- `scripts/google_api.py` — API wrapper CLI (agent uses this for all operations) +- `scripts/gws_bridge.py` — Token refresh bridge to gws CLI +- `scripts/google_api.py` — Backward-compatible API wrapper (delegates to gws) + +## Prerequisites + +Install `gws`: + +```bash +cargo install google-workspace-cli +# or via npm (recommended, downloads prebuilt binary): +npm install -g @googleworkspace/cli +# or via Homebrew: +brew install googleworkspace-cli +``` + +Verify: `gws --version` ## First-Time Setup @@ -56,42 +82,29 @@ If it prints `AUTHENTICATED`, skip to Usage — setup is already done. ### Step 1: Triage — ask the user what they need -Before starting OAuth setup, ask the user TWO questions: - **Question 1: "What Google services do you need? Just email, or also Calendar/Drive/Sheets/Docs?"** -- **Email only** → They don't need this skill at all. Use the `himalaya` skill - instead — it works with a Gmail App Password (Settings → Security → App - Passwords) and takes 2 minutes to set up. No Google Cloud project needed. - Load the himalaya skill and follow its setup instructions. +- **Email only** → Use the `himalaya` skill instead — simpler setup. +- **Calendar, Drive, Sheets, Docs (or email + these)** → Continue below. -- **Calendar, Drive, Sheets, Docs (or email + these)** → Continue with this - skill's OAuth setup below. +**Partial scopes**: Users can authorize only a subset of services. 
The setup +script accepts partial scopes and warns about missing ones. -**Question 2: "Does your Google account use Advanced Protection (hardware -security keys required to sign in)? If you're not sure, you probably don't -— it's something you would have explicitly enrolled in."** +**Question 2: "Does your Google account use Advanced Protection?"** -- **No / Not sure** → Normal setup. Continue below. -- **Yes** → Their Workspace admin must add the OAuth client ID to the org's - allowed apps list before Step 4 will work. Let them know upfront. +- **No / Not sure** → Normal setup. +- **Yes** → Workspace admin must add the OAuth client ID to allowed apps first. ### Step 2: Create OAuth credentials (one-time, ~5 minutes) Tell the user: -> You need a Google Cloud OAuth client. This is a one-time setup: -> > 1. Go to https://console.cloud.google.com/apis/credentials > 2. Create a project (or use an existing one) -> 3. Click "Enable APIs" and enable: Gmail API, Google Calendar API, -> Google Drive API, Google Sheets API, Google Docs API, People API -> 4. Go to Credentials → Create Credentials → OAuth 2.0 Client ID -> 5. Application type: "Desktop app" → Create -> 6. Click "Download JSON" and tell me the file path - -Once they provide the path: +> 3. Enable the APIs you need (Gmail, Calendar, Drive, Sheets, Docs, People) +> 4. Credentials → Create Credentials → OAuth 2.0 Client ID → Desktop app +> 5. Download JSON and tell me the file path ```bash $GSETUP --client-secret /path/to/client_secret.json @@ -103,20 +116,10 @@ $GSETUP --client-secret /path/to/client_secret.json $GSETUP --auth-url ``` -This prints a URL. **Send the URL to the user** and tell them: - -> Open this link in your browser, sign in with your Google account, and -> authorize access. After authorizing, you'll be redirected to a page that -> may show an error — that's expected. Copy the ENTIRE URL from your -> browser's address bar and paste it back to me. +Send the URL to the user. 
After authorizing, they paste back the redirect URL or code. ### Step 4: Exchange the code -The user will paste back either a URL like `http://localhost:1/?code=4/0A...&scope=...` -or just the code string. Either works. The `--auth-url` step stores a temporary -pending OAuth session locally so `--auth-code` can complete the PKCE exchange -later, even on headless systems: - ```bash $GSETUP --auth-code "THE_URL_OR_CODE_THE_USER_PASTED" ``` @@ -127,18 +130,11 @@ $GSETUP --auth-code "THE_URL_OR_CODE_THE_USER_PASTED" $GSETUP --check ``` -Should print `AUTHENTICATED`. Setup is complete — token refreshes automatically from now on. - -### Notes - -- Token is stored at `google_token.json` under the active profile's `HERMES_HOME` and auto-refreshes. -- Pending OAuth session state/verifier are stored temporarily at `google_oauth_pending.json` under the active profile's `HERMES_HOME` until exchange completes. -- Hermes now refuses to overwrite a full Google Workspace token with a narrower re-auth token missing Gmail scopes, so one profile's partial consent cannot silently break email actions later. -- To revoke: `$GSETUP --revoke` +Should print `AUTHENTICATED`. Token refreshes automatically from now on. ## Usage -All commands go through the API script. Set `GAPI` as a shorthand: +All commands go through the API script: ```bash HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}" @@ -153,40 +149,21 @@ GAPI="$PYTHON_BIN $GWORKSPACE_SKILL_DIR/scripts/google_api.py" ### Gmail ```bash -# Search (returns JSON array with id, from, subject, date, snippet) $GAPI gmail search "is:unread" --max 10 -$GAPI gmail search "from:boss@company.com newer_than:1d" -$GAPI gmail search "has:attachment filename:pdf newer_than:7d" - -# Read full message (returns JSON with body text) $GAPI gmail get MESSAGE_ID - -# Send $GAPI gmail send --to user@example.com --subject "Hello" --body "Message text" -$GAPI gmail send --to user@example.com --subject "Report" --body "
<h1>Q4</h1><p>Details...</p>
" --html - -# Reply (automatically threads and sets In-Reply-To) +$GAPI gmail send --to user@example.com --subject "Report" --body "<h1>Q4</h1>
" --html $GAPI gmail reply MESSAGE_ID --body "Thanks, that works for me." - -# Labels $GAPI gmail labels $GAPI gmail modify MESSAGE_ID --add-labels LABEL_ID -$GAPI gmail modify MESSAGE_ID --remove-labels UNREAD ``` ### Calendar ```bash -# List events (defaults to next 7 days) $GAPI calendar list -$GAPI calendar list --start 2026-03-01T00:00:00Z --end 2026-03-07T23:59:59Z - -# Create event (ISO 8601 with timezone required) -$GAPI calendar create --summary "Team Standup" --start 2026-03-01T10:00:00-06:00 --end 2026-03-01T10:30:00-06:00 -$GAPI calendar create --summary "Lunch" --start 2026-03-01T12:00:00Z --end 2026-03-01T13:00:00Z --location "Cafe" -$GAPI calendar create --summary "Review" --start 2026-03-01T14:00:00Z --end 2026-03-01T15:00:00Z --attendees "alice@co.com,bob@co.com" - -# Delete event +$GAPI calendar create --summary "Standup" --start 2026-03-01T10:00:00+01:00 --end 2026-03-01T10:30:00+01:00 +$GAPI calendar create --summary "Review" --start ... --end ... --attendees "alice@co.com,bob@co.com" $GAPI calendar delete EVENT_ID ``` @@ -206,13 +183,8 @@ $GAPI contacts list --max 20 ### Sheets ```bash -# Read $GAPI sheets get SHEET_ID "Sheet1!A1:D10" - -# Write $GAPI sheets update SHEET_ID "Sheet1!A1:B2" --values '[["Name","Score"],["Alice","95"]]' - -# Append rows $GAPI sheets append SHEET_ID "Sheet1!A:C" --values '[["new","row","data"]]' ``` @@ -222,37 +194,52 @@ $GAPI sheets append SHEET_ID "Sheet1!A:C" --values '[["new","row","data"]]' $GAPI docs get DOC_ID ``` +### Direct gws access (advanced) + +For operations not covered by the wrapper, use `gws_bridge.py` directly: + +```bash +GBRIDGE="$PYTHON_BIN $GWORKSPACE_SKILL_DIR/scripts/gws_bridge.py" +$GBRIDGE calendar +agenda --today --format table +$GBRIDGE gmail +triage --labels --format json +$GBRIDGE drive +upload ./report.pdf +$GBRIDGE sheets +read --spreadsheet SHEET_ID --range "Sheet1!A1:D10" +``` + ## Output Format -All commands return JSON. Parse with `jq` or read directly. 
Key fields: +All commands return JSON via `gws --format json`. Key output shapes: -- **Gmail search**: `[{id, threadId, from, to, subject, date, snippet, labels}]` -- **Gmail get**: `{id, threadId, from, to, subject, date, labels, body}` -- **Gmail send/reply**: `{status: "sent", id, threadId}` -- **Calendar list**: `[{id, summary, start, end, location, description, htmlLink}]` -- **Calendar create**: `{status: "created", id, summary, htmlLink}` -- **Drive search**: `[{id, name, mimeType, modifiedTime, webViewLink}]` -- **Contacts list**: `[{name, emails: [...], phones: [...]}]` -- **Sheets get**: `[[cell, cell, ...], ...]` +- **Gmail search/triage**: Array of message summaries (sender, subject, date, snippet) +- **Gmail get/read**: Message object with headers and body text +- **Gmail send/reply**: Confirmation with message ID +- **Calendar list/agenda**: Array of event objects (summary, start, end, location) +- **Calendar create**: Confirmation with event ID and htmlLink +- **Drive search**: Array of file objects (id, name, mimeType, webViewLink) +- **Sheets get/read**: 2D array of cell values +- **Docs get**: Full document JSON (use `body.content` for text extraction) +- **Contacts list**: Array of person objects with names, emails, phones + +Parse output with `jq` or read JSON directly. ## Rules -1. **Never send email or create/delete events without confirming with the user first.** Show the draft content and ask for approval. -2. **Check auth before first use** — run `setup.py --check`. If it fails, guide the user through setup. -3. **Use the Gmail search syntax reference** for complex queries — load it with `skill_view("google-workspace", file_path="references/gmail-search-syntax.md")`. -4. **Calendar times must include timezone** — always use ISO 8601 with offset (e.g., `2026-03-01T10:00:00-06:00`) or UTC (`Z`). -5. **Respect rate limits** — avoid rapid-fire sequential API calls. Batch reads when possible. +1. 
**Never send email or create/delete events without confirming with the user first.** +2. **Check auth before first use** — run `setup.py --check`. +3. **Use the Gmail search syntax reference** for complex queries. +4. **Calendar times must include timezone** — ISO 8601 with offset or UTC. +5. **Respect rate limits** — avoid rapid-fire sequential API calls. ## Troubleshooting | Problem | Fix | |---------|-----| -| `NOT_AUTHENTICATED` | Run setup Steps 2-5 above | -| `REFRESH_FAILED` | Token revoked or expired — redo Steps 3-5 | -| `HttpError 403: Insufficient Permission` | Missing API scope — `$GSETUP --revoke` then redo Steps 3-5 | -| `HttpError 403: Access Not Configured` | API not enabled — user needs to enable it in Google Cloud Console | -| `ModuleNotFoundError` | Run `$GSETUP --install-deps` | -| Advanced Protection blocks auth | Workspace admin must allowlist the OAuth client ID | +| `NOT_AUTHENTICATED` | Run setup Steps 2-5 | +| `REFRESH_FAILED` | Token revoked — redo Steps 3-5 | +| `gws: command not found` | Install: `npm install -g @googleworkspace/cli` | +| `HttpError 403` | Missing scope — `$GSETUP --revoke` then redo Steps 3-5 | +| `HttpError 403: Access Not Configured` | Enable API in Google Cloud Console | +| Advanced Protection blocks auth | Admin must allowlist the OAuth client ID | ## Revoking Access diff --git a/skills/productivity/google-workspace/scripts/google_api.py b/skills/productivity/google-workspace/scripts/google_api.py index ece0c3ea03..ae8732f4bc 100644 --- a/skills/productivity/google-workspace/scripts/google_api.py +++ b/skills/productivity/google-workspace/scripts/google_api.py @@ -1,16 +1,17 @@ #!/usr/bin/env python3 """Google Workspace API CLI for Hermes Agent. -A thin CLI wrapper around Google's Python client libraries. -Authenticates using the token stored by setup.py. +Thin wrapper that delegates to gws (googleworkspace/cli) via gws_bridge.py. +Maintains the same CLI interface for backward compatibility with Hermes skills. 
Usage: python google_api.py gmail search "is:unread" [--max 10] python google_api.py gmail get MESSAGE_ID python google_api.py gmail send --to user@example.com --subject "Hi" --body "Hello" python google_api.py gmail reply MESSAGE_ID --body "Thanks" - python google_api.py calendar list [--from DATE] [--to DATE] [--calendar primary] + python google_api.py calendar list [--start DATE] [--end DATE] [--calendar primary] python google_api.py calendar create --summary "Meeting" --start DATETIME --end DATETIME + python google_api.py calendar delete EVENT_ID python google_api.py drive search "budget report" [--max 10] python google_api.py contacts list [--max 20] python google_api.py sheets get SHEET_ID RANGE @@ -20,386 +21,193 @@ Usage: """ import argparse -import base64 import json +import os +import subprocess import sys -from datetime import datetime, timedelta, timezone -from email.mime.text import MIMEText from pathlib import Path -try: - from hermes_constants import display_hermes_home, get_hermes_home -except ModuleNotFoundError: - HERMES_AGENT_ROOT = Path(__file__).resolve().parents[4] - if HERMES_AGENT_ROOT.exists(): - sys.path.insert(0, str(HERMES_AGENT_ROOT)) - from hermes_constants import display_hermes_home, get_hermes_home - -HERMES_HOME = get_hermes_home() -TOKEN_PATH = HERMES_HOME / "google_token.json" - -SCOPES = [ - "https://www.googleapis.com/auth/gmail.readonly", - "https://www.googleapis.com/auth/gmail.send", - "https://www.googleapis.com/auth/gmail.modify", - "https://www.googleapis.com/auth/calendar", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/contacts.readonly", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/documents.readonly", -] +BRIDGE = Path(__file__).parent / "gws_bridge.py" +PYTHON = sys.executable -def _missing_scopes() -> list[str]: - try: - payload = json.loads(TOKEN_PATH.read_text()) - except Exception: - return [] - raw = payload.get("scopes") or 
payload.get("scope") - if not raw: - return [] - granted = {s.strip() for s in (raw.split() if isinstance(raw, str) else raw) if s.strip()} - return sorted(scope for scope in SCOPES if scope not in granted) +def gws(*args: str) -> None: + """Call gws via the bridge and exit with its return code.""" + result = subprocess.run( + [PYTHON, str(BRIDGE)] + list(args), + env={**os.environ, "HERMES_HOME": os.environ.get("HERMES_HOME", str(Path.home() / ".hermes"))}, + ) + sys.exit(result.returncode) -def get_credentials(): - """Load and refresh credentials from token file.""" - if not TOKEN_PATH.exists(): - print("Not authenticated. Run the setup script first:", file=sys.stderr) - print(f" python {Path(__file__).parent / 'setup.py'}", file=sys.stderr) - sys.exit(1) - - from google.oauth2.credentials import Credentials - from google.auth.transport.requests import Request - - creds = Credentials.from_authorized_user_file(str(TOKEN_PATH), SCOPES) - if creds.expired and creds.refresh_token: - creds.refresh(Request()) - TOKEN_PATH.write_text(creds.to_json()) - if not creds.valid: - print("Token is invalid. 
Re-run setup.", file=sys.stderr) - sys.exit(1) - - missing_scopes = _missing_scopes() - if missing_scopes: - print( - "Token is valid but missing Google Workspace scopes required by this skill.", - file=sys.stderr, - ) - for scope in missing_scopes: - print(f" - {scope}", file=sys.stderr) - print( - f"Re-run setup.py from the active Hermes profile ({display_hermes_home()}) to restore full access.", - file=sys.stderr, - ) - sys.exit(1) - return creds - - -def build_service(api, version): - from googleapiclient.discovery import build - return build(api, version, credentials=get_credentials()) - - -# ========================================================================= -# Gmail -# ========================================================================= +# -- Gmail -- def gmail_search(args): - service = build_service("gmail", "v1") - results = service.users().messages().list( - userId="me", q=args.query, maxResults=args.max - ).execute() - messages = results.get("messages", []) - if not messages: - print("No messages found.") - return - - output = [] - for msg_meta in messages: - msg = service.users().messages().get( - userId="me", id=msg_meta["id"], format="metadata", - metadataHeaders=["From", "To", "Subject", "Date"], - ).execute() - headers = {h["name"]: h["value"] for h in msg.get("payload", {}).get("headers", [])} - output.append({ - "id": msg["id"], - "threadId": msg["threadId"], - "from": headers.get("From", ""), - "to": headers.get("To", ""), - "subject": headers.get("Subject", ""), - "date": headers.get("Date", ""), - "snippet": msg.get("snippet", ""), - "labels": msg.get("labelIds", []), - }) - print(json.dumps(output, indent=2, ensure_ascii=False)) - + cmd = ["gmail", "+triage", "--query", args.query, "--max", str(args.max), "--format", "json"] + gws(*cmd) def gmail_get(args): - service = build_service("gmail", "v1") - msg = service.users().messages().get( - userId="me", id=args.message_id, format="full" - ).execute() - - headers = {h["name"]: 
h["value"] for h in msg.get("payload", {}).get("headers", [])} - - # Extract body text - body = "" - payload = msg.get("payload", {}) - if payload.get("body", {}).get("data"): - body = base64.urlsafe_b64decode(payload["body"]["data"]).decode("utf-8", errors="replace") - elif payload.get("parts"): - for part in payload["parts"]: - if part.get("mimeType") == "text/plain" and part.get("body", {}).get("data"): - body = base64.urlsafe_b64decode(part["body"]["data"]).decode("utf-8", errors="replace") - break - if not body: - for part in payload["parts"]: - if part.get("mimeType") == "text/html" and part.get("body", {}).get("data"): - body = base64.urlsafe_b64decode(part["body"]["data"]).decode("utf-8", errors="replace") - break - - result = { - "id": msg["id"], - "threadId": msg["threadId"], - "from": headers.get("From", ""), - "to": headers.get("To", ""), - "subject": headers.get("Subject", ""), - "date": headers.get("Date", ""), - "labels": msg.get("labelIds", []), - "body": body, - } - print(json.dumps(result, indent=2, ensure_ascii=False)) - + gws("gmail", "+read", "--id", args.message_id, "--headers", "--format", "json") def gmail_send(args): - service = build_service("gmail", "v1") - message = MIMEText(args.body, "html" if args.html else "plain") - message["to"] = args.to - message["subject"] = args.subject + cmd = ["gmail", "+send", "--to", args.to, "--subject", args.subject, "--body", args.body, "--format", "json"] if args.cc: - message["cc"] = args.cc - - raw = base64.urlsafe_b64encode(message.as_bytes()).decode() - body = {"raw": raw} - - if args.thread_id: - body["threadId"] = args.thread_id - - result = service.users().messages().send(userId="me", body=body).execute() - print(json.dumps({"status": "sent", "id": result["id"], "threadId": result.get("threadId", "")}, indent=2)) - + cmd += ["--cc", args.cc] + if args.html: + cmd.append("--html") + gws(*cmd) def gmail_reply(args): - service = build_service("gmail", "v1") - # Fetch original to get thread ID and 
headers - original = service.users().messages().get( - userId="me", id=args.message_id, format="metadata", - metadataHeaders=["From", "Subject", "Message-ID"], - ).execute() - headers = {h["name"]: h["value"] for h in original.get("payload", {}).get("headers", [])} - - subject = headers.get("Subject", "") - if not subject.startswith("Re:"): - subject = f"Re: {subject}" - - message = MIMEText(args.body) - message["to"] = headers.get("From", "") - message["subject"] = subject - if headers.get("Message-ID"): - message["In-Reply-To"] = headers["Message-ID"] - message["References"] = headers["Message-ID"] - - raw = base64.urlsafe_b64encode(message.as_bytes()).decode() - body = {"raw": raw, "threadId": original["threadId"]} - - result = service.users().messages().send(userId="me", body=body).execute() - print(json.dumps({"status": "sent", "id": result["id"], "threadId": result.get("threadId", "")}, indent=2)) - + gws("gmail", "+reply", "--message-id", args.message_id, "--body", args.body, "--format", "json") def gmail_labels(args): - service = build_service("gmail", "v1") - results = service.users().labels().list(userId="me").execute() - labels = [{"id": l["id"], "name": l["name"], "type": l.get("type", "")} for l in results.get("labels", [])] - print(json.dumps(labels, indent=2)) - + gws("gmail", "users", "labels", "list", "--params", json.dumps({"userId": "me"}), "--format", "json") def gmail_modify(args): - service = build_service("gmail", "v1") body = {} if args.add_labels: body["addLabelIds"] = args.add_labels.split(",") if args.remove_labels: body["removeLabelIds"] = args.remove_labels.split(",") - result = service.users().messages().modify(userId="me", id=args.message_id, body=body).execute() - print(json.dumps({"id": result["id"], "labels": result.get("labelIds", [])}, indent=2)) + gws( + "gmail", "users", "messages", "modify", + "--params", json.dumps({"userId": "me", "id": args.message_id}), + "--json", json.dumps(body), + "--format", "json", + ) -# 
========================================================================= -# Calendar -# ========================================================================= +# -- Calendar -- def calendar_list(args): - service = build_service("calendar", "v3") - now = datetime.now(timezone.utc) - time_min = args.start or now.isoformat() - time_max = args.end or (now + timedelta(days=7)).isoformat() - - # Ensure timezone info - for val in [time_min, time_max]: - if "T" in val and "Z" not in val and "+" not in val and "-" not in val[11:]: - val += "Z" - - results = service.events().list( - calendarId=args.calendar, timeMin=time_min, timeMax=time_max, - maxResults=args.max, singleEvents=True, orderBy="startTime", - ).execute() - - events = [] - for e in results.get("items", []): - events.append({ - "id": e["id"], - "summary": e.get("summary", "(no title)"), - "start": e.get("start", {}).get("dateTime", e.get("start", {}).get("date", "")), - "end": e.get("end", {}).get("dateTime", e.get("end", {}).get("date", "")), - "location": e.get("location", ""), - "description": e.get("description", ""), - "status": e.get("status", ""), - "htmlLink": e.get("htmlLink", ""), - }) - print(json.dumps(events, indent=2, ensure_ascii=False)) - + if args.start or args.end: + # Specific date range — use raw Calendar API for precise timeMin/timeMax + from datetime import datetime, timedelta, timezone as tz + now = datetime.now(tz.utc) + time_min = args.start or now.isoformat() + time_max = args.end or (now + timedelta(days=7)).isoformat() + gws( + "calendar", "events", "list", + "--params", json.dumps({ + "calendarId": args.calendar, + "timeMin": time_min, + "timeMax": time_max, + "maxResults": args.max, + "singleEvents": True, + "orderBy": "startTime", + }), + "--format", "json", + ) + else: + # No date range — use +agenda helper (defaults to 7 days) + cmd = ["calendar", "+agenda", "--days", "7", "--format", "json"] + if args.calendar != "primary": + cmd += ["--calendar", args.calendar] + gws(*cmd) 
def calendar_create(args): - service = build_service("calendar", "v3") - event = { - "summary": args.summary, - "start": {"dateTime": args.start}, - "end": {"dateTime": args.end}, - } + cmd = [ + "calendar", "+insert", + "--summary", args.summary, + "--start", args.start, + "--end", args.end, + "--format", "json", + ] if args.location: - event["location"] = args.location + cmd += ["--location", args.location] if args.description: - event["description"] = args.description + cmd += ["--description", args.description] if args.attendees: - event["attendees"] = [{"email": e.strip()} for e in args.attendees.split(",")] - - result = service.events().insert(calendarId=args.calendar, body=event).execute() - print(json.dumps({ - "status": "created", - "id": result["id"], - "summary": result.get("summary", ""), - "htmlLink": result.get("htmlLink", ""), - }, indent=2)) - + for email in args.attendees.split(","): + cmd += ["--attendee", email.strip()] + if args.calendar != "primary": + cmd += ["--calendar", args.calendar] + gws(*cmd) def calendar_delete(args): - service = build_service("calendar", "v3") - service.events().delete(calendarId=args.calendar, eventId=args.event_id).execute() - print(json.dumps({"status": "deleted", "eventId": args.event_id})) + gws( + "calendar", "events", "delete", + "--params", json.dumps({"calendarId": args.calendar, "eventId": args.event_id}), + "--format", "json", + ) -# ========================================================================= -# Drive -# ========================================================================= +# -- Drive -- def drive_search(args): - service = build_service("drive", "v3") - query = f"fullText contains '{args.query}'" if not args.raw_query else args.query - results = service.files().list( - q=query, pageSize=args.max, fields="files(id, name, mimeType, modifiedTime, webViewLink)", - ).execute() - files = results.get("files", []) - print(json.dumps(files, indent=2, ensure_ascii=False)) + query = args.query if 
args.raw_query else f"fullText contains '{args.query}'" + gws( + "drive", "files", "list", + "--params", json.dumps({ + "q": query, + "pageSize": args.max, + "fields": "files(id,name,mimeType,modifiedTime,webViewLink)", + }), + "--format", "json", + ) -# ========================================================================= -# Contacts -# ========================================================================= +# -- Contacts -- def contacts_list(args): - service = build_service("people", "v1") - results = service.people().connections().list( - resourceName="people/me", - pageSize=args.max, - personFields="names,emailAddresses,phoneNumbers", - ).execute() - contacts = [] - for person in results.get("connections", []): - names = person.get("names", [{}]) - emails = person.get("emailAddresses", []) - phones = person.get("phoneNumbers", []) - contacts.append({ - "name": names[0].get("displayName", "") if names else "", - "emails": [e.get("value", "") for e in emails], - "phones": [p.get("value", "") for p in phones], - }) - print(json.dumps(contacts, indent=2, ensure_ascii=False)) + gws( + "people", "people", "connections", "list", + "--params", json.dumps({ + "resourceName": "people/me", + "pageSize": args.max, + "personFields": "names,emailAddresses,phoneNumbers", + }), + "--format", "json", + ) -# ========================================================================= -# Sheets -# ========================================================================= +# -- Sheets -- def sheets_get(args): - service = build_service("sheets", "v4") - result = service.spreadsheets().values().get( - spreadsheetId=args.sheet_id, range=args.range, - ).execute() - print(json.dumps(result.get("values", []), indent=2, ensure_ascii=False)) - + gws( + "sheets", "+read", + "--spreadsheet", args.sheet_id, + "--range", args.range, + "--format", "json", + ) def sheets_update(args): - service = build_service("sheets", "v4") values = json.loads(args.values) - body = {"values": values} - 
result = service.spreadsheets().values().update( - spreadsheetId=args.sheet_id, range=args.range, - valueInputOption="USER_ENTERED", body=body, - ).execute() - print(json.dumps({"updatedCells": result.get("updatedCells", 0), "updatedRange": result.get("updatedRange", "")}, indent=2)) - + gws( + "sheets", "spreadsheets", "values", "update", + "--params", json.dumps({ + "spreadsheetId": args.sheet_id, + "range": args.range, + "valueInputOption": "USER_ENTERED", + }), + "--json", json.dumps({"values": values}), + "--format", "json", + ) def sheets_append(args): - service = build_service("sheets", "v4") values = json.loads(args.values) - body = {"values": values} - result = service.spreadsheets().values().append( - spreadsheetId=args.sheet_id, range=args.range, - valueInputOption="USER_ENTERED", insertDataOption="INSERT_ROWS", body=body, - ).execute() - print(json.dumps({"updatedCells": result.get("updates", {}).get("updatedCells", 0)}, indent=2)) + gws( + "sheets", "+append", + "--spreadsheet", args.sheet_id, + "--json-values", json.dumps(values), + "--format", "json", + ) -# ========================================================================= -# Docs -# ========================================================================= +# -- Docs -- def docs_get(args): - service = build_service("docs", "v1") - doc = service.documents().get(documentId=args.doc_id).execute() - # Extract plain text from the document structure - text_parts = [] - for element in doc.get("body", {}).get("content", []): - paragraph = element.get("paragraph", {}) - for pe in paragraph.get("elements", []): - text_run = pe.get("textRun", {}) - if text_run.get("content"): - text_parts.append(text_run["content"]) - result = { - "title": doc.get("title", ""), - "documentId": doc.get("documentId", ""), - "body": "".join(text_parts), - } - print(json.dumps(result, indent=2, ensure_ascii=False)) + gws( + "docs", "documents", "get", + "--params", json.dumps({"documentId": args.doc_id}), + "--format", 
"json", + ) -# ========================================================================= -# CLI parser -# ========================================================================= +# -- CLI parser (backward-compatible interface) -- def main(): - parser = argparse.ArgumentParser(description="Google Workspace API for Hermes Agent") + parser = argparse.ArgumentParser(description="Google Workspace API for Hermes Agent (gws backend)") sub = parser.add_subparsers(dest="service", required=True) # --- Gmail --- @@ -421,7 +229,7 @@ def main(): p.add_argument("--body", required=True) p.add_argument("--cc", default="") p.add_argument("--html", action="store_true", help="Send body as HTML") - p.add_argument("--thread-id", default="", help="Thread ID for threading") + p.add_argument("--thread-id", default="", help="Thread ID (unused with gws, kept for compat)") p.set_defaults(func=gmail_send) p = gmail_sub.add_parser("reply") diff --git a/skills/productivity/google-workspace/scripts/gws_bridge.py b/skills/productivity/google-workspace/scripts/gws_bridge.py new file mode 100755 index 0000000000..adecd33ad4 --- /dev/null +++ b/skills/productivity/google-workspace/scripts/gws_bridge.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +"""Bridge between Hermes OAuth token and gws CLI. + +Refreshes the token if expired, then executes gws with the valid access token. 
+""" +import json +import os +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path + + +def get_hermes_home() -> Path: + return Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) + + +def get_token_path() -> Path: + return get_hermes_home() / "google_token.json" + + +def refresh_token(token_data: dict) -> dict: + """Refresh the access token using the refresh token.""" + import urllib.error + import urllib.parse + import urllib.request + + params = urllib.parse.urlencode({ + "client_id": token_data["client_id"], + "client_secret": token_data["client_secret"], + "refresh_token": token_data["refresh_token"], + "grant_type": "refresh_token", + }).encode() + + req = urllib.request.Request(token_data["token_uri"], data=params) + try: + with urllib.request.urlopen(req) as resp: + result = json.loads(resp.read()) + except urllib.error.HTTPError as e: + body = e.read().decode("utf-8", errors="replace") + print(f"ERROR: Token refresh failed (HTTP {e.code}): {body}", file=sys.stderr) + print("Re-run setup.py to re-authenticate.", file=sys.stderr) + sys.exit(1) + + token_data["token"] = result["access_token"] + token_data["expiry"] = datetime.fromtimestamp( + datetime.now(timezone.utc).timestamp() + result["expires_in"], + tz=timezone.utc, + ).isoformat() + + get_token_path().write_text(json.dumps(token_data, indent=2)) + return token_data + + +def get_valid_token() -> str: + """Return a valid access token, refreshing if needed.""" + token_path = get_token_path() + if not token_path.exists(): + print("ERROR: No Google token found. 
Run setup.py --auth-url first.", file=sys.stderr) + sys.exit(1) + + token_data = json.loads(token_path.read_text()) + + expiry = token_data.get("expiry", "") + if expiry: + exp_dt = datetime.fromisoformat(expiry.replace("Z", "+00:00")) + now = datetime.now(timezone.utc) + if now >= exp_dt: + token_data = refresh_token(token_data) + + return token_data["token"] + + +def main(): + """Refresh token if needed, then exec gws with remaining args.""" + if len(sys.argv) < 2: + print("Usage: gws_bridge.py ", file=sys.stderr) + sys.exit(1) + + access_token = get_valid_token() + env = os.environ.copy() + env["GOOGLE_WORKSPACE_CLI_TOKEN"] = access_token + + result = subprocess.run(["gws"] + sys.argv[1:], env=env) + sys.exit(result.returncode) + + +if __name__ == "__main__": + main() diff --git a/skills/productivity/google-workspace/scripts/setup.py b/skills/productivity/google-workspace/scripts/setup.py index 5e4924f9d2..cb8c38cb98 100644 --- a/skills/productivity/google-workspace/scripts/setup.py +++ b/skills/productivity/google-workspace/scripts/setup.py @@ -23,6 +23,7 @@ Agent workflow: import argparse import json +import os import subprocess import sys from pathlib import Path @@ -128,7 +129,11 @@ def check_auth(): from google.auth.transport.requests import Request try: - creds = Credentials.from_authorized_user_file(str(TOKEN_PATH), SCOPES) + # Don't pass scopes — user may have authorized only a subset. + # Passing scopes forces google-auth to validate them on refresh, + # which fails with invalid_scope if the token has fewer scopes + # than requested. 
+ creds = Credentials.from_authorized_user_file(str(TOKEN_PATH)) except Exception as e: print(f"TOKEN_CORRUPT: {e}") return False @@ -137,8 +142,9 @@ def check_auth(): if creds.valid: missing_scopes = _missing_scopes_from_payload(payload) if missing_scopes: - print(f"AUTH_SCOPE_MISMATCH: {_format_missing_scopes(missing_scopes)}") - return False + print(f"AUTHENTICATED (partial): Token valid but missing {len(missing_scopes)} scopes:") + for s in missing_scopes: + print(f" - {s}") print(f"AUTHENTICATED: Token valid at {TOKEN_PATH}") return True @@ -148,8 +154,9 @@ def check_auth(): TOKEN_PATH.write_text(creds.to_json()) missing_scopes = _missing_scopes_from_payload(_load_token_payload(TOKEN_PATH)) if missing_scopes: - print(f"AUTH_SCOPE_MISMATCH: {_format_missing_scopes(missing_scopes)}") - return False + print(f"AUTHENTICATED (partial): Token refreshed but missing {len(missing_scopes)} scopes:") + for s in missing_scopes: + print(f" - {s}") print(f"AUTHENTICATED: Token refreshed at {TOKEN_PATH}") return True except Exception as e: @@ -272,16 +279,33 @@ def exchange_auth_code(code: str): _ensure_deps() from google_auth_oauthlib.flow import Flow + from urllib.parse import parse_qs, urlparse + + # Extract granted scopes from the callback URL if present + if returned_state and "scope" in parse_qs(urlparse(code).query if isinstance(code, str) and code.startswith("http") else {}): + granted_scopes = parse_qs(urlparse(code).query)["scope"][0].split() + else: + # Try to extract from code_or_url parameter + if isinstance(code, str) and code.startswith("http"): + params = parse_qs(urlparse(code).query) + if "scope" in params: + granted_scopes = params["scope"][0].split() + else: + granted_scopes = SCOPES + else: + granted_scopes = SCOPES flow = Flow.from_client_secrets_file( str(CLIENT_SECRET_PATH), - scopes=SCOPES, + scopes=granted_scopes, redirect_uri=pending_auth.get("redirect_uri", REDIRECT_URI), state=pending_auth["state"], code_verifier=pending_auth["code_verifier"], ) 
try: + # Accept partial scopes — user may deselect some permissions in the consent screen + os.environ["OAUTHLIB_RELAX_TOKEN_SCOPE"] = "1" flow.fetch_token(code=code) except Exception as e: print(f"ERROR: Token exchange failed: {e}") @@ -290,11 +314,21 @@ def exchange_auth_code(code: str): creds = flow.credentials token_payload = json.loads(creds.to_json()) + + # Store only the scopes actually granted by the user, not what was requested. + # creds.to_json() writes the requested scopes, which causes refresh to fail + # with invalid_scope if the user only authorized a subset. + actually_granted = list(creds.granted_scopes or []) if hasattr(creds, "granted_scopes") and creds.granted_scopes else [] + if actually_granted: + token_payload["scopes"] = actually_granted + elif granted_scopes != SCOPES: + # granted_scopes was extracted from the callback URL + token_payload["scopes"] = granted_scopes + missing_scopes = _missing_scopes_from_payload(token_payload) if missing_scopes: - print(f"ERROR: Refusing to save incomplete Google Workspace token. 
{_format_missing_scopes(missing_scopes)}") - print(f"Existing token at {TOKEN_PATH} was left unchanged.") - sys.exit(1) + print(f"WARNING: Token missing some Google Workspace scopes: {', '.join(missing_scopes)}") + print("Some services may not be available.") TOKEN_PATH.write_text(json.dumps(token_payload, indent=2)) PENDING_AUTH_PATH.unlink(missing_ok=True) diff --git a/tests/agent/test_error_classifier.py b/tests/agent/test_error_classifier.py index da248f8218..44e891f0c7 100644 --- a/tests/agent/test_error_classifier.py +++ b/tests/agent/test_error_classifier.py @@ -480,6 +480,39 @@ class TestClassifyApiError: result = classify_api_error(e) assert result.reason == FailoverReason.context_overflow + # ── Message-only usage limit disambiguation (no status code) ── + + def test_message_usage_limit_transient_is_rate_limit(self): + """'usage limit' + 'try again' with no status code → rate_limit, not billing.""" + e = Exception("usage limit exceeded, try again in 5 minutes") + result = classify_api_error(e) + assert result.reason == FailoverReason.rate_limit + assert result.retryable is True + assert result.should_rotate_credential is True + assert result.should_fallback is True + + def test_message_usage_limit_no_retry_signal_is_billing(self): + """'usage limit' with no transient signal and no status code → billing.""" + e = Exception("usage limit reached") + result = classify_api_error(e) + assert result.reason == FailoverReason.billing + assert result.retryable is False + assert result.should_rotate_credential is True + + def test_message_quota_with_reset_window_is_rate_limit(self): + """'quota' + 'resets at' with no status code → rate_limit.""" + e = Exception("quota exceeded, resets at midnight UTC") + result = classify_api_error(e) + assert result.reason == FailoverReason.rate_limit + assert result.retryable is True + + def test_message_limit_exceeded_with_wait_is_rate_limit(self): + """'limit exceeded' + 'wait' with no status code → rate_limit.""" + e = 
Exception("key limit exceeded, please wait before retrying") + result = classify_api_error(e) + assert result.reason == FailoverReason.rate_limit + assert result.retryable is True + # ── Unknown / fallback ── def test_generic_exception_is_unknown(self): @@ -507,6 +540,38 @@ class TestClassifyApiError: assert result.reason == FailoverReason.format_error assert result.retryable is False + def test_400_flat_body_descriptive_not_context_overflow(self): + """Responses API flat body with descriptive error + large session → format error. + + The Codex Responses API returns errors in flat body format: + {"message": "...", "type": "..."} without an "error" wrapper. + A descriptive 400 must NOT be misclassified as context overflow + just because the session is large. + """ + e = MockAPIError( + "Invalid 'input[index].name': string does not match pattern.", + status_code=400, + body={"message": "Invalid 'input[index].name': string does not match pattern.", + "type": "invalid_request_error"}, + ) + result = classify_api_error(e, approx_tokens=200000, context_length=400000, num_messages=500) + assert result.reason == FailoverReason.format_error + assert result.retryable is False + + def test_400_flat_body_generic_large_session_still_context_overflow(self): + """Flat body with generic 'Error' message + large session → context overflow. + + Regression: the flat-body fallback must not break the existing heuristic + for genuinely generic errors from providers that use flat bodies. 
+ """ + e = MockAPIError( + "Error", + status_code=400, + body={"message": "Error"}, + ) + result = classify_api_error(e, approx_tokens=100000, context_length=200000) + assert result.reason == FailoverReason.context_overflow + # ── Peer closed + large session ── def test_peer_closed_large_session(self): diff --git a/tests/cli/test_cli_browser_connect.py b/tests/cli/test_cli_browser_connect.py index f01475bf8a..e123afe110 100644 --- a/tests/cli/test_cli_browser_connect.py +++ b/tests/cli/test_cli_browser_connect.py @@ -6,6 +6,17 @@ from unittest.mock import patch from cli import HermesCLI +def _assert_chrome_debug_cmd(cmd, expected_chrome, expected_port): + """Verify the auto-launch command has all required flags.""" + assert cmd[0] == expected_chrome + assert f"--remote-debugging-port={expected_port}" in cmd + assert "--no-first-run" in cmd + assert "--no-default-browser-check" in cmd + user_data_args = [a for a in cmd if a.startswith("--user-data-dir=")] + assert len(user_data_args) == 1, "Expected exactly one --user-data-dir flag" + assert "chrome-debug" in user_data_args[0] + + class TestChromeDebugLaunch: def test_windows_launch_uses_browser_found_on_path(self): captured = {} @@ -20,7 +31,7 @@ class TestChromeDebugLaunch: patch("subprocess.Popen", side_effect=fake_popen): assert HermesCLI._try_launch_chrome_debug(9333, "Windows") is True - assert captured["cmd"] == [r"C:\Chrome\chrome.exe", "--remote-debugging-port=9333"] + _assert_chrome_debug_cmd(captured["cmd"], r"C:\Chrome\chrome.exe", 9333) assert captured["kwargs"]["start_new_session"] is True def test_windows_launch_falls_back_to_common_install_dirs(self, monkeypatch): @@ -43,4 +54,4 @@ class TestChromeDebugLaunch: patch("subprocess.Popen", side_effect=fake_popen): assert HermesCLI._try_launch_chrome_debug(9222, "Windows") is True - assert captured["cmd"] == [installed, "--remote-debugging-port=9222"] + _assert_chrome_debug_cmd(captured["cmd"], installed, 9222) diff --git 
a/tests/cli/test_cli_file_drop.py b/tests/cli/test_cli_file_drop.py index 386aba5d17..78503de8d7 100644 --- a/tests/cli/test_cli_file_drop.py +++ b/tests/cli/test_cli_file_drop.py @@ -147,6 +147,20 @@ class TestEscapedSpaces: assert result["path"] == tmp_image_with_spaces assert result["remainder"] == "what is this?" + def test_tilde_prefixed_path(self, tmp_path, monkeypatch): + home = tmp_path / "home" + img = home / "storage" / "shared" / "Pictures" / "cat.png" + img.parent.mkdir(parents=True, exist_ok=True) + img.write_bytes(b"\x89PNG\r\n\x1a\n") + monkeypatch.setenv("HOME", str(home)) + + result = _detect_file_drop("~/storage/shared/Pictures/cat.png what is this?") + + assert result is not None + assert result["path"] == img + assert result["is_image"] is True + assert result["remainder"] == "what is this?" + # --------------------------------------------------------------------------- # Tests: edge cases diff --git a/tests/cli/test_cli_image_command.py b/tests/cli/test_cli_image_command.py new file mode 100644 index 0000000000..45bdfa7e1b --- /dev/null +++ b/tests/cli/test_cli_image_command.py @@ -0,0 +1,109 @@ +from pathlib import Path +from unittest.mock import patch + +from cli import ( + HermesCLI, + _collect_query_images, + _format_image_attachment_badges, + _termux_example_image_path, +) + + +def _make_cli(): + cli_obj = HermesCLI.__new__(HermesCLI) + cli_obj._attached_images = [] + return cli_obj + + +def _make_image(path: Path) -> Path: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_bytes(b"\x89PNG\r\n\x1a\n") + return path + + +class TestImageCommand: + def test_handle_image_command_attaches_local_image(self, tmp_path): + img = _make_image(tmp_path / "photo.png") + cli_obj = _make_cli() + + with patch("cli._cprint"): + cli_obj._handle_image_command(f"/image {img}") + + assert cli_obj._attached_images == [img] + + def test_handle_image_command_supports_quoted_path_with_spaces(self, tmp_path): + img = _make_image(tmp_path / "my 
photo.png") + cli_obj = _make_cli() + + with patch("cli._cprint"): + cli_obj._handle_image_command(f'/image "{img}"') + + assert cli_obj._attached_images == [img] + + def test_handle_image_command_rejects_non_image_file(self, tmp_path): + file_path = tmp_path / "notes.txt" + file_path.write_text("hello\n", encoding="utf-8") + cli_obj = _make_cli() + + with patch("cli._cprint") as mock_print: + cli_obj._handle_image_command(f"/image {file_path}") + + assert cli_obj._attached_images == [] + rendered = " ".join(str(arg) for call in mock_print.call_args_list for arg in call.args) + assert "Not a supported image file" in rendered + + +class TestCollectQueryImages: + def test_collect_query_images_accepts_explicit_image_arg(self, tmp_path): + img = _make_image(tmp_path / "diagram.png") + + message, images = _collect_query_images("describe this", str(img)) + + assert message == "describe this" + assert images == [img] + + def test_collect_query_images_extracts_leading_path(self, tmp_path): + img = _make_image(tmp_path / "camera.png") + + message, images = _collect_query_images(f"{img} what do you see?") + + assert message == "what do you see?" 
+ assert images == [img] + + def test_collect_query_images_supports_tilde_paths(self, tmp_path, monkeypatch): + home = tmp_path / "home" + img = _make_image(home / "storage" / "shared" / "Pictures" / "cat.png") + monkeypatch.setenv("HOME", str(home)) + + message, images = _collect_query_images("describe this", "~/storage/shared/Pictures/cat.png") + + assert message == "describe this" + assert images == [img] + + +class TestTermuxImageHints: + def test_termux_example_image_path_prefers_real_shared_storage_root(self, monkeypatch): + existing = {"/sdcard", "/storage/emulated/0"} + monkeypatch.setattr("cli.os.path.isdir", lambda path: path in existing) + + hint = _termux_example_image_path() + + assert hint == "/sdcard/Pictures/cat.png" + + +class TestImageBadgeFormatting: + def test_compact_badges_use_filename_on_narrow_terminals(self, tmp_path): + img = _make_image(tmp_path / "Screenshot 2026-04-09 at 11.22.33 AM.png") + + badges = _format_image_attachment_badges([img], image_counter=1, width=40) + + assert badges.startswith("[📎 ") + assert "Image #1" not in badges + + def test_compact_badges_summarize_multiple_images(self, tmp_path): + img1 = _make_image(tmp_path / "one.png") + img2 = _make_image(tmp_path / "two.png") + + badges = _format_image_attachment_badges([img1, img2], image_counter=2, width=45) + + assert badges == "[📎 2 images attached]" diff --git a/tests/cli/test_cli_skin_integration.py b/tests/cli/test_cli_skin_integration.py index 61a177cad4..08a86782d8 100644 --- a/tests/cli/test_cli_skin_integration.py +++ b/tests/cli/test_cli_skin_integration.py @@ -49,6 +49,25 @@ class TestCliSkinPromptIntegration: set_active_skin("ares") assert cli._get_tui_prompt_fragments() == [("class:sudo-prompt", "🔑 ❯ ")] + def test_narrow_terminals_compact_voice_prompt_fragments(self): + cli = _make_cli_stub() + cli._voice_mode = True + + with patch.object(HermesCLI, "_get_tui_terminal_width", return_value=50): + assert cli._get_tui_prompt_fragments() == 
[("class:voice-prompt", "🎤 ")] + + def test_narrow_terminals_compact_voice_recording_prompt_fragments(self): + cli = _make_cli_stub() + cli._voice_recording = True + cli._voice_recorder = SimpleNamespace(current_rms=3000) + + with patch.object(HermesCLI, "_get_tui_terminal_width", return_value=50): + frags = cli._get_tui_prompt_fragments() + + assert frags[0][0] == "class:voice-recording" + assert frags[0][1].startswith("●") + assert "❯" not in frags[0][1] + def test_icon_only_skin_symbol_still_visible_in_special_states(self): cli = _make_cli_stub() cli._secret_state = {"response_queue": object()} diff --git a/tests/cli/test_cli_status_bar.py b/tests/cli/test_cli_status_bar.py index e728328b8c..eabcd0f962 100644 --- a/tests/cli/test_cli_status_bar.py +++ b/tests/cli/test_cli_status_bar.py @@ -41,6 +41,7 @@ def _attach_agent( session_completion_tokens=completion_tokens, session_total_tokens=total_tokens, session_api_calls=api_calls, + get_rate_limit_state=lambda: None, context_compressor=SimpleNamespace( last_prompt_tokens=context_tokens, context_length=context_length, @@ -205,6 +206,59 @@ class TestCLIStatusBar: assert "⚕" in text assert "claude-sonnet-4-20250514" in text + def test_minimal_tui_chrome_threshold(self): + cli_obj = _make_cli() + + assert cli_obj._use_minimal_tui_chrome(width=63) is True + assert cli_obj._use_minimal_tui_chrome(width=64) is False + + def test_bottom_input_rule_hides_on_narrow_terminals(self): + cli_obj = _make_cli() + + assert cli_obj._tui_input_rule_height("top", width=50) == 1 + assert cli_obj._tui_input_rule_height("bottom", width=50) == 0 + assert cli_obj._tui_input_rule_height("bottom", width=90) == 1 + + def test_agent_spacer_reclaimed_on_narrow_terminals(self): + cli_obj = _make_cli() + cli_obj._agent_running = True + + assert cli_obj._agent_spacer_height(width=50) == 0 + assert cli_obj._agent_spacer_height(width=90) == 1 + cli_obj._agent_running = False + assert cli_obj._agent_spacer_height(width=90) == 0 + + def 
test_spinner_line_hidden_on_narrow_terminals(self): + cli_obj = _make_cli() + cli_obj._spinner_text = "thinking" + + assert cli_obj._spinner_widget_height(width=50) == 0 + assert cli_obj._spinner_widget_height(width=90) == 1 + cli_obj._spinner_text = "" + assert cli_obj._spinner_widget_height(width=90) == 0 + + def test_voice_status_bar_compacts_on_narrow_terminals(self): + cli_obj = _make_cli() + cli_obj._voice_mode = True + cli_obj._voice_recording = False + cli_obj._voice_processing = False + cli_obj._voice_tts = True + cli_obj._voice_continuous = True + + fragments = cli_obj._get_voice_status_fragments(width=50) + + assert fragments == [("class:voice-status", " 🎤 Ctrl+B ")] + + def test_voice_recording_status_bar_compacts_on_narrow_terminals(self): + cli_obj = _make_cli() + cli_obj._voice_mode = True + cli_obj._voice_recording = True + cli_obj._voice_processing = False + + fragments = cli_obj._get_voice_status_fragments(width=50) + + assert fragments == [("class:voice-status-recording", " ● REC ")] + class TestCLIUsageReport: def test_show_usage_includes_estimated_cost(self, capsys): diff --git a/tests/conftest.py b/tests/conftest.py index 313a3cecfd..0211404667 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -38,6 +38,8 @@ def _isolate_hermes_home(tmp_path, monkeypatch): monkeypatch.delenv("HERMES_SESSION_CHAT_ID", raising=False) monkeypatch.delenv("HERMES_SESSION_CHAT_NAME", raising=False) monkeypatch.delenv("HERMES_GATEWAY_SESSION", raising=False) + # Avoid making real calls during tests if this key is set in the env files + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) @pytest.fixture() diff --git a/tests/gateway/test_discord_connect.py b/tests/gateway/test_discord_connect.py index 6809c443ea..dd594cf7ed 100644 --- a/tests/gateway/test_discord_connect.py +++ b/tests/gateway/test_discord_connect.py @@ -56,7 +56,7 @@ class FakeTree: class FakeBot: - def __init__(self, *, intents): + def __init__(self, *, intents, proxy=None): self.intents 
= intents self.user = SimpleNamespace(id=999, name="Hermes") self._events = {} @@ -95,7 +95,7 @@ async def test_connect_only_requests_members_intent_when_needed(monkeypatch, all created = {} - def fake_bot_factory(*, command_prefix, intents): + def fake_bot_factory(*, command_prefix, intents, proxy=None): created["bot"] = FakeBot(intents=intents) return created["bot"] @@ -124,7 +124,7 @@ async def test_connect_releases_token_lock_on_timeout(monkeypatch): monkeypatch.setattr( discord_platform.commands, "Bot", - lambda **kwargs: FakeBot(intents=kwargs["intents"]), + lambda **kwargs: FakeBot(intents=kwargs["intents"], proxy=kwargs.get("proxy")), ) async def fake_wait_for(awaitable, timeout): diff --git a/tests/gateway/test_media_download_retry.py b/tests/gateway/test_media_download_retry.py index 8f135a053b..f0147dfb46 100644 --- a/tests/gateway/test_media_download_retry.py +++ b/tests/gateway/test_media_download_retry.py @@ -38,10 +38,11 @@ def _make_timeout_error() -> httpx.TimeoutException: # cache_image_from_url (base.py) # --------------------------------------------------------------------------- +@patch("tools.url_safety.is_safe_url", return_value=True) class TestCacheImageFromUrl: """Tests for gateway.platforms.base.cache_image_from_url""" - def test_success_on_first_attempt(self, tmp_path, monkeypatch): + def test_success_on_first_attempt(self, _mock_safe, tmp_path, monkeypatch): """A clean 200 response caches the image and returns a path.""" monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img") @@ -65,7 +66,7 @@ class TestCacheImageFromUrl: assert path.endswith(".jpg") mock_client.get.assert_called_once() - def test_retries_on_timeout_then_succeeds(self, tmp_path, monkeypatch): + def test_retries_on_timeout_then_succeeds(self, _mock_safe, tmp_path, monkeypatch): """A timeout on the first attempt is retried; second attempt succeeds.""" monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img") @@ -95,7 +96,7 @@ 
class TestCacheImageFromUrl: assert mock_client.get.call_count == 2 mock_sleep.assert_called_once() - def test_retries_on_429_then_succeeds(self, tmp_path, monkeypatch): + def test_retries_on_429_then_succeeds(self, _mock_safe, tmp_path, monkeypatch): """A 429 response on the first attempt is retried; second attempt succeeds.""" monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img") @@ -122,7 +123,7 @@ class TestCacheImageFromUrl: assert path.endswith(".jpg") assert mock_client.get.call_count == 2 - def test_raises_after_max_retries_exhausted(self, tmp_path, monkeypatch): + def test_raises_after_max_retries_exhausted(self, _mock_safe, tmp_path, monkeypatch): """Timeout on every attempt raises after all retries are consumed.""" monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img") @@ -145,7 +146,7 @@ class TestCacheImageFromUrl: # 3 total calls: initial + 2 retries assert mock_client.get.call_count == 3 - def test_non_retryable_4xx_raises_immediately(self, tmp_path, monkeypatch): + def test_non_retryable_4xx_raises_immediately(self, _mock_safe, tmp_path, monkeypatch): """A 404 (non-retryable) is raised immediately without any retry.""" monkeypatch.setattr("gateway.platforms.base.IMAGE_CACHE_DIR", tmp_path / "img") @@ -175,10 +176,11 @@ class TestCacheImageFromUrl: # cache_audio_from_url (base.py) # --------------------------------------------------------------------------- +@patch("tools.url_safety.is_safe_url", return_value=True) class TestCacheAudioFromUrl: """Tests for gateway.platforms.base.cache_audio_from_url""" - def test_success_on_first_attempt(self, tmp_path, monkeypatch): + def test_success_on_first_attempt(self, _mock_safe, tmp_path, monkeypatch): """A clean 200 response caches the audio and returns a path.""" monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") @@ -202,7 +204,7 @@ class TestCacheAudioFromUrl: assert path.endswith(".ogg") mock_client.get.assert_called_once() 
- def test_retries_on_timeout_then_succeeds(self, tmp_path, monkeypatch): + def test_retries_on_timeout_then_succeeds(self, _mock_safe, tmp_path, monkeypatch): """A timeout on the first attempt is retried; second attempt succeeds.""" monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") @@ -232,7 +234,7 @@ class TestCacheAudioFromUrl: assert mock_client.get.call_count == 2 mock_sleep.assert_called_once() - def test_retries_on_429_then_succeeds(self, tmp_path, monkeypatch): + def test_retries_on_429_then_succeeds(self, _mock_safe, tmp_path, monkeypatch): """A 429 response on the first attempt is retried; second attempt succeeds.""" monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") @@ -259,7 +261,7 @@ class TestCacheAudioFromUrl: assert path.endswith(".ogg") assert mock_client.get.call_count == 2 - def test_retries_on_500_then_succeeds(self, tmp_path, monkeypatch): + def test_retries_on_500_then_succeeds(self, _mock_safe, tmp_path, monkeypatch): """A 500 response on the first attempt is retried; second attempt succeeds.""" monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") @@ -286,7 +288,7 @@ class TestCacheAudioFromUrl: assert path.endswith(".ogg") assert mock_client.get.call_count == 2 - def test_raises_after_max_retries_exhausted(self, tmp_path, monkeypatch): + def test_raises_after_max_retries_exhausted(self, _mock_safe, tmp_path, monkeypatch): """Timeout on every attempt raises after all retries are consumed.""" monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") @@ -309,7 +311,7 @@ class TestCacheAudioFromUrl: # 3 total calls: initial + 2 retries assert mock_client.get.call_count == 3 - def test_non_retryable_4xx_raises_immediately(self, tmp_path, monkeypatch): + def test_non_retryable_4xx_raises_immediately(self, _mock_safe, tmp_path, monkeypatch): """A 404 (non-retryable) is raised immediately without any retry.""" 
monkeypatch.setattr("gateway.platforms.base.AUDIO_CACHE_DIR", tmp_path / "audio") diff --git a/tests/gateway/test_slack.py b/tests/gateway/test_slack.py index 67c7cce1dc..983a7e990c 100644 --- a/tests/gateway/test_slack.py +++ b/tests/gateway/test_slack.py @@ -619,6 +619,18 @@ class TestFormatMessage: result = adapter.format_message("[click here](https://example.com)") assert result == "" + def test_link_conversion_strips_markdown_angle_brackets(self, adapter): + result = adapter.format_message("[click here]()") + assert result == "" + + def test_escapes_control_characters(self, adapter): + result = adapter.format_message("AT&T < 5 > 3") + assert result == "AT&T < 5 > 3" + + def test_preserves_existing_slack_entities(self, adapter): + text = "Hey <@U123>, see and " + assert adapter.format_message(text) == text + def test_strikethrough(self, adapter): assert adapter.format_message("~~deleted~~") == "~deleted~" @@ -643,6 +655,325 @@ class TestFormatMessage: def test_none_passthrough(self, adapter): assert adapter.format_message(None) is None + def test_blockquote_preserved(self, adapter): + """Single-line blockquote > marker is preserved.""" + assert adapter.format_message("> quoted text") == "> quoted text" + + def test_multiline_blockquote(self, adapter): + """Multi-line blockquote preserves > on each line.""" + text = "> line one\n> line two" + assert adapter.format_message(text) == "> line one\n> line two" + + def test_blockquote_with_formatting(self, adapter): + """Blockquote containing bold text.""" + assert adapter.format_message("> **bold quote**") == "> *bold quote*" + + def test_nested_blockquote(self, adapter): + """Multiple > characters for nested quotes.""" + assert adapter.format_message(">> deeply quoted") == ">> deeply quoted" + + def test_blockquote_mixed_with_plain(self, adapter): + """Blockquote lines interleaved with plain text.""" + text = "normal\n> quoted\nnormal again" + result = adapter.format_message(text) + assert "> quoted" in result + 
assert "normal" in result + + def test_non_prefix_gt_still_escaped(self, adapter): + """Greater-than in mid-line is still escaped.""" + assert adapter.format_message("5 > 3") == "5 > 3" + + def test_blockquote_with_code(self, adapter): + """Blockquote containing inline code.""" + result = adapter.format_message("> use `fmt.Println`") + assert result.startswith(">") + assert "`fmt.Println`" in result + + def test_bold_italic_combined(self, adapter): + """Triple-star ***text*** converts to Slack bold+italic *_text_*.""" + assert adapter.format_message("***hello***") == "*_hello_*" + + def test_bold_italic_with_surrounding_text(self, adapter): + """Bold+italic in a sentence.""" + result = adapter.format_message("This is ***important*** stuff") + assert "*_important_*" in result + + def test_bold_italic_does_not_break_plain_bold(self, adapter): + """**bold** still works after adding ***bold italic*** support.""" + assert adapter.format_message("**bold**") == "*bold*" + + def test_bold_italic_does_not_break_plain_italic(self, adapter): + """*italic* still works after adding ***bold italic*** support.""" + assert adapter.format_message("*italic*") == "_italic_" + + def test_bold_italic_mixed_with_bold(self, adapter): + """Both ***bold italic*** and **bold** in the same message.""" + result = adapter.format_message("***important*** and **bold**") + assert "*_important_*" in result + assert "*bold*" in result + + def test_pre_escaped_ampersand_not_double_escaped(self, adapter): + """Already-escaped & must not become &amp;.""" + assert adapter.format_message("&") == "&" + + def test_pre_escaped_lt_not_double_escaped(self, adapter): + """Already-escaped < must not become &lt;.""" + assert adapter.format_message("<") == "<" + + def test_pre_escaped_gt_not_double_escaped(self, adapter): + """Already-escaped > in plain text must not become &gt;.""" + assert adapter.format_message("5 > 3") == "5 > 3" + + def test_mixed_raw_and_escaped_entities(self, adapter): + """Raw & and 
pre-escaped & coexist correctly.""" + result = adapter.format_message("AT&T and & entity") + assert result == "AT&T and & entity" + + def test_link_with_parentheses_in_url(self, adapter): + """Wikipedia-style URL with balanced parens is not truncated.""" + result = adapter.format_message("[Foo](https://en.wikipedia.org/wiki/Foo_(bar))") + assert result == "" + + def test_link_with_multiple_paren_pairs(self, adapter): + """URL with multiple balanced paren pairs.""" + result = adapter.format_message("[text](https://example.com/a_(b)_c_(d))") + assert result == "" + + def test_link_without_parens_still_works(self, adapter): + """Normal URL without parens is unaffected by regex change.""" + result = adapter.format_message("[click](https://example.com/path?q=1)") + assert result == "" + + def test_link_with_angle_brackets_and_parens(self, adapter): + """Angle-bracket URL with parens (CommonMark syntax).""" + result = adapter.format_message("[Foo]()") + assert result == "" + + def test_escaping_is_idempotent(self, adapter): + """Formatting already-formatted text produces the same result.""" + original = "AT&T < 5 > 3" + once = adapter.format_message(original) + twice = adapter.format_message(once) + assert once == twice + + # --- Entity preservation (spec-compliance) --- + + def test_channel_mention_preserved(self, adapter): + """ special mention passes through unchanged.""" + assert adapter.format_message("Attention ") == "Attention " + + def test_everyone_mention_preserved(self, adapter): + """ special mention passes through unchanged.""" + assert adapter.format_message("Hey ") == "Hey " + + def test_subteam_mention_preserved(self, adapter): + """ user group mention passes through unchanged.""" + assert adapter.format_message("Paging ") == "Paging " + + def test_date_formatting_preserved(self, adapter): + """ formatting token passes through unchanged.""" + text = "Posted " + assert adapter.format_message(text) == text + + def test_channel_link_preserved(self, adapter): 
+ """<#CHANNEL_ID> channel link passes through unchanged.""" + assert adapter.format_message("Join <#C12345>") == "Join <#C12345>" + + # --- Additional edge cases --- + + def test_message_only_code_block(self, adapter): + """Entire message is a fenced code block — no conversion.""" + code = "```python\nx = 1\n```" + assert adapter.format_message(code) == code + + def test_multiline_mixed_formatting(self, adapter): + """Multi-line message with headers, bold, links, code, and blockquotes.""" + text = "## Title\n**bold** and [link](https://x.com)\n> quote\n`code`" + result = adapter.format_message(text) + assert result.startswith("*Title*") + assert "*bold*" in result + assert "" in result + assert "> quote" in result + assert "`code`" in result + + def test_markdown_unordered_list_with_asterisk(self, adapter): + """Asterisk list items must not trigger italic conversion.""" + text = "* item one\n* item two" + result = adapter.format_message(text) + assert "item one" in result + assert "item two" in result + + def test_nested_bold_in_link(self, adapter): + """Bold inside link label — label is stashed before bold pass.""" + result = adapter.format_message("[**bold**](https://example.com)") + assert "https://example.com" in result + assert "bold" in result + + def test_url_with_query_string_and_ampersand(self, adapter): + """Ampersand in URL query string must not be escaped.""" + result = adapter.format_message("[link](https://x.com?a=1&b=2)") + assert result == "" + + def test_emoji_shortcodes_passthrough(self, adapter): + """Emoji shortcodes like :smile: pass through unchanged.""" + assert adapter.format_message(":smile: hello :wave:") == ":smile: hello :wave:" + + +# --------------------------------------------------------------------------- +# TestEditMessage +# --------------------------------------------------------------------------- + + +class TestEditMessage: + """Verify that edit_message() applies mrkdwn formatting before sending.""" + + @pytest.mark.asyncio + 
async def test_edit_message_formats_bold(self, adapter): + """edit_message converts **bold** to Slack *bold*.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "1234.5678", "**hello world**") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"] == "*hello world*" + + @pytest.mark.asyncio + async def test_edit_message_formats_links(self, adapter): + """edit_message converts markdown links to Slack format.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "1234.5678", "[click](https://example.com)") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"] == "<https://example.com|click>" + + @pytest.mark.asyncio + async def test_edit_message_preserves_blockquotes(self, adapter): + """edit_message preserves blockquote > markers.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "1234.5678", "> quoted text") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"] == "> quoted text" + + @pytest.mark.asyncio + async def test_edit_message_escapes_control_chars(self, adapter): + """edit_message leaves & < > in plain text unescaped (no entity escaping).""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "1234.5678", "AT&T < 5 > 3") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"] == "AT&T < 5 > 3" + + +# --------------------------------------------------------------------------- +# TestEditMessageStreamingPipeline +# --------------------------------------------------------------------------- + + +class TestEditMessageStreamingPipeline: + """E2E: verify that sequential streaming edits all go through format_message. + + Simulates the GatewayStreamConsumer pattern where edit_message is called + repeatedly with progressively longer accumulated text. 
Every call must + produce properly formatted mrkdwn in the chat_update payload. + """ + + @pytest.mark.asyncio + async def test_edit_message_formats_streaming_updates(self, adapter): + """Simulates streaming: multiple edits, each should be formatted.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + + # First streaming update — bold + result1 = await adapter.edit_message("C123", "ts1", "**Processing**...") + assert result1.success is True + kwargs1 = adapter._app.client.chat_update.call_args.kwargs + assert kwargs1["text"] == "*Processing*..." + + # Second streaming update — bold + link + result2 = await adapter.edit_message( + "C123", "ts1", "**Done!** See [results](https://example.com)" + ) + assert result2.success is True + kwargs2 = adapter._app.client.chat_update.call_args.kwargs + assert kwargs2["text"] == "*Done!* See <https://example.com|results>" + + @pytest.mark.asyncio + async def test_edit_message_formats_code_and_bold(self, adapter): + """Streaming update with code block and bold — code must be preserved.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + + content = "**Result:**\n```python\nprint('hello')\n```" + result = await adapter.edit_message("C123", "ts1", content) + assert result.success is True + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"].startswith("*Result:*") + assert "```python\nprint('hello')\n```" in kwargs["text"] + + @pytest.mark.asyncio + async def test_edit_message_formats_blockquote_in_stream(self, adapter): + """Streaming update with blockquote — '>' marker must survive.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + + content = "> **Important:** do this\nnormal line" + result = await adapter.edit_message("C123", "ts1", content) + assert result.success is True + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"].startswith("> *Important:*") + assert "normal line" in kwargs["text"] + + @pytest.mark.asyncio 
+ async def test_edit_message_formats_progressive_accumulation(self, adapter): + """Simulate real streaming: text grows with each edit, all formatted.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + + updates = [ + ("**Step 1**", "*Step 1*"), + ("**Step 1**\n**Step 2**", "*Step 1*\n*Step 2*"), + ( + "**Step 1**\n**Step 2**\nSee [docs](https://docs.example.com)", + "*Step 1*\n*Step 2*\nSee <https://docs.example.com|docs>", + ), + ] + + for raw, expected in updates: + result = await adapter.edit_message("C123", "ts1", raw) + assert result.success is True + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert kwargs["text"] == expected, f"Failed for input: {raw!r}" + + # Total edit count should match number of updates + assert adapter._app.client.chat_update.call_count == len(updates) + + @pytest.mark.asyncio + async def test_edit_message_formats_bold_italic(self, adapter): + """Bold+italic ***text*** is formatted as *_text_* in edited messages.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "ts1", "***important*** update") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert "*_important_*" in kwargs["text"] + + @pytest.mark.asyncio + async def test_edit_message_does_not_double_escape(self, adapter): + """Pre-escaped entities in edited messages must not get double-escaped.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", "ts1", "5 > 3 and & entity") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert "&gt;" not in kwargs["text"] + assert "&amp;" not in kwargs["text"] + assert ">" in kwargs["text"] + assert "&" in kwargs["text"] + + @pytest.mark.asyncio + async def test_edit_message_formats_url_with_parens(self, adapter): + """Wikipedia-style URL with parens survives edit pipeline.""" + adapter._app.client.chat_update = AsyncMock(return_value={"ok": True}) + await adapter.edit_message("C123", 
"ts1", "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))") + kwargs = adapter._app.client.chat_update.call_args.kwargs + assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in kwargs["text"] + + @pytest.mark.asyncio + async def test_edit_message_not_connected(self, adapter): + """edit_message returns failure when adapter is not connected.""" + adapter._app = None + result = await adapter.edit_message("C123", "ts1", "**hello**") + assert result.success is False + assert "Not connected" in result.error # --------------------------------------------------------------------------- # TestReactions @@ -1085,6 +1416,48 @@ class TestMessageSplitting: await adapter.send("C123", "hello world") assert adapter._app.client.chat_postMessage.call_count == 1 + @pytest.mark.asyncio + async def test_send_preserves_blockquote_formatting(self, adapter): + """Blockquote '>' markers must survive format → chunk → send pipeline.""" + adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"}) + await adapter.send("C123", "> quoted text\nnormal text") + kwargs = adapter._app.client.chat_postMessage.call_args.kwargs + sent_text = kwargs["text"] + assert sent_text.startswith("> quoted text") + assert "normal text" in sent_text + + @pytest.mark.asyncio + async def test_send_formats_bold_italic(self, adapter): + """Bold+italic ***text*** is formatted as *_text_* in sent messages.""" + adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"}) + await adapter.send("C123", "***important*** update") + kwargs = adapter._app.client.chat_postMessage.call_args.kwargs + assert "*_important_*" in kwargs["text"] + + @pytest.mark.asyncio + async def test_send_explicitly_enables_mrkdwn(self, adapter): + adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"}) + await adapter.send("C123", "**hello**") + kwargs = adapter._app.client.chat_postMessage.call_args.kwargs + assert kwargs.get("mrkdwn") is True + + @pytest.mark.asyncio + async def test_send_does_not_double_escape_entities(self, 
adapter): + """Pre-escaped & in sent messages must not become &amp;.""" + adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"}) + await adapter.send("C123", "Use & for ampersand") + kwargs = adapter._app.client.chat_postMessage.call_args.kwargs + assert "&amp;" not in kwargs["text"] + assert "&" in kwargs["text"] + + @pytest.mark.asyncio + async def test_send_formats_url_with_parens(self, adapter): + """Wikipedia-style URL with parens survives send pipeline.""" + adapter._app.client.chat_postMessage = AsyncMock(return_value={"ts": "ts1"}) + await adapter.send("C123", "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))") + kwargs = adapter._app.client.chat_postMessage.call_args.kwargs + assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in kwargs["text"] + # --------------------------------------------------------------------------- # TestReplyBroadcast diff --git a/tests/gateway/test_slack_mention.py b/tests/gateway/test_slack_mention.py new file mode 100644 index 0000000000..22e17443fb --- /dev/null +++ b/tests/gateway/test_slack_mention.py @@ -0,0 +1,312 @@ +""" +Tests for Slack mention gating (require_mention / free_response_channels). + +Follows the same pattern as test_whatsapp_group_gating.py. 
+""" + +import sys +from unittest.mock import MagicMock + +from gateway.config import Platform, PlatformConfig + + +# --------------------------------------------------------------------------- +# Mock slack-bolt if not installed (same as test_slack.py) +# --------------------------------------------------------------------------- + +def _ensure_slack_mock(): + if "slack_bolt" in sys.modules and hasattr(sys.modules["slack_bolt"], "__file__"): + return + + slack_bolt = MagicMock() + slack_bolt.async_app.AsyncApp = MagicMock + slack_bolt.adapter.socket_mode.async_handler.AsyncSocketModeHandler = MagicMock + + slack_sdk = MagicMock() + slack_sdk.web.async_client.AsyncWebClient = MagicMock + + for name, mod in [ + ("slack_bolt", slack_bolt), + ("slack_bolt.async_app", slack_bolt.async_app), + ("slack_bolt.adapter", slack_bolt.adapter), + ("slack_bolt.adapter.socket_mode", slack_bolt.adapter.socket_mode), + ("slack_bolt.adapter.socket_mode.async_handler", slack_bolt.adapter.socket_mode.async_handler), + ("slack_sdk", slack_sdk), + ("slack_sdk.web", slack_sdk.web), + ("slack_sdk.web.async_client", slack_sdk.web.async_client), + ]: + sys.modules.setdefault(name, mod) + + +_ensure_slack_mock() + +import gateway.platforms.slack as _slack_mod +_slack_mod.SLACK_AVAILABLE = True + +from gateway.platforms.slack import SlackAdapter # noqa: E402 + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +BOT_USER_ID = "U_BOT_123" +CHANNEL_ID = "C0AQWDLHY9M" +OTHER_CHANNEL_ID = "C9999999999" + + +def _make_adapter(require_mention=None, free_response_channels=None): + extra = {} + if require_mention is not None: + extra["require_mention"] = require_mention + if free_response_channels is not None: + extra["free_response_channels"] = free_response_channels + + adapter = object.__new__(SlackAdapter) + adapter.platform = Platform.SLACK + adapter.config = 
PlatformConfig(enabled=True, extra=extra) + adapter._bot_user_id = BOT_USER_ID + adapter._team_bot_user_ids = {} + return adapter + + +# --------------------------------------------------------------------------- +# Tests: _slack_require_mention +# --------------------------------------------------------------------------- + +def test_require_mention_defaults_to_true(monkeypatch): + monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False) + adapter = _make_adapter() + assert adapter._slack_require_mention() is True + + +def test_require_mention_false(): + adapter = _make_adapter(require_mention=False) + assert adapter._slack_require_mention() is False + + +def test_require_mention_true(): + adapter = _make_adapter(require_mention=True) + assert adapter._slack_require_mention() is True + + +def test_require_mention_string_true(): + adapter = _make_adapter(require_mention="true") + assert adapter._slack_require_mention() is True + + +def test_require_mention_string_false(): + adapter = _make_adapter(require_mention="false") + assert adapter._slack_require_mention() is False + + +def test_require_mention_string_no(): + adapter = _make_adapter(require_mention="no") + assert adapter._slack_require_mention() is False + + +def test_require_mention_string_yes(): + adapter = _make_adapter(require_mention="yes") + assert adapter._slack_require_mention() is True + + +def test_require_mention_empty_string_stays_true(): + """Empty/malformed strings keep gating ON (explicit-false parser).""" + adapter = _make_adapter(require_mention="") + assert adapter._slack_require_mention() is True + + +def test_require_mention_malformed_string_stays_true(): + """Unrecognised values keep gating ON (fail-closed).""" + adapter = _make_adapter(require_mention="maybe") + assert adapter._slack_require_mention() is True + + +def test_require_mention_env_var_fallback(monkeypatch): + monkeypatch.setenv("SLACK_REQUIRE_MENTION", "false") + adapter = _make_adapter() # no config value -> falls back to 
env + assert adapter._slack_require_mention() is False + + +def test_require_mention_env_var_default_true(monkeypatch): + monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False) + adapter = _make_adapter() + assert adapter._slack_require_mention() is True + + +# --------------------------------------------------------------------------- +# Tests: _slack_free_response_channels +# --------------------------------------------------------------------------- + +def test_free_response_channels_default_empty(monkeypatch): + monkeypatch.delenv("SLACK_FREE_RESPONSE_CHANNELS", raising=False) + adapter = _make_adapter() + assert adapter._slack_free_response_channels() == set() + + +def test_free_response_channels_list(): + adapter = _make_adapter(free_response_channels=[CHANNEL_ID, OTHER_CHANNEL_ID]) + result = adapter._slack_free_response_channels() + assert CHANNEL_ID in result + assert OTHER_CHANNEL_ID in result + + +def test_free_response_channels_csv_string(): + adapter = _make_adapter(free_response_channels=f"{CHANNEL_ID}, {OTHER_CHANNEL_ID}") + result = adapter._slack_free_response_channels() + assert CHANNEL_ID in result + assert OTHER_CHANNEL_ID in result + + +def test_free_response_channels_empty_string(): + adapter = _make_adapter(free_response_channels="") + assert adapter._slack_free_response_channels() == set() + + +def test_free_response_channels_env_var_fallback(monkeypatch): + monkeypatch.setenv("SLACK_FREE_RESPONSE_CHANNELS", f"{CHANNEL_ID},{OTHER_CHANNEL_ID}") + adapter = _make_adapter() # no config value → falls back to env + result = adapter._slack_free_response_channels() + assert CHANNEL_ID in result + assert OTHER_CHANNEL_ID in result + + +# --------------------------------------------------------------------------- +# Tests: mention gating integration (simulating _handle_slack_message logic) +# --------------------------------------------------------------------------- + +def _would_process(adapter, *, is_dm=False, channel_id=CHANNEL_ID, + 
text="hello", mentioned=False, thread_reply=False, + active_session=False): + """Simulate the mention gating logic from _handle_slack_message. + + Returns True if the message would be processed, False if it would be + skipped (returned early). + """ + bot_uid = adapter._team_bot_user_ids.get("T1", adapter._bot_user_id) + if mentioned: + text = f"<@{bot_uid}> {text}" + is_mentioned = bot_uid and f"<@{bot_uid}>" in text + + if not is_dm: + if channel_id in adapter._slack_free_response_channels(): + return True + elif not adapter._slack_require_mention(): + return True + elif not is_mentioned: + if thread_reply and active_session: + return True + else: + return False + return True + + +def test_default_require_mention_channel_without_mention_ignored(): + adapter = _make_adapter() # default: require_mention=True + assert _would_process(adapter, text="hello everyone") is False + + +def test_require_mention_false_channel_without_mention_processed(): + adapter = _make_adapter(require_mention=False) + assert _would_process(adapter, text="hello everyone") is True + + +def test_channel_in_free_response_processed_without_mention(): + adapter = _make_adapter( + require_mention=True, + free_response_channels=[CHANNEL_ID], + ) + assert _would_process(adapter, channel_id=CHANNEL_ID, text="hello") is True + + +def test_other_channel_not_in_free_response_still_gated(): + adapter = _make_adapter( + require_mention=True, + free_response_channels=[CHANNEL_ID], + ) + assert _would_process(adapter, channel_id=OTHER_CHANNEL_ID, text="hello") is False + + +def test_dm_always_processed_regardless_of_setting(): + adapter = _make_adapter(require_mention=True) + assert _would_process(adapter, is_dm=True, text="hello") is True + + +def test_mentioned_message_always_processed(): + adapter = _make_adapter(require_mention=True) + assert _would_process(adapter, mentioned=True, text="what's up") is True + + +def test_thread_reply_with_active_session_processed(): + adapter = 
_make_adapter(require_mention=True) + assert _would_process( + adapter, text="followup", + thread_reply=True, active_session=True, + ) is True + + +def test_thread_reply_without_active_session_ignored(): + adapter = _make_adapter(require_mention=True) + assert _would_process( + adapter, text="followup", + thread_reply=True, active_session=False, + ) is False + + +def test_bot_uid_none_processes_channel_message(): + """When bot_uid is None (before auth_test), channel messages pass through. + + This preserves the old behavior: the gating block is skipped entirely + when bot_uid is falsy, so messages are not silently dropped during + startup or for new workspaces. + """ + adapter = _make_adapter(require_mention=True) + adapter._bot_user_id = None + adapter._team_bot_user_ids = {} + + # With bot_uid=None, the `if not is_dm and bot_uid:` condition is False, + # so the gating block is skipped — message passes through. + bot_uid = adapter._team_bot_user_ids.get("T1", adapter._bot_user_id) + assert bot_uid is None + + # Simulate: gating block not entered when bot_uid is falsy + is_dm = False + if not is_dm and bot_uid: + result = False # would enter gating + else: + result = True # gating skipped, message processed + assert result is True + + +# --------------------------------------------------------------------------- +# Tests: config bridging +# --------------------------------------------------------------------------- + +def test_config_bridges_slack_free_response_channels(monkeypatch, tmp_path): + from gateway.config import load_gateway_config + + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + (hermes_home / "config.yaml").write_text( + "slack:\n" + " require_mention: false\n" + " free_response_channels:\n" + " - C0AQWDLHY9M\n" + " - C9999999999\n", + encoding="utf-8", + ) + + monkeypatch.setenv("HERMES_HOME", str(hermes_home)) + monkeypatch.delenv("SLACK_REQUIRE_MENTION", raising=False) + monkeypatch.delenv("SLACK_FREE_RESPONSE_CHANNELS", raising=False) 
+ + config = load_gateway_config() + + assert config is not None + slack_extra = config.platforms[Platform.SLACK].extra + assert slack_extra.get("require_mention") is False + assert slack_extra.get("free_response_channels") == ["C0AQWDLHY9M", "C9999999999"] + # Verify env vars were set by config bridging + import os as _os + assert _os.environ["SLACK_REQUIRE_MENTION"] == "false" + assert _os.environ["SLACK_FREE_RESPONSE_CHANNELS"] == "C0AQWDLHY9M,C9999999999" diff --git a/tests/gateway/test_wecom.py b/tests/gateway/test_wecom.py index a7101c6973..418a4b622f 100644 --- a/tests/gateway/test_wecom.py +++ b/tests/gateway/test_wecom.py @@ -4,7 +4,7 @@ import base64 import os from pathlib import Path from types import SimpleNamespace -from unittest.mock import AsyncMock +from unittest.mock import AsyncMock, patch import pytest @@ -355,7 +355,8 @@ class TestMediaUpload: assert calls[3][1]["chunk_index"] == 2 @pytest.mark.asyncio - async def test_download_remote_bytes_rejects_large_content_length(self): + @patch("tools.url_safety.is_safe_url", return_value=True) + async def test_download_remote_bytes_rejects_large_content_length(self, _mock_safe): from gateway.platforms.wecom import WeComAdapter class FakeResponse: diff --git a/tests/hermes_cli/test_api_key_providers.py b/tests/hermes_cli/test_api_key_providers.py index ee86507a16..d97b0c1f75 100644 --- a/tests/hermes_cli/test_api_key_providers.py +++ b/tests/hermes_cli/test_api_key_providers.py @@ -628,14 +628,21 @@ class TestHasAnyProviderConfigured: def test_claude_code_creds_ignored_on_fresh_install(self, monkeypatch, tmp_path): """Claude Code credentials should NOT skip the wizard when Hermes is unconfigured.""" from hermes_cli import config as config_module + from hermes_cli.auth import PROVIDER_REGISTRY hermes_home = tmp_path / ".hermes" hermes_home.mkdir() monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env") monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home) # 
Clear all provider env vars so earlier checks don't short-circuit - for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", - "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"): + _all_vars = {"OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", + "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"} + for pconfig in PROVIDER_REGISTRY.values(): + if pconfig.auth_type == "api_key": + _all_vars.update(pconfig.api_key_env_vars) + for var in _all_vars: monkeypatch.delenv(var, raising=False) + # Prevent gh-cli / copilot auth fallback from leaking in + monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda _pid: {}) # Simulate valid Claude Code credentials monkeypatch.setattr( "agent.anthropic_adapter.read_claude_code_credentials", @@ -710,6 +717,7 @@ class TestHasAnyProviderConfigured: """config.yaml model dict with empty default and no creds stays false.""" import yaml from hermes_cli import config as config_module + from hermes_cli.auth import PROVIDER_REGISTRY hermes_home = tmp_path / ".hermes" hermes_home.mkdir() config_file = hermes_home / "config.yaml" @@ -719,9 +727,15 @@ class TestHasAnyProviderConfigured: monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env") monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home) monkeypatch.setenv("HERMES_HOME", str(hermes_home)) - for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", - "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"): + _all_vars = {"OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", + "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"} + for pconfig in PROVIDER_REGISTRY.values(): + if pconfig.auth_type == "api_key": + _all_vars.update(pconfig.api_key_env_vars) + for var in _all_vars: monkeypatch.delenv(var, raising=False) + # Prevent gh-cli / copilot auth fallback from leaking in + monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda _pid: {}) from hermes_cli.main import _has_any_provider_configured assert _has_any_provider_configured() is False @@ 
-941,9 +955,10 @@ class TestHuggingFaceModels: """Every HF model should have a context length entry.""" from hermes_cli.models import _PROVIDER_MODELS from agent.model_metadata import DEFAULT_CONTEXT_LENGTHS + lower_keys = {k.lower() for k in DEFAULT_CONTEXT_LENGTHS} hf_models = _PROVIDER_MODELS["huggingface"] for model in hf_models: - assert model in DEFAULT_CONTEXT_LENGTHS, ( + assert model.lower() in lower_keys, ( f"HF model {model!r} missing from DEFAULT_CONTEXT_LENGTHS" ) diff --git a/tests/hermes_cli/test_chat_skills_flag.py b/tests/hermes_cli/test_chat_skills_flag.py index 8551b4105a..0ec25a5400 100644 --- a/tests/hermes_cli/test_chat_skills_flag.py +++ b/tests/hermes_cli/test_chat_skills_flag.py @@ -49,6 +49,30 @@ def test_chat_subcommand_accepts_skills_flag(monkeypatch): } +def test_chat_subcommand_accepts_image_flag(monkeypatch): + import hermes_cli.main as main_mod + + captured = {} + + def fake_cmd_chat(args): + captured["query"] = args.query + captured["image"] = args.image + + monkeypatch.setattr(main_mod, "cmd_chat", fake_cmd_chat) + monkeypatch.setattr( + sys, + "argv", + ["hermes", "chat", "-q", "hello", "--image", "~/storage/shared/Pictures/cat.png"], + ) + + main_mod.main() + + assert captured == { + "query": "hello", + "image": "~/storage/shared/Pictures/cat.png", + } + + def test_continue_worktree_and_skills_flags_work_together(monkeypatch): import hermes_cli.main as main_mod diff --git a/tests/hermes_cli/test_commands.py b/tests/hermes_cli/test_commands.py index 3b57bf07aa..4cef1a6843 100644 --- a/tests/hermes_cli/test_commands.py +++ b/tests/hermes_cli/test_commands.py @@ -68,6 +68,17 @@ class TestCommandRegistry: for cmd in COMMAND_REGISTRY: assert cmd.category in valid_categories, f"{cmd.name} has invalid category '{cmd.category}'" + def test_reasoning_subcommands_are_in_logical_order(self): + reasoning = next(cmd for cmd in COMMAND_REGISTRY if cmd.name == "reasoning") + assert reasoning.subcommands[:6] == ( + "none", + "minimal", + "low", 
+ "medium", + "high", + "xhigh", + ) + def test_cli_only_and_gateway_only_are_mutually_exclusive(self): for cmd in COMMAND_REGISTRY: assert not (cmd.cli_only and cmd.gateway_only), \ @@ -428,8 +439,8 @@ class TestSlashCommandCompleter: class TestSubcommands: def test_explicit_subcommands_extracted(self): """Commands with explicit subcommands on CommandDef are extracted.""" - assert "/prompt" in SUBCOMMANDS - assert "clear" in SUBCOMMANDS["/prompt"] + assert "/skills" in SUBCOMMANDS + assert "install" in SUBCOMMANDS["/skills"] def test_reasoning_has_subcommands(self): assert "/reasoning" in SUBCOMMANDS diff --git a/tests/hermes_cli/test_doctor.py b/tests/hermes_cli/test_doctor.py index f30fb48396..faaa7a8a2d 100644 --- a/tests/hermes_cli/test_doctor.py +++ b/tests/hermes_cli/test_doctor.py @@ -14,6 +14,23 @@ from hermes_cli import doctor as doctor_mod from hermes_cli.doctor import _has_provider_env_config +class TestDoctorPlatformHints: + def test_termux_package_hint(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + assert doctor._is_termux() is True + assert doctor._python_install_cmd() == "python -m pip install" + assert doctor._system_package_install_cmd("ripgrep") == "pkg install ripgrep" + + def test_non_termux_package_hint_defaults_to_apt(self, monkeypatch): + monkeypatch.delenv("TERMUX_VERSION", raising=False) + monkeypatch.setenv("PREFIX", "/usr") + monkeypatch.setattr(sys, "platform", "linux") + assert doctor._is_termux() is False + assert doctor._python_install_cmd() == "uv pip install" + assert doctor._system_package_install_cmd("ripgrep") == "sudo apt install ripgrep" + + class TestProviderEnvDetection: def test_detects_openai_api_key(self): content = "OPENAI_BASE_URL=http://localhost:1234/v1\nOPENAI_API_KEY=***" @@ -206,3 +223,72 @@ class TestDoctorMemoryProviderSection: out = self._run_doctor_and_capture(monkeypatch, tmp_path, provider="mem0") assert "Memory 
Provider" in out assert "Built-in memory active" not in out + + +def test_run_doctor_termux_treats_docker_and_browser_warnings_as_expected(monkeypatch, tmp_path): + helper = TestDoctorMemoryProviderSection() + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + + real_which = doctor_mod.shutil.which + + def fake_which(cmd): + if cmd in {"docker", "node", "npm"}: + return None + return real_which(cmd) + + monkeypatch.setattr(doctor_mod.shutil, "which", fake_which) + + out = helper._run_doctor_and_capture(monkeypatch, tmp_path, provider="") + + assert "Docker backend is not available inside Termux" in out + assert "Node.js not found (browser tools are optional in the tested Termux path)" in out + assert "Install Node.js on Termux with: pkg install nodejs" in out + assert "Termux browser setup:" in out + assert "1) pkg install nodejs" in out + assert "2) npm install -g agent-browser" in out + assert "3) agent-browser install" in out + assert "docker not found (optional)" not in out + + +def test_run_doctor_termux_does_not_mark_browser_available_without_agent_browser(monkeypatch, tmp_path): + home = tmp_path / ".hermes" + home.mkdir(parents=True, exist_ok=True) + (home / "config.yaml").write_text("memory: {}\n", encoding="utf-8") + project = tmp_path / "project" + project.mkdir(exist_ok=True) + + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr(doctor_mod, "HERMES_HOME", home) + monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", project) + monkeypatch.setattr(doctor_mod, "_DHH", str(home)) + monkeypatch.setattr(doctor_mod.shutil, "which", lambda cmd: "/data/data/com.termux/files/usr/bin/node" if cmd in {"node", "npm"} else None) + + fake_model_tools = types.SimpleNamespace( + check_tool_availability=lambda *a, **kw: (["terminal"], [{"name": "browser", "env_vars": [], "tools": ["browser_navigate"]}]), + 
TOOLSET_REQUIREMENTS={ + "terminal": {"name": "terminal"}, + "browser": {"name": "browser"}, + }, + ) + monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools) + + try: + from hermes_cli import auth as _auth_mod + monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {}) + monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {}) + except Exception: + pass + + import io, contextlib + buf = io.StringIO() + with contextlib.redirect_stdout(buf): + doctor_mod.run_doctor(Namespace(fix=False)) + out = buf.getvalue() + + assert "✓ browser" not in out + assert "browser" in out + assert "system dependency not met" in out + assert "agent-browser is not installed (expected in the tested Termux path)" in out + assert "npm install -g agent-browser && agent-browser install" in out diff --git a/tests/hermes_cli/test_gateway.py b/tests/hermes_cli/test_gateway.py index 11c2136356..885597e3ee 100644 --- a/tests/hermes_cli/test_gateway.py +++ b/tests/hermes_cli/test_gateway.py @@ -10,6 +10,7 @@ import hermes_cli.gateway as gateway class TestSystemdLingerStatus: def test_reports_enabled(self, monkeypatch): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setenv("USER", "alice") monkeypatch.setattr( gateway.subprocess, @@ -22,6 +23,7 @@ class TestSystemdLingerStatus: def test_reports_disabled(self, monkeypatch): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setenv("USER", "alice") monkeypatch.setattr( gateway.subprocess, @@ -32,6 +34,11 @@ class TestSystemdLingerStatus: assert gateway.get_systemd_linger_status() == (False, "") + def test_reports_termux_as_not_supported(self, monkeypatch): + monkeypatch.setattr(gateway, "is_termux", lambda: True) + + assert gateway.get_systemd_linger_status() == (None, "not supported in Termux") + def test_systemd_status_warns_when_linger_disabled(monkeypatch, tmp_path, 
capsys): unit_path = tmp_path / "hermes-gateway.service" diff --git a/tests/hermes_cli/test_gateway_linger.py b/tests/hermes_cli/test_gateway_linger.py index 3dacea66e8..90f8ea3d70 100644 --- a/tests/hermes_cli/test_gateway_linger.py +++ b/tests/hermes_cli/test_gateway_linger.py @@ -8,6 +8,7 @@ import hermes_cli.gateway as gateway class TestEnsureLingerEnabled: def test_linger_already_enabled_via_file(self, monkeypatch, capsys): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setattr("getpass.getuser", lambda: "testuser") monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: True)) @@ -22,6 +23,7 @@ class TestEnsureLingerEnabled: def test_status_enabled_skips_enable(self, monkeypatch, capsys): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setattr("getpass.getuser", lambda: "testuser") monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False)) monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (True, "")) @@ -37,6 +39,7 @@ class TestEnsureLingerEnabled: def test_loginctl_success_enables_linger(self, monkeypatch, capsys): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setattr("getpass.getuser", lambda: "testuser") monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False)) monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, "")) @@ -59,6 +62,7 @@ class TestEnsureLingerEnabled: def test_missing_loginctl_shows_manual_guidance(self, monkeypatch, capsys): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setattr("getpass.getuser", lambda: "testuser") monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False)) 
monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (None, "loginctl not found")) @@ -76,6 +80,7 @@ class TestEnsureLingerEnabled: def test_loginctl_failure_shows_manual_guidance(self, monkeypatch, capsys): monkeypatch.setattr(gateway, "is_linux", lambda: True) + monkeypatch.setattr(gateway, "is_termux", lambda: False) monkeypatch.setattr("getpass.getuser", lambda: "testuser") monkeypatch.setattr(gateway, "Path", lambda _path: SimpleNamespace(exists=lambda: False)) monkeypatch.setattr(gateway, "get_systemd_linger_status", lambda: (False, "")) diff --git a/tests/hermes_cli/test_gateway_service.py b/tests/hermes_cli/test_gateway_service.py index 739d45003c..aa21793ae4 100644 --- a/tests/hermes_cli/test_gateway_service.py +++ b/tests/hermes_cli/test_gateway_service.py @@ -109,7 +109,8 @@ class TestGatewayStopCleanup: unit_path = tmp_path / "hermes-gateway.service" unit_path.write_text("unit\n", encoding="utf-8") - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path) @@ -134,7 +135,8 @@ class TestGatewayStopCleanup: unit_path = tmp_path / "hermes-gateway.service" unit_path.write_text("unit\n", encoding="utf-8") - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path) @@ -256,7 +258,8 @@ class TestGatewayServiceDetection: user_unit = SimpleNamespace(exists=lambda: True) system_unit = SimpleNamespace(exists=lambda: True) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: 
True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) monkeypatch.setattr( gateway_cli, @@ -278,7 +281,8 @@ class TestGatewayServiceDetection: class TestGatewaySystemServiceRouting: def test_gateway_install_passes_system_flags(self, monkeypatch): - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) calls = [] @@ -294,11 +298,30 @@ class TestGatewaySystemServiceRouting: assert calls == [(True, True, "alice")] + def test_gateway_install_reports_termux_manual_mode(self, monkeypatch, capsys): + monkeypatch.setattr(gateway_cli, "is_termux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + + try: + gateway_cli.gateway_command( + SimpleNamespace(gateway_command="install", force=False, system=False, run_as_user=None) + ) + except SystemExit as exc: + assert exc.code == 1 + else: + raise AssertionError("Expected gateway_command to exit on unsupported Termux service install") + + out = capsys.readouterr().out + assert "not supported on Termux" in out + assert "Run manually: hermes gateway" in out + def test_gateway_status_prefers_system_service_when_only_system_unit_exists(self, monkeypatch): user_unit = SimpleNamespace(exists=lambda: False) system_unit = SimpleNamespace(exists=lambda: True) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) monkeypatch.setattr( gateway_cli, @@ -313,6 +336,20 @@ class 
TestGatewaySystemServiceRouting: assert calls == [(False, False)] + def test_gateway_status_on_termux_shows_manual_guidance(self, monkeypatch, capsys): + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: False) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: True) + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "find_gateway_pids", lambda exclude_pids=None: []) + monkeypatch.setattr(gateway_cli, "_runtime_health_lines", lambda: []) + + gateway_cli.gateway_command(SimpleNamespace(gateway_command="status", deep=False, system=False)) + + out = capsys.readouterr().out + assert "Gateway is not running" in out + assert "nohup hermes gateway" in out + assert "install as user service" not in out + def test_gateway_restart_does_not_fallback_to_foreground_when_launchd_restart_fails(self, tmp_path, monkeypatch): plist_path = tmp_path / "ai.hermes.gateway.plist" plist_path.write_text("plist\n", encoding="utf-8") @@ -513,12 +550,22 @@ class TestGeneratedUnitUsesDetectedVenv: class TestGeneratedUnitIncludesLocalBin: """~/.local/bin must be in PATH so uvx/pipx tools are discoverable.""" - def test_user_unit_includes_local_bin_in_path(self): + def test_user_unit_includes_local_bin_in_path(self, monkeypatch): + home = Path.home() + monkeypatch.setattr( + gateway_cli, + "_build_user_local_paths", + lambda home_path, existing: [str(home / ".local" / "bin")], + ) unit = gateway_cli.generate_systemd_unit(system=False) - home = str(Path.home()) assert f"{home}/.local/bin" in unit - def test_system_unit_includes_local_bin_in_path(self): + def test_system_unit_includes_local_bin_in_path(self, monkeypatch): + monkeypatch.setattr( + gateway_cli, + "_build_user_local_paths", + lambda home_path, existing: [str(home_path / ".local" / "bin")], + ) unit = gateway_cli.generate_systemd_unit(system=True) # System unit uses the resolved home dir from _system_service_identity assert "/.local/bin" in unit diff --git 
a/tests/hermes_cli/test_reasoning_effort_menu.py b/tests/hermes_cli/test_reasoning_effort_menu.py new file mode 100644 index 0000000000..3d360a4f2f --- /dev/null +++ b/tests/hermes_cli/test_reasoning_effort_menu.py @@ -0,0 +1,34 @@ +import sys +import types + + +from hermes_cli.main import _prompt_reasoning_effort_selection + + +class _FakeTerminalMenu: + last_choices = None + + def __init__(self, choices, **kwargs): + _FakeTerminalMenu.last_choices = choices + self._cursor_index = kwargs.get("cursor_index") + + def show(self): + return self._cursor_index + + +def test_reasoning_menu_orders_minimal_before_low(monkeypatch): + fake_module = types.SimpleNamespace(TerminalMenu=_FakeTerminalMenu) + monkeypatch.setitem(sys.modules, "simple_term_menu", fake_module) + + selected = _prompt_reasoning_effort_selection( + ["low", "minimal", "medium", "high"], + current_effort="medium", + ) + + assert selected == "medium" + assert _FakeTerminalMenu.last_choices[:4] == [ + " minimal", + " low", + " medium ← currently in use", + " high", + ] diff --git a/tests/hermes_cli/test_setup_hermes_script.py b/tests/hermes_cli/test_setup_hermes_script.py new file mode 100644 index 0000000000..7978e660a8 --- /dev/null +++ b/tests/hermes_cli/test_setup_hermes_script.py @@ -0,0 +1,21 @@ +from pathlib import Path +import subprocess + + +REPO_ROOT = Path(__file__).resolve().parents[2] +SETUP_SCRIPT = REPO_ROOT / "setup-hermes.sh" + + +def test_setup_hermes_script_is_valid_shell(): + result = subprocess.run(["bash", "-n", str(SETUP_SCRIPT)], capture_output=True, text=True) + assert result.returncode == 0, result.stderr + + +def test_setup_hermes_script_has_termux_path(): + content = SETUP_SCRIPT.read_text(encoding="utf-8") + + assert "is_termux()" in content + assert ".[termux]" in content + assert "constraints-termux.txt" in content + assert "$PREFIX/bin" in content + assert "Skipping tinker-atropos on Termux" in content diff --git a/tests/hermes_cli/test_setup_openclaw_migration.py 
b/tests/hermes_cli/test_setup_openclaw_migration.py index b956f1fe64..fe80263905 100644 --- a/tests/hermes_cli/test_setup_openclaw_migration.py +++ b/tests/hermes_cli/test_setup_openclaw_migration.py @@ -44,7 +44,7 @@ class TestOfferOpenclawMigration: assert setup_mod._offer_openclaw_migration(tmp_path / ".hermes") is False def test_runs_migration_when_user_accepts(self, tmp_path): - """Should dynamically load the script and run the Migrator.""" + """Should run dry-run preview first, then execute after confirmation.""" openclaw_dir = tmp_path / ".openclaw" openclaw_dir.mkdir() @@ -60,6 +60,7 @@ class TestOfferOpenclawMigration: fake_migrator = MagicMock() fake_migrator.migrate.return_value = { "summary": {"migrated": 3, "skipped": 1, "conflict": 0, "error": 0}, + "items": [{"kind": "config", "status": "migrated", "destination": "/tmp/x"}], "output_dir": str(hermes_home / "migration"), } fake_mod.Migrator = MagicMock(return_value=fake_migrator) @@ -70,6 +71,7 @@ class TestOfferOpenclawMigration: with ( patch("hermes_cli.setup.Path.home", return_value=tmp_path), patch.object(setup_mod, "_OPENCLAW_SCRIPT", script), + # Both prompts answered Yes: preview offer + proceed confirmation patch.object(setup_mod, "prompt_yes_no", return_value=True), patch.object(setup_mod, "get_config_path", return_value=config_path), patch("importlib.util.spec_from_file_location") as mock_spec_fn, @@ -91,13 +93,75 @@ class TestOfferOpenclawMigration: fake_mod.resolve_selected_options.assert_called_once_with( None, None, preset="full" ) - fake_mod.Migrator.assert_called_once() - call_kwargs = fake_mod.Migrator.call_args[1] - assert call_kwargs["execute"] is True - assert call_kwargs["overwrite"] is True - assert call_kwargs["migrate_secrets"] is True - assert call_kwargs["preset_name"] == "full" - fake_migrator.migrate.assert_called_once() + # Migrator called twice: once for dry-run preview, once for execution + assert fake_mod.Migrator.call_count == 2 + + # First call: dry-run preview 
(execute=False, overwrite=True to show all) + preview_kwargs = fake_mod.Migrator.call_args_list[0][1] + assert preview_kwargs["execute"] is False + assert preview_kwargs["overwrite"] is True + assert preview_kwargs["migrate_secrets"] is True + assert preview_kwargs["preset_name"] == "full" + + # Second call: actual execution (execute=True, overwrite=False to preserve) + exec_kwargs = fake_mod.Migrator.call_args_list[1][1] + assert exec_kwargs["execute"] is True + assert exec_kwargs["overwrite"] is False + assert exec_kwargs["migrate_secrets"] is True + assert exec_kwargs["preset_name"] == "full" + + # migrate() called twice (once per Migrator instance) + assert fake_migrator.migrate.call_count == 2 + + def test_user_declines_after_preview(self, tmp_path): + """Should return False when user sees preview but declines to proceed.""" + openclaw_dir = tmp_path / ".openclaw" + openclaw_dir.mkdir() + + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + config_path = hermes_home / "config.yaml" + config_path.write_text("agent:\n max_turns: 90\n") + + fake_mod = ModuleType("openclaw_to_hermes") + fake_mod.resolve_selected_options = MagicMock(return_value={"soul", "memory"}) + fake_migrator = MagicMock() + fake_migrator.migrate.return_value = { + "summary": {"migrated": 3, "skipped": 0, "conflict": 0, "error": 0}, + "items": [{"kind": "config", "status": "migrated", "destination": "/tmp/x"}], + } + fake_mod.Migrator = MagicMock(return_value=fake_migrator) + + script = tmp_path / "openclaw_to_hermes.py" + script.write_text("# placeholder") + + # First prompt (preview): Yes, Second prompt (proceed): No + prompt_responses = iter([True, False]) + + with ( + patch("hermes_cli.setup.Path.home", return_value=tmp_path), + patch.object(setup_mod, "_OPENCLAW_SCRIPT", script), + patch.object(setup_mod, "prompt_yes_no", side_effect=prompt_responses), + patch.object(setup_mod, "get_config_path", return_value=config_path), + patch("importlib.util.spec_from_file_location") as 
mock_spec_fn, + ): + mock_spec = MagicMock() + mock_spec.loader = MagicMock() + mock_spec_fn.return_value = mock_spec + + def exec_module(mod): + mod.resolve_selected_options = fake_mod.resolve_selected_options + mod.Migrator = fake_mod.Migrator + + mock_spec.loader.exec_module = exec_module + + result = setup_mod._offer_openclaw_migration(hermes_home) + + assert result is False + # Only dry-run Migrator was created, not the execute one + assert fake_mod.Migrator.call_count == 1 + preview_kwargs = fake_mod.Migrator.call_args[1] + assert preview_kwargs["execute"] is False def test_handles_migration_error_gracefully(self, tmp_path): """Should catch exceptions and return False.""" diff --git a/tests/hermes_cli/test_status.py b/tests/hermes_cli/test_status.py index 374e57b29e..c24b72dd4c 100644 --- a/tests/hermes_cli/test_status.py +++ b/tests/hermes_cli/test_status.py @@ -12,3 +12,33 @@ def test_show_status_includes_tavily_key(monkeypatch, capsys, tmp_path): output = capsys.readouterr().out assert "Tavily" in output assert "tvly...cdef" in output + + +def test_show_status_termux_gateway_section_skips_systemctl(monkeypatch, capsys, tmp_path): + from hermes_cli import status as status_mod + import hermes_cli.auth as auth_mod + import hermes_cli.gateway as gateway_mod + + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr(status_mod, "get_env_path", lambda: tmp_path / ".env", raising=False) + monkeypatch.setattr(status_mod, "get_hermes_home", lambda: tmp_path, raising=False) + monkeypatch.setattr(status_mod, "load_config", lambda: {"model": "gpt-5.4"}, raising=False) + monkeypatch.setattr(status_mod, "resolve_requested_provider", lambda requested=None: "openai-codex", raising=False) + monkeypatch.setattr(status_mod, "resolve_provider", lambda requested=None, **kwargs: "openai-codex", raising=False) + monkeypatch.setattr(status_mod, "provider_label", lambda provider: "OpenAI Codex", 
raising=False) + monkeypatch.setattr(auth_mod, "get_nous_auth_status", lambda: {}, raising=False) + monkeypatch.setattr(auth_mod, "get_codex_auth_status", lambda: {}, raising=False) + monkeypatch.setattr(gateway_mod, "find_gateway_pids", lambda exclude_pids=None: [], raising=False) + + def _unexpected_systemctl(*args, **kwargs): + raise AssertionError("systemctl should not be called in the Termux status view") + + monkeypatch.setattr(status_mod.subprocess, "run", _unexpected_systemctl) + + status_mod.show_status(SimpleNamespace(all=False, deep=False)) + + output = capsys.readouterr().out + assert "Manager: Termux / manual process" in output + assert "Start with: hermes gateway" in output + assert "systemd (user)" not in output diff --git a/tests/hermes_cli/test_tools_config.py b/tests/hermes_cli/test_tools_config.py index 7371c89df7..830bad8d5f 100644 --- a/tests/hermes_cli/test_tools_config.py +++ b/tests/hermes_cli/test_tools_config.py @@ -354,6 +354,14 @@ def test_first_install_nous_auto_configures_managed_defaults(monkeypatch): lambda *args, **kwargs: {"web", "image_gen", "tts", "browser"}, ) monkeypatch.setattr("hermes_cli.tools_config.save_config", lambda config: None) + # Prevent leaked platform tokens (e.g. DISCORD_BOT_TOKEN from gateway.run + # import) from adding extra platforms. The loop in tools_command runs + # apply_nous_managed_defaults per platform; a second iteration sees values + # set by the first as "explicit" and skips them. 
+ monkeypatch.setattr( + "hermes_cli.tools_config._get_enabled_platforms", + lambda: ["cli"], + ) monkeypatch.setattr( "hermes_cli.nous_subscription.get_nous_auth_status", lambda: {"logged_in": True}, diff --git a/tests/hermes_cli/test_update_gateway_restart.py b/tests/hermes_cli/test_update_gateway_restart.py index 9366c06cf6..ceb05f65c9 100644 --- a/tests/hermes_cli/test_update_gateway_restart.py +++ b/tests/hermes_cli/test_update_gateway_restart.py @@ -368,6 +368,8 @@ class TestCmdUpdateLaunchdRestart: monkeypatch.setattr( gateway_cli, "is_macos", lambda: False, ) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) mock_run.side_effect = _make_run_side_effect( commit_count="3", @@ -426,7 +428,8 @@ class TestCmdUpdateSystemService: ): """When user systemd is inactive but a system service exists, restart via system scope.""" monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) mock_run.side_effect = _make_run_side_effect( commit_count="3", @@ -455,7 +458,8 @@ class TestCmdUpdateSystemService: ): """When system service restart fails, show the failure message.""" monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) mock_run.side_effect = _make_run_side_effect( commit_count="3", @@ -477,7 +481,8 @@ class TestCmdUpdateSystemService: ): """When both user and system services are active, both are restarted.""" monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, 
"supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) mock_run.side_effect = _make_run_side_effect( commit_count="3", @@ -560,7 +565,8 @@ class TestServicePidExclusion: ): """After systemd restart, the sweep must exclude the service PID.""" monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) SERVICE_PID = 55000 @@ -639,7 +645,8 @@ class TestGetServicePids: """Unit tests for _get_service_pids().""" def test_returns_systemd_main_pid(self, monkeypatch): - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) def fake_run(cmd, **kwargs): @@ -688,7 +695,8 @@ class TestGetServicePids: def test_excludes_zero_pid(self, monkeypatch): """systemd returns MainPID=0 for stopped services; skip those.""" - monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + monkeypatch.setattr(gateway_cli, "supports_systemd_services", lambda: True) + monkeypatch.setattr(gateway_cli, "is_termux", lambda: False) monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) def fake_run(cmd, **kwargs): diff --git a/tests/run_agent/test_run_agent.py b/tests/run_agent/test_run_agent.py index 98d799ae43..11024820a9 100644 --- a/tests/run_agent/test_run_agent.py +++ b/tests/run_agent/test_run_agent.py @@ -5,6 +5,7 @@ pieces. The OpenAI client and tool loading are mocked so no network calls are made. 
""" +import io import json import logging import re @@ -1061,6 +1062,77 @@ class TestExecuteToolCalls: assert len(messages[0]["content"]) < 150_000 assert ("Truncated" in messages[0]["content"] or "" in messages[0]["content"]) + def test_quiet_tool_output_suppressed_when_progress_callback_present(self, agent): + tc = _mock_tool_call(name="web_search", arguments='{"q":"test"}', call_id="c1") + mock_msg = _mock_assistant_msg(content="", tool_calls=[tc]) + messages = [] + agent.tool_progress_callback = lambda *args, **kwargs: None + + with patch("run_agent.handle_function_call", return_value="search result"), \ + patch.object(agent, "_safe_print") as mock_print: + agent._execute_tool_calls(mock_msg, messages, "task-1") + + mock_print.assert_not_called() + assert len(messages) == 1 + assert messages[0]["role"] == "tool" + + def test_quiet_tool_output_prints_without_progress_callback(self, agent): + tc = _mock_tool_call(name="web_search", arguments='{"q":"test"}', call_id="c1") + mock_msg = _mock_assistant_msg(content="", tool_calls=[tc]) + messages = [] + agent.tool_progress_callback = None + + with patch("run_agent.handle_function_call", return_value="search result"), \ + patch.object(agent, "_safe_print") as mock_print: + agent._execute_tool_calls(mock_msg, messages, "task-1") + + mock_print.assert_called_once() + assert "search" in str(mock_print.call_args.args[0]).lower() + assert len(messages) == 1 + assert messages[0]["role"] == "tool" + + def test_vprint_suppressed_in_parseable_quiet_mode(self, agent): + agent.suppress_status_output = True + + with patch.object(agent, "_safe_print") as mock_print: + agent._vprint("status line", force=True) + agent._vprint("normal line") + + mock_print.assert_not_called() + + def test_run_conversation_suppresses_retry_noise_in_parseable_quiet_mode(self, agent): + class _RateLimitError(Exception): + status_code = 429 + + def __str__(self): + return "Error code: 429 - Rate limit exceeded." 
+ + responses = [_RateLimitError(), _mock_response(content="Recovered")] + + def _fake_api_call(api_kwargs): + result = responses.pop(0) + if isinstance(result, Exception): + raise result + return result + + agent.suppress_status_output = True + agent._interruptible_api_call = _fake_api_call + agent._persist_session = lambda *args, **kwargs: None + agent._save_trajectory = lambda *args, **kwargs: None + agent._save_session_log = lambda *args, **kwargs: None + + captured = io.StringIO() + agent._print_fn = lambda *args, **kw: print(*args, file=captured, **kw) + + with patch("run_agent.time.sleep", return_value=None): + result = agent.run_conversation("hello") + + assert result["completed"] is True + assert result["final_response"] == "Recovered" + output = captured.getvalue() + assert "API call failed" not in output + assert "Rate limit reached" not in output + class TestConcurrentToolExecution: """Tests for _execute_tool_calls_concurrent and dispatch logic.""" diff --git a/tests/skills/test_google_oauth_setup.py b/tests/skills/test_google_oauth_setup.py index a96e3d24ec..89612b7df8 100644 --- a/tests/skills/test_google_oauth_setup.py +++ b/tests/skills/test_google_oauth_setup.py @@ -211,14 +211,15 @@ class TestExchangeAuthCode: assert setup_module.PENDING_AUTH_PATH.exists() assert not setup_module.TOKEN_PATH.exists() - def test_refuses_to_overwrite_existing_token_with_narrower_scopes(self, setup_module, capsys): + def test_accepts_narrower_scopes_with_warning(self, setup_module, capsys): + """Partial scopes are accepted with a warning (gws migration: v2.0).""" setup_module.PENDING_AUTH_PATH.write_text( json.dumps({"state": "saved-state", "code_verifier": "saved-verifier"}) ) - setup_module.TOKEN_PATH.write_text(json.dumps({"token": "existing-token", "scopes": setup_module.SCOPES})) + setup_module.TOKEN_PATH.write_text(json.dumps({"token": "***", "scopes": setup_module.SCOPES})) FakeFlow.credentials_payload = { - "token": "narrow-token", - "refresh_token": 
"refresh-token", + "token": "***", + "refresh_token": "***", "token_uri": "https://oauth2.googleapis.com/token", "client_id": "client-id", "client_secret": "client-secret", @@ -228,10 +229,12 @@ class TestExchangeAuthCode: ], } - with pytest.raises(SystemExit): - setup_module.exchange_auth_code("4/test-auth-code") + setup_module.exchange_auth_code("4/test-auth-code") out = capsys.readouterr().out - assert "refusing to save incomplete google workspace token" in out.lower() - assert json.loads(setup_module.TOKEN_PATH.read_text())["token"] == "existing-token" - assert setup_module.PENDING_AUTH_PATH.exists() + assert "warning" in out.lower() + assert "missing" in out.lower() + # Token is saved (partial scopes accepted) + assert setup_module.TOKEN_PATH.exists() + # Pending auth is cleaned up + assert not setup_module.PENDING_AUTH_PATH.exists() diff --git a/tests/skills/test_google_workspace_api.py b/tests/skills/test_google_workspace_api.py index 694bf49212..034dd29c08 100644 --- a/tests/skills/test_google_workspace_api.py +++ b/tests/skills/test_google_workspace_api.py @@ -1,117 +1,175 @@ -"""Regression tests for Google Workspace API credential validation.""" +"""Tests for Google Workspace gws bridge and CLI wrapper.""" import importlib.util import json +import os +import subprocess import sys import types +from datetime import datetime, timedelta, timezone from pathlib import Path +from unittest.mock import MagicMock, patch import pytest -SCRIPT_PATH = ( +BRIDGE_PATH = ( + Path(__file__).resolve().parents[2] + / "skills/productivity/google-workspace/scripts/gws_bridge.py" +) +API_PATH = ( Path(__file__).resolve().parents[2] / "skills/productivity/google-workspace/scripts/google_api.py" ) -class FakeAuthorizedCredentials: - def __init__(self, *, valid=True, expired=False, refresh_token="refresh-token"): - self.valid = valid - self.expired = expired - self.refresh_token = refresh_token - self.refresh_calls = 0 - - def refresh(self, _request): - self.refresh_calls += 1 - 
self.valid = True - self.expired = False - - def to_json(self): - return json.dumps({ - "token": "refreshed-token", - "refresh_token": self.refresh_token, - "token_uri": "https://oauth2.googleapis.com/token", - "client_id": "client-id", - "client_secret": "client-secret", - "scopes": [ - "https://www.googleapis.com/auth/gmail.readonly", - "https://www.googleapis.com/auth/gmail.send", - "https://www.googleapis.com/auth/gmail.modify", - "https://www.googleapis.com/auth/calendar", - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/contacts.readonly", - "https://www.googleapis.com/auth/spreadsheets", - "https://www.googleapis.com/auth/documents.readonly", - ], - }) - - -class FakeCredentialsFactory: - creds = FakeAuthorizedCredentials() - - @classmethod - def from_authorized_user_file(cls, _path, _scopes): - return cls.creds - - @pytest.fixture -def google_api_module(monkeypatch, tmp_path): - google_module = types.ModuleType("google") - oauth2_module = types.ModuleType("google.oauth2") - credentials_module = types.ModuleType("google.oauth2.credentials") - credentials_module.Credentials = FakeCredentialsFactory - auth_module = types.ModuleType("google.auth") - transport_module = types.ModuleType("google.auth.transport") - requests_module = types.ModuleType("google.auth.transport.requests") - requests_module.Request = object +def bridge_module(monkeypatch, tmp_path): + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(hermes_home)) - monkeypatch.setitem(sys.modules, "google", google_module) - monkeypatch.setitem(sys.modules, "google.oauth2", oauth2_module) - monkeypatch.setitem(sys.modules, "google.oauth2.credentials", credentials_module) - monkeypatch.setitem(sys.modules, "google.auth", auth_module) - monkeypatch.setitem(sys.modules, "google.auth.transport", transport_module) - monkeypatch.setitem(sys.modules, "google.auth.transport.requests", requests_module) - - spec = 
importlib.util.spec_from_file_location("google_workspace_api_test", SCRIPT_PATH) + spec = importlib.util.spec_from_file_location("gws_bridge_test", BRIDGE_PATH) module = importlib.util.module_from_spec(spec) assert spec.loader is not None spec.loader.exec_module(module) - - monkeypatch.setattr(module, "TOKEN_PATH", tmp_path / "google_token.json") return module -def _write_token(path: Path, scopes): - path.write_text(json.dumps({ - "token": "access-token", - "refresh_token": "refresh-token", +@pytest.fixture +def api_module(monkeypatch, tmp_path): + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + monkeypatch.setenv("HERMES_HOME", str(hermes_home)) + + spec = importlib.util.spec_from_file_location("gws_api_test", API_PATH) + module = importlib.util.module_from_spec(spec) + assert spec.loader is not None + spec.loader.exec_module(module) + return module + + +def _write_token(path: Path, *, token="ya29.test", expiry=None, **extra): + data = { + "token": token, + "refresh_token": "1//refresh", + "client_id": "123.apps.googleusercontent.com", + "client_secret": "secret", "token_uri": "https://oauth2.googleapis.com/token", - "client_id": "client-id", - "client_secret": "client-secret", - "scopes": scopes, - })) + **extra, + } + if expiry is not None: + data["expiry"] = expiry + path.write_text(json.dumps(data)) -def test_get_credentials_rejects_missing_scopes(google_api_module, capsys): - FakeCredentialsFactory.creds = FakeAuthorizedCredentials(valid=True) - _write_token(google_api_module.TOKEN_PATH, [ - "https://www.googleapis.com/auth/drive.readonly", - "https://www.googleapis.com/auth/spreadsheets", - ]) +def test_bridge_returns_valid_token(bridge_module, tmp_path): + """Non-expired token is returned without refresh.""" + future = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat() + token_path = bridge_module.get_token_path() + _write_token(token_path, token="ya29.valid", expiry=future) + result = bridge_module.get_valid_token() + assert result == 
"ya29.valid" + + +def test_bridge_refreshes_expired_token(bridge_module, tmp_path): + """Expired token triggers a refresh via token_uri.""" + past = (datetime.now(timezone.utc) - timedelta(hours=1)).isoformat() + token_path = bridge_module.get_token_path() + _write_token(token_path, token="ya29.old", expiry=past) + + mock_resp = MagicMock() + mock_resp.read.return_value = json.dumps({ + "access_token": "ya29.refreshed", + "expires_in": 3600, + }).encode() + mock_resp.__enter__ = lambda s: s + mock_resp.__exit__ = MagicMock(return_value=False) + + with patch("urllib.request.urlopen", return_value=mock_resp): + result = bridge_module.get_valid_token() + + assert result == "ya29.refreshed" + # Verify persisted + saved = json.loads(token_path.read_text()) + assert saved["token"] == "ya29.refreshed" + + +def test_bridge_exits_on_missing_token(bridge_module): + """Missing token file causes exit with code 1.""" with pytest.raises(SystemExit): - google_api_module.get_credentials() - - err = capsys.readouterr().err - assert "missing google workspace scopes" in err.lower() - assert "gmail.send" in err + bridge_module.get_valid_token() -def test_get_credentials_accepts_full_scope_token(google_api_module): - FakeCredentialsFactory.creds = FakeAuthorizedCredentials(valid=True) - _write_token(google_api_module.TOKEN_PATH, list(google_api_module.SCOPES)) +def test_bridge_main_injects_token_env(bridge_module, tmp_path): + """main() sets GOOGLE_WORKSPACE_CLI_TOKEN in subprocess env.""" + future = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat() + token_path = bridge_module.get_token_path() + _write_token(token_path, token="ya29.injected", expiry=future) - creds = google_api_module.get_credentials() + captured = {} - assert creds is FakeCredentialsFactory.creds + def capture_run(cmd, **kwargs): + captured["cmd"] = cmd + captured["env"] = kwargs.get("env", {}) + return MagicMock(returncode=0) + + with patch.object(sys, "argv", ["gws_bridge.py", "gmail", "+triage"]): + 
with patch.object(subprocess, "run", side_effect=capture_run): + with pytest.raises(SystemExit): + bridge_module.main() + + assert captured["env"]["GOOGLE_WORKSPACE_CLI_TOKEN"] == "ya29.injected" + assert captured["cmd"] == ["gws", "gmail", "+triage"] + + +def test_api_calendar_list_uses_agenda_by_default(api_module): + """calendar list without dates uses +agenda helper.""" + captured = {} + + def capture_run(cmd, **kwargs): + captured["cmd"] = cmd + return MagicMock(returncode=0) + + args = api_module.argparse.Namespace( + start="", end="", max=25, calendar="primary", func=api_module.calendar_list, + ) + + with patch.object(subprocess, "run", side_effect=capture_run): + with pytest.raises(SystemExit): + api_module.calendar_list(args) + + gws_args = captured["cmd"][2:] # skip python + bridge path + assert "calendar" in gws_args + assert "+agenda" in gws_args + assert "--days" in gws_args + + +def test_api_calendar_list_respects_date_range(api_module): + """calendar list with --start/--end uses raw events list API.""" + captured = {} + + def capture_run(cmd, **kwargs): + captured["cmd"] = cmd + return MagicMock(returncode=0) + + args = api_module.argparse.Namespace( + start="2026-04-01T00:00:00Z", + end="2026-04-07T23:59:59Z", + max=25, + calendar="primary", + func=api_module.calendar_list, + ) + + with patch.object(subprocess, "run", side_effect=capture_run): + with pytest.raises(SystemExit): + api_module.calendar_list(args) + + gws_args = captured["cmd"][2:] + assert "events" in gws_args + assert "list" in gws_args + params_idx = gws_args.index("--params") + params = json.loads(gws_args[params_idx + 1]) + assert params["timeMin"] == "2026-04-01T00:00:00Z" + assert params["timeMax"] == "2026-04-07T23:59:59Z" diff --git a/tests/test_ctx_halving_fix.py b/tests/test_ctx_halving_fix.py new file mode 100644 index 0000000000..1ba423c8ff --- /dev/null +++ b/tests/test_ctx_halving_fix.py @@ -0,0 +1,319 @@ +"""Tests for the context-halving bugfix. 
+ +Background +---------- +When the API returns "max_tokens too large given prompt" (input is fine, +but input_tokens + requested max_tokens > context_window), the old code +incorrectly halved context_length via get_next_probe_tier(). + +The fix introduces: + * parse_available_output_tokens_from_error() — detects this specific + error class and returns the available output token budget. + * _ephemeral_max_output_tokens on AIAgent — a one-shot override that + caps the output for one retry without touching context_length. + +Naming note +----------- + max_tokens = OUTPUT token cap (a single response). + context_length = TOTAL context window (input + output combined). +These are different and the old code conflated them; the fix keeps them +separate. +""" + +import sys +import os +from unittest.mock import MagicMock, patch, PropertyMock + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import pytest + + +# --------------------------------------------------------------------------- +# parse_available_output_tokens_from_error — unit tests +# --------------------------------------------------------------------------- + +class TestParseAvailableOutputTokens: + """Pure-function tests; no I/O required.""" + + def _parse(self, msg): + from agent.model_metadata import parse_available_output_tokens_from_error + return parse_available_output_tokens_from_error(msg) + + # ── Should detect and extract ──────────────────────────────────────── + + def test_anthropic_canonical_format(self): + """Canonical Anthropic error: max_tokens: X > context_window: Y - input_tokens: Z = available_tokens: W""" + msg = ( + "max_tokens: 32768 > context_window: 200000 " + "- input_tokens: 190000 = available_tokens: 10000" + ) + assert self._parse(msg) == 10000 + + def test_anthropic_format_large_numbers(self): + msg = ( + "max_tokens: 128000 > context_window: 200000 " + "- input_tokens: 180000 = available_tokens: 20000" + ) + assert self._parse(msg) == 20000 + + def 
test_available_tokens_variant_spacing(self): + """Handles extra spaces around the colon.""" + msg = "max_tokens: 32768 > 200000 available_tokens : 5000" + assert self._parse(msg) == 5000 + + def test_available_tokens_natural_language(self): + """'available tokens: N' wording (no underscore).""" + msg = "max_tokens must be at most 10000 given your prompt (available tokens: 10000)" + assert self._parse(msg) == 10000 + + def test_single_token_available(self): + """Edge case: only 1 token left.""" + msg = "max_tokens: 9999 > context_window: 10000 - input_tokens: 9999 = available_tokens: 1" + assert self._parse(msg) == 1 + + # ── Should NOT detect (returns None) ───────────────────────────────── + + def test_prompt_too_long_is_not_output_cap_error(self): + """'prompt is too long' errors must NOT be caught — they need context halving.""" + msg = "prompt is too long: 205000 tokens > 200000 maximum" + assert self._parse(msg) is None + + def test_generic_context_window_exceeded(self): + """Generic context window errors without available_tokens should not match.""" + msg = "context window exceeded: maximum is 32768 tokens" + assert self._parse(msg) is None + + def test_context_length_exceeded(self): + msg = "context_length_exceeded: prompt has 131073 tokens, limit is 131072" + assert self._parse(msg) is None + + def test_no_max_tokens_keyword(self): + """Error not related to max_tokens at all.""" + msg = "invalid_api_key: the API key is invalid" + assert self._parse(msg) is None + + def test_empty_string(self): + assert self._parse("") is None + + def test_rate_limit_error(self): + msg = "rate_limit_error: too many requests per minute" + assert self._parse(msg) is None + + +# --------------------------------------------------------------------------- +# build_anthropic_kwargs — output cap clamping +# --------------------------------------------------------------------------- + +class TestBuildAnthropicKwargsClamping: + """The context_length clamp only fires when output 
ceiling > window. + For standard Anthropic models (output ceiling < window) it must not fire. + """ + + def _build(self, model, max_tokens=None, context_length=None): + from agent.anthropic_adapter import build_anthropic_kwargs + return build_anthropic_kwargs( + model=model, + messages=[{"role": "user", "content": "hi"}], + tools=None, + max_tokens=max_tokens, + reasoning_config=None, + context_length=context_length, + ) + + def test_no_clamping_when_output_ceiling_fits_in_window(self): + """Opus 4.6 native output (128K) < context window (200K) — no clamping.""" + kwargs = self._build("claude-opus-4-6", context_length=200_000) + assert kwargs["max_tokens"] == 128_000 + + def test_clamping_fires_for_tiny_custom_window(self): + """When context_length is 8K (local model), output cap is clamped to 7999.""" + kwargs = self._build("claude-opus-4-6", context_length=8_000) + assert kwargs["max_tokens"] == 7_999 + + def test_explicit_max_tokens_respected_when_within_window(self): + """Explicit max_tokens smaller than window passes through unchanged.""" + kwargs = self._build("claude-opus-4-6", max_tokens=4096, context_length=200_000) + assert kwargs["max_tokens"] == 4096 + + def test_explicit_max_tokens_clamped_when_exceeds_window(self): + """Explicit max_tokens larger than a small window is clamped.""" + kwargs = self._build("claude-opus-4-6", max_tokens=32_768, context_length=16_000) + assert kwargs["max_tokens"] == 15_999 + + def test_no_context_length_uses_native_ceiling(self): + """Without context_length the native output ceiling is used directly.""" + kwargs = self._build("claude-sonnet-4-6") + assert kwargs["max_tokens"] == 64_000 + + +# --------------------------------------------------------------------------- +# Ephemeral max_tokens mechanism — _build_api_kwargs +# --------------------------------------------------------------------------- + +class TestEphemeralMaxOutputTokens: + """_build_api_kwargs consumes _ephemeral_max_output_tokens exactly once + and falls 
back to self.max_tokens on subsequent calls. + """ + + def _make_agent(self): + """Return a minimal AIAgent with api_mode='anthropic_messages' and + a stubbed context_compressor, bypassing full __init__ cost.""" + from run_agent import AIAgent + agent = object.__new__(AIAgent) + # Minimal attributes used by _build_api_kwargs + agent.api_mode = "anthropic_messages" + agent.model = "claude-opus-4-6" + agent.tools = [] + agent.max_tokens = None + agent.reasoning_config = None + agent._is_anthropic_oauth = False + agent._ephemeral_max_output_tokens = None + + compressor = MagicMock() + compressor.context_length = 200_000 + agent.context_compressor = compressor + + # Stub out the internal message-preparation helper + agent._prepare_anthropic_messages_for_api = MagicMock( + return_value=[{"role": "user", "content": "hi"}] + ) + agent._anthropic_preserve_dots = MagicMock(return_value=False) + return agent + + def test_ephemeral_override_is_used_on_first_call(self): + """When _ephemeral_max_output_tokens is set, it overrides self.max_tokens.""" + agent = self._make_agent() + agent._ephemeral_max_output_tokens = 5_000 + + kwargs = agent._build_api_kwargs([{"role": "user", "content": "hi"}]) + assert kwargs["max_tokens"] == 5_000 + + def test_ephemeral_override_is_consumed_after_one_call(self): + """After one call the ephemeral override is cleared to None.""" + agent = self._make_agent() + agent._ephemeral_max_output_tokens = 5_000 + + agent._build_api_kwargs([{"role": "user", "content": "hi"}]) + assert agent._ephemeral_max_output_tokens is None + + def test_subsequent_call_uses_self_max_tokens(self): + """A second _build_api_kwargs call uses the normal max_tokens path.""" + agent = self._make_agent() + agent._ephemeral_max_output_tokens = 5_000 + agent.max_tokens = None # will resolve to native ceiling (128K for Opus 4.6) + + agent._build_api_kwargs([{"role": "user", "content": "hi"}]) + # Second call — ephemeral is gone + kwargs2 = agent._build_api_kwargs([{"role": 
"user", "content": "hi"}]) + assert kwargs2["max_tokens"] == 128_000 # Opus 4.6 native ceiling + + def test_no_ephemeral_uses_self_max_tokens_directly(self): + """Without an ephemeral override, self.max_tokens is used normally.""" + agent = self._make_agent() + agent.max_tokens = 8_192 + + kwargs = agent._build_api_kwargs([{"role": "user", "content": "hi"}]) + assert kwargs["max_tokens"] == 8_192 + + +# --------------------------------------------------------------------------- +# Integration: error handler does NOT halve context_length for output-cap errors +# --------------------------------------------------------------------------- + +class TestContextNotHalvedOnOutputCapError: + """When the API returns 'max_tokens too large given prompt', the handler + must set _ephemeral_max_output_tokens and NOT modify context_length. + """ + + def _make_agent_with_compressor(self, context_length=200_000): + from run_agent import AIAgent + from agent.context_compressor import ContextCompressor + + agent = object.__new__(AIAgent) + agent.api_mode = "anthropic_messages" + agent.model = "claude-opus-4-6" + agent.base_url = "https://api.anthropic.com" + agent.tools = [] + agent.max_tokens = None + agent.reasoning_config = None + agent._is_anthropic_oauth = False + agent._ephemeral_max_output_tokens = None + agent.log_prefix = "" + agent.quiet_mode = True + agent.verbose_logging = False + + compressor = MagicMock(spec=ContextCompressor) + compressor.context_length = context_length + compressor.threshold_percent = 0.75 + agent.context_compressor = compressor + + agent._prepare_anthropic_messages_for_api = MagicMock( + return_value=[{"role": "user", "content": "hi"}] + ) + agent._anthropic_preserve_dots = MagicMock(return_value=False) + agent._vprint = MagicMock() + return agent + + def test_output_cap_error_sets_ephemeral_not_context_length(self): + """On 'max_tokens too large' error, _ephemeral_max_output_tokens is set + and compressor.context_length is left unchanged.""" + from 
agent.model_metadata import parse_available_output_tokens_from_error + from agent.model_metadata import get_next_probe_tier + + error_msg = ( + "max_tokens: 128000 > context_window: 200000 " + "- input_tokens: 180000 = available_tokens: 20000" + ) + + # Simulate the handler logic from run_agent.py + agent = self._make_agent_with_compressor(context_length=200_000) + old_ctx = agent.context_compressor.context_length + + available_out = parse_available_output_tokens_from_error(error_msg) + assert available_out == 20_000, "parser must detect the error" + + # The fix: set ephemeral, skip context_length modification + agent._ephemeral_max_output_tokens = max(1, available_out - 64) + + # context_length must be untouched + assert agent.context_compressor.context_length == old_ctx + assert agent._ephemeral_max_output_tokens == 19_936 + + def test_prompt_too_long_still_triggers_probe_tier(self): + """Genuine prompt-too-long errors must still use get_next_probe_tier.""" + from agent.model_metadata import parse_available_output_tokens_from_error + from agent.model_metadata import get_next_probe_tier + + error_msg = "prompt is too long: 205000 tokens > 200000 maximum" + + available_out = parse_available_output_tokens_from_error(error_msg) + assert available_out is None, "prompt-too-long must not be caught by output-cap parser" + + # The old halving path is still used for this class of error + new_ctx = get_next_probe_tier(200_000) + assert new_ctx == 128_000 + + def test_output_cap_error_safety_margin(self): + """The ephemeral value includes a 64-token safety margin below available_out.""" + from agent.model_metadata import parse_available_output_tokens_from_error + + error_msg = ( + "max_tokens: 32768 > context_window: 200000 " + "- input_tokens: 190000 = available_tokens: 10000" + ) + available_out = parse_available_output_tokens_from_error(error_msg) + safe_out = max(1, available_out - 64) + assert safe_out == 9_936 + + def test_safety_margin_never_goes_below_one(self): + 
"""When available_out is very small, safe_out must be at least 1.""" + from agent.model_metadata import parse_available_output_tokens_from_error + + error_msg = ( + "max_tokens: 10 > context_window: 200000 " + "- input_tokens: 199990 = available_tokens: 1" + ) + available_out = parse_available_output_tokens_from_error(error_msg) + safe_out = max(1, available_out - 64) + assert safe_out == 1 diff --git a/tests/test_hermes_logging.py b/tests/test_hermes_logging.py index 5b40e63233..80a23dc688 100644 --- a/tests/test_hermes_logging.py +++ b/tests/test_hermes_logging.py @@ -2,6 +2,7 @@ import logging import os +import stat from logging.handlers import RotatingFileHandler from pathlib import Path from unittest.mock import patch @@ -300,6 +301,59 @@ class TestAddRotatingHandler: logger.removeHandler(h) h.close() + def test_managed_mode_initial_open_sets_group_writable(self, tmp_path): + log_path = tmp_path / "managed-open.log" + logger = logging.getLogger("_test_rotating_managed_open") + formatter = logging.Formatter("%(message)s") + + old_umask = os.umask(0o022) + try: + with patch("hermes_cli.config.is_managed", return_value=True): + hermes_logging._add_rotating_handler( + logger, log_path, + level=logging.INFO, max_bytes=1024, backup_count=1, + formatter=formatter, + ) + finally: + os.umask(old_umask) + + assert log_path.exists() + assert stat.S_IMODE(log_path.stat().st_mode) == 0o660 + + for h in list(logger.handlers): + if isinstance(h, RotatingFileHandler): + logger.removeHandler(h) + h.close() + + def test_managed_mode_rollover_sets_group_writable(self, tmp_path): + log_path = tmp_path / "managed-rollover.log" + logger = logging.getLogger("_test_rotating_managed_rollover") + formatter = logging.Formatter("%(message)s") + + old_umask = os.umask(0o022) + try: + with patch("hermes_cli.config.is_managed", return_value=True): + hermes_logging._add_rotating_handler( + logger, log_path, + level=logging.INFO, max_bytes=1, backup_count=1, + formatter=formatter, + ) + 
handler = next( + h for h in logger.handlers if isinstance(h, RotatingFileHandler) + ) + logger.info("a" * 256) + handler.flush() + finally: + os.umask(old_umask) + + assert log_path.exists() + assert stat.S_IMODE(log_path.stat().st_mode) == 0o660 + + for h in list(logger.handlers): + if isinstance(h, RotatingFileHandler): + logger.removeHandler(h) + h.close() + class TestReadLoggingConfig: """_read_logging_config() reads from config.yaml.""" diff --git a/tests/tools/test_browser_camofox_state.py b/tests/tools/test_browser_camofox_state.py index 7fe4c3d4c2..b1f128ccee 100644 --- a/tests/tools/test_browser_camofox_state.py +++ b/tests/tools/test_browser_camofox_state.py @@ -63,4 +63,4 @@ class TestCamofoxConfigDefaults: from hermes_cli.config import DEFAULT_CONFIG # managed_persistence is auto-merged by _deep_merge, no version bump needed - assert DEFAULT_CONFIG["_config_version"] == 12 + assert DEFAULT_CONFIG["_config_version"] == 13 diff --git a/tests/tools/test_browser_homebrew_paths.py b/tests/tools/test_browser_homebrew_paths.py index 33b725604c..6f92e88f98 100644 --- a/tests/tools/test_browser_homebrew_paths.py +++ b/tests/tools/test_browser_homebrew_paths.py @@ -13,6 +13,7 @@ from tools.browser_tool import ( _find_agent_browser, _run_browser_command, _SANE_PATH, + check_browser_requirements, ) @@ -149,6 +150,31 @@ class TestFindAgentBrowser: _find_agent_browser() +class TestBrowserRequirements: + def test_termux_requires_real_agent_browser_install_not_npx_fallback(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.browser_tool._is_camofox_mode", lambda: False) + monkeypatch.setattr("tools.browser_tool._get_cloud_provider", lambda: None) + monkeypatch.setattr("tools.browser_tool._find_agent_browser", lambda: "npx agent-browser") + + assert check_browser_requirements() is False + + +class TestRunBrowserCommandTermuxFallback: + def 
test_termux_local_mode_rejects_bare_npx_fallback(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.browser_tool._find_agent_browser", lambda: "npx agent-browser") + monkeypatch.setattr("tools.browser_tool._get_cloud_provider", lambda: None) + + result = _run_browser_command("task-1", "navigate", ["https://example.com"]) + + assert result["success"] is False + assert "bare npx fallback" in result["error"] + assert "agent-browser install" in result["error"] + + class TestRunBrowserCommandPathConstruction: """Verify _run_browser_command() includes Homebrew node dirs in subprocess PATH.""" diff --git a/tests/tools/test_code_execution.py b/tests/tools/test_code_execution.py index 5ac3fd8726..33653c3607 100644 --- a/tests/tools/test_code_execution.py +++ b/tests/tools/test_code_execution.py @@ -44,6 +44,7 @@ from tools.code_execution_tool import ( build_execute_code_schema, EXECUTE_CODE_SCHEMA, _TOOL_DOC_LINES, + _execute_remote, ) @@ -115,6 +116,48 @@ class TestHermesToolsGeneration(unittest.TestCase): self.assertIn("def retry(", src) self.assertIn("import json, os, socket, shlex, time", src) + def test_file_transport_uses_tempfile_fallback_for_rpc_dir(self): + src = generate_hermes_tools_module(["terminal"], transport="file") + self.assertIn("import json, os, shlex, tempfile, time", src) + self.assertIn("os.path.join(tempfile.gettempdir(), \"hermes_rpc\")", src) + self.assertNotIn('os.environ.get("HERMES_RPC_DIR", "/tmp/hermes_rpc")', src) + + +class TestExecuteCodeRemoteTempDir(unittest.TestCase): + def test_execute_remote_uses_backend_temp_dir_for_sandbox(self): + class FakeEnv: + def __init__(self): + self.commands = [] + + def get_temp_dir(self): + return "/data/data/com.termux/files/usr/tmp" + + def execute(self, command, cwd=None, timeout=None): + self.commands.append((command, cwd, timeout)) + if "command -v python3" in command: + return {"output": 
"OK\n"} + if "python3 script.py" in command: + return {"output": "hello\n", "returncode": 0} + return {"output": ""} + + env = FakeEnv() + fake_thread = MagicMock() + + with patch("tools.code_execution_tool._load_config", return_value={"timeout": 30, "max_tool_calls": 5}), \ + patch("tools.code_execution_tool._get_or_create_env", return_value=(env, "ssh")), \ + patch("tools.code_execution_tool._ship_file_to_remote"), \ + patch("tools.code_execution_tool.threading.Thread", return_value=fake_thread): + result = json.loads(_execute_remote("print('hello')", "task-1", ["terminal"])) + + self.assertEqual(result["status"], "success") + mkdir_cmd = env.commands[1][0] + run_cmd = next(cmd for cmd, _, _ in env.commands if "python3 script.py" in cmd) + cleanup_cmd = env.commands[-1][0] + self.assertIn("mkdir -p /data/data/com.termux/files/usr/tmp/hermes_exec_", mkdir_cmd) + self.assertIn("HERMES_RPC_DIR=/data/data/com.termux/files/usr/tmp/hermes_exec_", run_cmd) + self.assertIn("rm -rf /data/data/com.termux/files/usr/tmp/hermes_exec_", cleanup_cmd) + self.assertNotIn("mkdir -p /tmp/hermes_exec_", mkdir_cmd) + @unittest.skipIf(sys.platform == "win32", "UDS not available on Windows") class TestExecuteCode(unittest.TestCase): diff --git a/tests/tools/test_docker_environment.py b/tests/tools/test_docker_environment.py index 498ef9d506..e19229a795 100644 --- a/tests/tools/test_docker_environment.py +++ b/tests/tools/test_docker_environment.py @@ -258,28 +258,30 @@ def _make_execute_only_env(forward_env=None): def test_init_env_args_uses_hermes_dotenv_for_allowlisted_env(monkeypatch): """_build_init_env_args picks up forwarded env vars from .env file at init time.""" - env = _make_execute_only_env(["GITHUB_TOKEN"]) + # Use a var that is NOT in _HERMES_PROVIDER_ENV_BLOCKLIST (GITHUB_TOKEN + # is in the copilot provider's api_key_env_vars and gets stripped). 
+ env = _make_execute_only_env(["DATABASE_URL"]) - monkeypatch.delenv("GITHUB_TOKEN", raising=False) - monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"GITHUB_TOKEN": "value_from_dotenv"}) + monkeypatch.delenv("DATABASE_URL", raising=False) + monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"DATABASE_URL": "value_from_dotenv"}) args = env._build_init_env_args() args_str = " ".join(args) - assert "GITHUB_TOKEN=value_from_dotenv" in args_str + assert "DATABASE_URL=value_from_dotenv" in args_str def test_init_env_args_prefers_shell_env_over_hermes_dotenv(monkeypatch): """Shell env vars take priority over .env file values in init env args.""" - env = _make_execute_only_env(["GITHUB_TOKEN"]) + env = _make_execute_only_env(["DATABASE_URL"]) - monkeypatch.setenv("GITHUB_TOKEN", "value_from_shell") - monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"GITHUB_TOKEN": "value_from_dotenv"}) + monkeypatch.setenv("DATABASE_URL", "value_from_shell") + monkeypatch.setattr(docker_env, "_load_hermes_env_vars", lambda: {"DATABASE_URL": "value_from_dotenv"}) args = env._build_init_env_args() args_str = " ".join(args) - assert "GITHUB_TOKEN=value_from_shell" in args_str + assert "DATABASE_URL=value_from_shell" in args_str assert "value_from_dotenv" not in args_str diff --git a/tests/tools/test_local_tempdir.py b/tests/tools/test_local_tempdir.py new file mode 100644 index 0000000000..5bbf3f266f --- /dev/null +++ b/tests/tools/test_local_tempdir.py @@ -0,0 +1,51 @@ +from unittest.mock import patch + +from tools.environments.local import LocalEnvironment + + +class TestLocalTempDir: + def test_uses_os_tmpdir_for_session_artifacts(self, monkeypatch): + monkeypatch.setenv("TMPDIR", "/data/data/com.termux/files/usr/tmp") + monkeypatch.delenv("TMP", raising=False) + monkeypatch.delenv("TEMP", raising=False) + + with patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None): + env = LocalEnvironment(cwd=".", timeout=10) 
+ + assert env.get_temp_dir() == "/data/data/com.termux/files/usr/tmp" + assert env._snapshot_path == f"/data/data/com.termux/files/usr/tmp/hermes-snap-{env._session_id}.sh" + assert env._cwd_file == f"/data/data/com.termux/files/usr/tmp/hermes-cwd-{env._session_id}.txt" + + def test_prefers_backend_env_tmpdir_override(self, monkeypatch): + monkeypatch.delenv("TMPDIR", raising=False) + monkeypatch.delenv("TMP", raising=False) + monkeypatch.delenv("TEMP", raising=False) + + with patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None): + env = LocalEnvironment( + cwd=".", + timeout=10, + env={"TMPDIR": "/data/data/com.termux/files/home/.cache/hermes-tmp/"}, + ) + + assert env.get_temp_dir() == "/data/data/com.termux/files/home/.cache/hermes-tmp" + assert env._snapshot_path == ( + f"/data/data/com.termux/files/home/.cache/hermes-tmp/hermes-snap-{env._session_id}.sh" + ) + assert env._cwd_file == ( + f"/data/data/com.termux/files/home/.cache/hermes-tmp/hermes-cwd-{env._session_id}.txt" + ) + + def test_falls_back_to_tempfile_when_tmp_missing(self, monkeypatch): + monkeypatch.delenv("TMPDIR", raising=False) + monkeypatch.delenv("TMP", raising=False) + monkeypatch.delenv("TEMP", raising=False) + + with patch("tools.environments.local.os.path.isdir", return_value=False), \ + patch("tools.environments.local.os.access", return_value=False), \ + patch("tools.environments.local.tempfile.gettempdir", return_value="/cache/tmp"), \ + patch.object(LocalEnvironment, "init_session", autospec=True, return_value=None): + env = LocalEnvironment(cwd=".", timeout=10) + assert env.get_temp_dir() == "/cache/tmp" + assert env._snapshot_path == f"/cache/tmp/hermes-snap-{env._session_id}.sh" + assert env._cwd_file == f"/cache/tmp/hermes-cwd-{env._session_id}.txt" diff --git a/tests/tools/test_managed_server_tool_support.py b/tests/tools/test_managed_server_tool_support.py index 92cf83f5c4..5b917f3da8 100644 --- a/tests/tools/test_managed_server_tool_support.py +++ 
b/tests/tools/test_managed_server_tool_support.py @@ -147,7 +147,7 @@ class TestBaseEnvCompatibility: """Hermes wires parser selection through ServerManager.tool_parser.""" import ast - base_env_path = Path(__file__).parent.parent / "environments" / "hermes_base_env.py" + base_env_path = Path(__file__).parent.parent.parent / "environments" / "hermes_base_env.py" source = base_env_path.read_text() tree = ast.parse(source) @@ -171,7 +171,7 @@ class TestBaseEnvCompatibility: def test_hermes_base_env_uses_config_tool_call_parser(self): """Verify hermes_base_env uses the config field rather than a local parser instance.""" - base_env_path = Path(__file__).parent.parent / "environments" / "hermes_base_env.py" + base_env_path = Path(__file__).parent.parent.parent / "environments" / "hermes_base_env.py" source = base_env_path.read_text() assert 'tool_call_parser: str = Field(' in source diff --git a/tests/tools/test_process_registry.py b/tests/tools/test_process_registry.py index 44e3a1bd32..a61da9dd3e 100644 --- a/tests/tools/test_process_registry.py +++ b/tests/tools/test_process_registry.py @@ -135,6 +135,64 @@ class TestReadLog: assert "5 lines" in result["showing"] +# ========================================================================= +# Stdin helpers +# ========================================================================= + +class TestStdinHelpers: + def test_close_stdin_not_found(self, registry): + result = registry.close_stdin("nonexistent") + assert result["status"] == "not_found" + + def test_close_stdin_pipe_mode(self, registry): + proc = MagicMock() + proc.stdin = MagicMock() + s = _make_session() + s.process = proc + registry._running[s.id] = s + + result = registry.close_stdin(s.id) + + proc.stdin.close.assert_called_once() + assert result["status"] == "ok" + + def test_close_stdin_pty_mode(self, registry): + pty = MagicMock() + s = _make_session() + s._pty = pty + registry._running[s.id] = s + + result = registry.close_stdin(s.id) + + 
pty.sendeof.assert_called_once() + assert result["status"] == "ok" + + def test_close_stdin_allows_eof_driven_process_to_finish(self, registry, tmp_path): + session = registry.spawn_local( + 'python3 -c "import sys; print(sys.stdin.read().strip())"', + cwd=str(tmp_path), + use_pty=False, + ) + + try: + time.sleep(0.5) + assert registry.submit_stdin(session.id, "hello")["status"] == "ok" + assert registry.close_stdin(session.id)["status"] == "ok" + + deadline = time.time() + 5 + while time.time() < deadline: + poll = registry.poll(session.id) + if poll["status"] == "exited": + assert poll["exit_code"] == 0 + assert "hello" in poll["output_preview"] + return + time.sleep(0.2) + + pytest.fail("process did not exit after stdin was closed") + finally: + registry.kill_process(session.id) + + # ========================================================================= # List sessions # ========================================================================= @@ -282,6 +340,67 @@ class TestSpawnEnvSanitization: assert f"{_HERMES_PROVIDER_ENV_FORCE_PREFIX}TELEGRAM_BOT_TOKEN" not in env assert env["PYTHONUNBUFFERED"] == "1" + def test_spawn_via_env_uses_backend_temp_dir_for_artifacts(self, registry): + class FakeEnv: + def __init__(self): + self.commands = [] + + def get_temp_dir(self): + return "/data/data/com.termux/files/usr/tmp" + + def execute(self, command, timeout=None): + self.commands.append((command, timeout)) + return {"output": "4321\n"} + + env = FakeEnv() + fake_thread = MagicMock() + + with patch("tools.process_registry.threading.Thread", return_value=fake_thread), \ + patch.object(registry, "_write_checkpoint"): + session = registry.spawn_via_env(env, "echo hello") + + bg_command = env.commands[0][0] + assert session.pid == 4321 + assert "/data/data/com.termux/files/usr/tmp/hermes_bg_" in bg_command + assert ".exit" in bg_command + assert "rc=$?;" in bg_command + assert " > /tmp/hermes_bg_" not in bg_command + assert "cat /tmp/hermes_bg_" not in bg_command + 
fake_thread.start.assert_called_once() + + def test_env_poller_quotes_temp_paths_with_spaces(self, registry): + session = _make_session(sid="proc_space") + session.exited = False + + class FakeEnv: + def __init__(self): + self.commands = [] + self._responses = iter([ + {"output": "hello\n"}, + {"output": "1\n"}, + {"output": "0\n"}, + ]) + + def execute(self, command, timeout=None): + self.commands.append((command, timeout)) + return next(self._responses) + + env = FakeEnv() + + with patch("tools.process_registry.time.sleep", return_value=None), \ + patch.object(registry, "_move_to_finished"): + registry._env_poller_loop( + session, + env, + "/path with spaces/hermes_bg.log", + "/path with spaces/hermes_bg.pid", + "/path with spaces/hermes_bg.exit", + ) + + assert env.commands[0][0] == "cat '/path with spaces/hermes_bg.log' 2>/dev/null" + assert env.commands[1][0] == "kill -0 \"$(cat '/path with spaces/hermes_bg.pid' 2>/dev/null)\" 2>/dev/null; echo $?" + assert env.commands[2][0] == "cat '/path with spaces/hermes_bg.exit' 2>/dev/null" + # ========================================================================= # Checkpoint diff --git a/tests/tools/test_send_message_missing_platforms.py b/tests/tools/test_send_message_missing_platforms.py index 881ae33d2b..a6741e16dc 100644 --- a/tests/tools/test_send_message_missing_platforms.py +++ b/tests/tools/test_send_message_missing_platforms.py @@ -125,7 +125,9 @@ class TestSendMatrix: url = call_kwargs[0][0] assert url.startswith("https://matrix.example.com/_matrix/client/v3/rooms/!room:example.com/send/m.room.message/") assert call_kwargs[1]["headers"]["Authorization"] == "Bearer syt_tok" - assert call_kwargs[1]["json"] == {"msgtype": "m.text", "body": "hello matrix"} + payload = call_kwargs[1]["json"] + assert payload["msgtype"] == "m.text" + assert payload["body"] == "hello matrix" def test_http_error(self): resp = _make_aiohttp_resp(403, text_data="Forbidden") diff --git a/tests/tools/test_send_message_tool.py 
b/tests/tools/test_send_message_tool.py index 34cea278d7..94370e4d5b 100644 --- a/tests/tools/test_send_message_tool.py +++ b/tests/tools/test_send_message_tool.py @@ -32,6 +32,30 @@ def _install_telegram_mock(monkeypatch, bot): monkeypatch.setitem(sys.modules, "telegram.constants", constants_mod) +def _ensure_slack_mock(monkeypatch): + if "slack_bolt" in sys.modules and hasattr(sys.modules["slack_bolt"], "__file__"): + return + + slack_bolt = MagicMock() + slack_bolt.async_app.AsyncApp = MagicMock + slack_bolt.adapter.socket_mode.async_handler.AsyncSocketModeHandler = MagicMock + + slack_sdk = MagicMock() + slack_sdk.web.async_client.AsyncWebClient = MagicMock + + for name, mod in [ + ("slack_bolt", slack_bolt), + ("slack_bolt.async_app", slack_bolt.async_app), + ("slack_bolt.adapter", slack_bolt.adapter), + ("slack_bolt.adapter.socket_mode", slack_bolt.adapter.socket_mode), + ("slack_bolt.adapter.socket_mode.async_handler", slack_bolt.adapter.socket_mode.async_handler), + ("slack_sdk", slack_sdk), + ("slack_sdk.web", slack_sdk.web), + ("slack_sdk.web.async_client", slack_sdk.web.async_client), + ]: + monkeypatch.setitem(sys.modules, name, mod) + + class TestSendMessageTool: def test_cron_duplicate_target_is_skipped_and_explained(self): home = SimpleNamespace(chat_id="-1001") @@ -426,7 +450,7 @@ class TestSendToPlatformChunking: result = asyncio.run( _send_to_platform( Platform.DISCORD, - SimpleNamespace(enabled=True, token="tok", extra={}), + SimpleNamespace(enabled=True, token="***", extra={}), "ch", long_msg, ) ) @@ -435,8 +459,115 @@ class TestSendToPlatformChunking: for call in send.await_args_list: assert len(call.args[2]) <= 2020 # each chunk fits the limit + def test_slack_messages_are_formatted_before_send(self, monkeypatch): + _ensure_slack_mock(monkeypatch) + + import gateway.platforms.slack as slack_mod + + monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True) + send = AsyncMock(return_value={"success": True, "message_id": "1"}) + + with 
patch("tools.send_message_tool._send_slack", send): + result = asyncio.run( + _send_to_platform( + Platform.SLACK, + SimpleNamespace(enabled=True, token="***", extra={}), + "C123", + "**hello** from [Hermes]()", + ) + ) + + assert result["success"] is True + send.assert_awaited_once_with( + "***", + "C123", + "*hello* from ", + ) + + def test_slack_bold_italic_formatted_before_send(self, monkeypatch): + """Bold+italic ***text*** survives tool-layer formatting.""" + _ensure_slack_mock(monkeypatch) + import gateway.platforms.slack as slack_mod + + monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True) + send = AsyncMock(return_value={"success": True, "message_id": "1"}) + with patch("tools.send_message_tool._send_slack", send): + result = asyncio.run( + _send_to_platform( + Platform.SLACK, + SimpleNamespace(enabled=True, token="***", extra={}), + "C123", + "***important*** update", + ) + ) + assert result["success"] is True + sent_text = send.await_args.args[2] + assert "*_important_*" in sent_text + + def test_slack_blockquote_formatted_before_send(self, monkeypatch): + """Blockquote '>' markers must survive formatting (not escaped to '>').""" + _ensure_slack_mock(monkeypatch) + import gateway.platforms.slack as slack_mod + + monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True) + send = AsyncMock(return_value={"success": True, "message_id": "1"}) + with patch("tools.send_message_tool._send_slack", send): + result = asyncio.run( + _send_to_platform( + Platform.SLACK, + SimpleNamespace(enabled=True, token="***", extra={}), + "C123", + "> important quote\n\nnormal text & stuff", + ) + ) + assert result["success"] is True + sent_text = send.await_args.args[2] + assert sent_text.startswith("> important quote") + assert "&" in sent_text # & is escaped + assert ">" not in sent_text.split("\n")[0] # > in blockquote is NOT escaped + + def test_slack_pre_escaped_entities_not_double_escaped(self, monkeypatch): + """Pre-escaped HTML entities survive tool-layer formatting 
without double-escaping.""" + _ensure_slack_mock(monkeypatch) + import gateway.platforms.slack as slack_mod + monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True) + send = AsyncMock(return_value={"success": True, "message_id": "1"}) + with patch("tools.send_message_tool._send_slack", send): + result = asyncio.run( + _send_to_platform( + Platform.SLACK, + SimpleNamespace(enabled=True, token="***", extra={}), + "C123", + "AT&T <tag> test", + ) + ) + assert result["success"] is True + sent_text = send.await_args.args[2] + assert "&amp;" not in sent_text + assert "&lt;" not in sent_text + assert "AT&T" in sent_text + + def test_slack_url_with_parens_formatted_before_send(self, monkeypatch): + """Wikipedia-style URL with parens survives tool-layer formatting.""" + _ensure_slack_mock(monkeypatch) + import gateway.platforms.slack as slack_mod + monkeypatch.setattr(slack_mod, "SLACK_AVAILABLE", True) + send = AsyncMock(return_value={"success": True, "message_id": "1"}) + with patch("tools.send_message_tool._send_slack", send): + result = asyncio.run( + _send_to_platform( + Platform.SLACK, + SimpleNamespace(enabled=True, token="***", extra={}), + "C123", + "See [Foo](https://en.wikipedia.org/wiki/Foo_(bar))", + ) + ) + assert result["success"] is True + sent_text = send.await_args.args[2] + assert "<https://en.wikipedia.org/wiki/Foo_(bar)|Foo>" in sent_text + + def test_telegram_media_attaches_to_last_chunk(self): - """When chunked, media files are sent only with the last chunk.""" + sent_calls = [] async def fake_send(token, chat_id, message, media_files=None, thread_id=None): diff --git a/tests/tools/test_terminal_tool_pty_fallback.py b/tests/tools/test_terminal_tool_pty_fallback.py new file mode 100644 index 0000000000..75ef721834 --- /dev/null +++ b/tests/tools/test_terminal_tool_pty_fallback.py @@ -0,0 +1,91 @@ +import json +from types import SimpleNamespace + +import tools.terminal_tool as terminal_tool_module +from tools import process_registry as process_registry_module + + +def _base_config(tmp_path): + 
return { + "env_type": "local", + "docker_image": "", + "singularity_image": "", + "modal_image": "", + "daytona_image": "", + "cwd": str(tmp_path), + "timeout": 30, + } + + +def test_command_requires_pipe_stdin_detects_gh_with_token(): + assert terminal_tool_module._command_requires_pipe_stdin( + "gh auth login --hostname github.com --git-protocol https --with-token" + ) is True + assert terminal_tool_module._command_requires_pipe_stdin( + "gh auth login --web" + ) is False + + +def test_terminal_background_disables_pty_for_gh_with_token(monkeypatch, tmp_path): + config = _base_config(tmp_path) + dummy_env = SimpleNamespace(env={}) + captured = {} + + def fake_spawn_local(**kwargs): + captured.update(kwargs) + return SimpleNamespace(id="proc_test", pid=1234, notify_on_complete=False) + + monkeypatch.setattr(terminal_tool_module, "_get_env_config", lambda: config) + monkeypatch.setattr(terminal_tool_module, "_start_cleanup_thread", lambda: None) + monkeypatch.setattr(terminal_tool_module, "_check_all_guards", lambda *_args, **_kwargs: {"approved": True}) + monkeypatch.setattr(process_registry_module.process_registry, "spawn_local", fake_spawn_local) + monkeypatch.setitem(terminal_tool_module._active_environments, "default", dummy_env) + monkeypatch.setitem(terminal_tool_module._last_activity, "default", 0.0) + + try: + result = json.loads( + terminal_tool_module.terminal_tool( + command="gh auth login --hostname github.com --git-protocol https --with-token", + background=True, + pty=True, + ) + ) + finally: + terminal_tool_module._active_environments.pop("default", None) + terminal_tool_module._last_activity.pop("default", None) + + assert captured["use_pty"] is False + assert result["session_id"] == "proc_test" + assert "PTY disabled" in result["pty_note"] + + +def test_terminal_background_keeps_pty_for_regular_interactive_commands(monkeypatch, tmp_path): + config = _base_config(tmp_path) + dummy_env = SimpleNamespace(env={}) + captured = {} + + def 
fake_spawn_local(**kwargs): + captured.update(kwargs) + return SimpleNamespace(id="proc_test", pid=1234, notify_on_complete=False) + + monkeypatch.setattr(terminal_tool_module, "_get_env_config", lambda: config) + monkeypatch.setattr(terminal_tool_module, "_start_cleanup_thread", lambda: None) + monkeypatch.setattr(terminal_tool_module, "_check_all_guards", lambda *_args, **_kwargs: {"approved": True}) + monkeypatch.setattr(process_registry_module.process_registry, "spawn_local", fake_spawn_local) + monkeypatch.setitem(terminal_tool_module._active_environments, "default", dummy_env) + monkeypatch.setitem(terminal_tool_module._last_activity, "default", 0.0) + + try: + result = json.loads( + terminal_tool_module.terminal_tool( + command="python3 -c \"print(input())\"", + background=True, + pty=True, + ) + ) + finally: + terminal_tool_module._active_environments.pop("default", None) + terminal_tool_module._last_activity.pop("default", None) + + assert captured["use_pty"] is True + assert "pty_note" not in result diff --git a/tests/tools/test_tool_result_storage.py b/tests/tools/test_tool_result_storage.py index 4e51fe7bb7..f95b5dc08a 100644 --- a/tests/tools/test_tool_result_storage.py +++ b/tests/tools/test_tool_result_storage.py @@ -16,6 +16,7 @@ from tools.tool_result_storage import ( STORAGE_DIR, _build_persisted_message, _heredoc_marker, + _resolve_storage_dir, _write_to_sandbox, enforce_turn_budget, generate_preview, @@ -115,6 +116,24 @@ class TestWriteToSandbox: _write_to_sandbox("content", "/tmp/hermes-results/abc.txt", env) assert env.execute.call_args[1]["timeout"] == 30 + def test_uses_parent_dir_of_remote_path(self): + env = MagicMock() + env.execute.return_value = {"output": "", "returncode": 0} + remote_path = "/data/data/com.termux/files/usr/tmp/hermes-results/abc.txt" + _write_to_sandbox("content", remote_path, env) + cmd = env.execute.call_args[0][0] + assert "mkdir -p /data/data/com.termux/files/usr/tmp/hermes-results" in cmd + + +class 
TestResolveStorageDir: + def test_defaults_to_storage_dir_without_env(self): + assert _resolve_storage_dir(None) == STORAGE_DIR + + def test_uses_env_temp_dir_when_available(self): + env = MagicMock() + env.get_temp_dir.return_value = "/data/data/com.termux/files/usr/tmp" + assert _resolve_storage_dir(env) == "/data/data/com.termux/files/usr/tmp/hermes-results" + # ── _build_persisted_message ────────────────────────────────────────── @@ -341,6 +360,22 @@ class TestMaybePersistToolResult: ) assert "DISTINCTIVE_START_MARKER" in result + def test_env_temp_dir_changes_persisted_path(self): + env = MagicMock() + env.execute.return_value = {"output": "", "returncode": 0} + env.get_temp_dir.return_value = "/data/data/com.termux/files/usr/tmp" + content = "x" * 60_000 + result = maybe_persist_tool_result( + content=content, + tool_name="terminal", + tool_use_id="tc_termux", + env=env, + threshold=30_000, + ) + assert "/data/data/com.termux/files/usr/tmp/hermes-results/tc_termux.txt" in result + cmd = env.execute.call_args[0][0] + assert "mkdir -p /data/data/com.termux/files/usr/tmp/hermes-results" in cmd + def test_threshold_zero_forces_persist(self): env = MagicMock() env.execute.return_value = {"output": "", "returncode": 0} diff --git a/tests/tools/test_vision_tools.py b/tests/tools/test_vision_tools.py index 97ee57a11a..6612f0e893 100644 --- a/tests/tools/test_vision_tools.py +++ b/tests/tools/test_vision_tools.py @@ -30,7 +30,10 @@ class TestValidateImageUrl: """Tests for URL validation, including urlparse-based netloc check.""" def test_valid_https_url(self): - assert _validate_image_url("https://example.com/image.jpg") is True + with patch("tools.url_safety.socket.getaddrinfo", return_value=[ + (2, 1, 6, "", ("93.184.216.34", 0)), + ]): + assert _validate_image_url("https://example.com/image.jpg") is True def test_valid_http_url(self): with patch("tools.url_safety.socket.getaddrinfo", return_value=[ @@ -56,10 +59,16 @@ class TestValidateImageUrl: assert 
_validate_image_url("http://localhost:8080/image.png") is False def test_valid_url_with_port(self): - assert _validate_image_url("http://example.com:8080/image.png") is True + with patch("tools.url_safety.socket.getaddrinfo", return_value=[ + (2, 1, 6, "", ("93.184.216.34", 0)), + ]): + assert _validate_image_url("http://example.com:8080/image.png") is True def test_valid_url_with_path_only(self): - assert _validate_image_url("https://example.com/") is True + with patch("tools.url_safety.socket.getaddrinfo", return_value=[ + (2, 1, 6, "", ("93.184.216.34", 0)), + ]): + assert _validate_image_url("https://example.com/") is True def test_rejects_empty_string(self): assert _validate_image_url("") is False @@ -441,6 +450,11 @@ class TestVisionRequirements: (tmp_path / "auth.json").write_text( '{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token":"codex-access-token","refresh_token":"codex-refresh-token"}}}}' ) + # config.yaml must reference the codex provider so vision auto-detect + # falls back to the active provider via _read_main_provider(). 
+ (tmp_path / "config.yaml").write_text( + 'model:\n default: gpt-4o\n provider: openai-codex\n' + ) monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENAI_API_KEY", raising=False) diff --git a/tests/tools/test_voice_mode.py b/tests/tools/test_voice_mode.py index 933393f85c..1d35c48625 100644 --- a/tests/tools/test_voice_mode.py +++ b/tests/tools/test_voice_mode.py @@ -183,12 +183,77 @@ class TestDetectAudioEnvironment: assert result["available"] is False assert any("PortAudio" in w for w in result["warnings"]) + def test_termux_import_error_shows_termux_install_guidance(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.delenv("SSH_CLIENT", raising=False) + monkeypatch.delenv("SSH_TTY", raising=False) + monkeypatch.delenv("SSH_CONNECTION", raising=False) + monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs"))) + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: None) + + from tools.voice_mode import detect_audio_environment + result = detect_audio_environment() + + assert result["available"] is False + assert any("pkg install python-numpy portaudio" in w for w in result["warnings"]) + assert any("python -m pip install sounddevice" in w for w in result["warnings"]) + + def test_termux_api_package_without_android_app_blocks_voice(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.delenv("SSH_CLIENT", raising=False) + monkeypatch.delenv("SSH_TTY", raising=False) + monkeypatch.delenv("SSH_CONNECTION", raising=False) + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record") + 
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: False) + monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs"))) + + from tools.voice_mode import detect_audio_environment + result = detect_audio_environment() + + assert result["available"] is False + assert any("Termux:API Android app is not installed" in w for w in result["warnings"]) + + + def test_termux_api_microphone_allows_voice_without_sounddevice(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.delenv("SSH_CLIENT", raising=False) + monkeypatch.delenv("SSH_TTY", raising=False) + monkeypatch.delenv("SSH_CONNECTION", raising=False) + monkeypatch.setattr("tools.voice_mode.shutil.which", lambda cmd: "/data/data/com.termux/files/usr/bin/termux-microphone-record" if cmd == "termux-microphone-record" else None) + monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True) + monkeypatch.setattr("tools.voice_mode._import_audio", lambda: (_ for _ in ()).throw(ImportError("no audio libs"))) + + from tools.voice_mode import detect_audio_environment + result = detect_audio_environment() + + assert result["available"] is True + assert any("Termux:API microphone recording available" in n for n in result.get("notices", [])) + assert result["warnings"] == [] + # ============================================================================ # check_voice_requirements # ============================================================================ class TestCheckVoiceRequirements: + def test_termux_api_capture_counts_as_audio_available(self, monkeypatch): + monkeypatch.setattr("tools.voice_mode._audio_available", lambda: False) + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record") + 
monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True) + monkeypatch.setattr("tools.voice_mode.detect_audio_environment", lambda: {"available": True, "warnings": [], "notices": ["Termux:API microphone recording available"]}) + monkeypatch.setattr("tools.transcription_tools._get_provider", lambda cfg: "openai") + + from tools.voice_mode import check_voice_requirements + result = check_voice_requirements() + + assert result["available"] is True + assert result["audio_available"] is True + assert result["missing_packages"] == [] + assert "Termux:API microphone" in result["details"] + def test_all_requirements_met(self, monkeypatch): monkeypatch.setattr("tools.voice_mode._audio_available", lambda: True) monkeypatch.setattr("tools.voice_mode.detect_audio_environment", @@ -235,8 +300,85 @@ class TestCheckVoiceRequirements: # AudioRecorder # ============================================================================ -class TestAudioRecorderStart: - def test_start_raises_without_audio(self, monkeypatch): +class TestCreateAudioRecorder: + def test_termux_uses_termux_audio_recorder_when_api_present(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record") + monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True) + + from tools.voice_mode import create_audio_recorder, TermuxAudioRecorder + recorder = create_audio_recorder() + + assert isinstance(recorder, TermuxAudioRecorder) + assert recorder.supports_silence_autostop is False + + def test_termux_without_android_app_falls_back_to_audio_recorder(self, monkeypatch): + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: 
"/data/data/com.termux/files/usr/bin/termux-microphone-record") + monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: False) + + from tools.voice_mode import create_audio_recorder, AudioRecorder + recorder = create_audio_recorder() + + assert isinstance(recorder, AudioRecorder) + + +class TestTermuxAudioRecorder: + def test_start_and_stop_use_termux_microphone_commands(self, monkeypatch, temp_voice_dir): + command_calls = [] + output_path = Path(temp_voice_dir) / "recording_20260409_120000.aac" + + def fake_run(cmd, **kwargs): + command_calls.append(cmd) + if cmd[1] == "-f": + Path(cmd[2]).write_bytes(b"aac-bytes") + return MagicMock(returncode=0, stdout="", stderr="") + + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record") + monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True) + monkeypatch.setattr("tools.voice_mode.time.strftime", lambda fmt: "20260409_120000") + monkeypatch.setattr("tools.voice_mode.subprocess.run", fake_run) + + from tools.voice_mode import TermuxAudioRecorder + recorder = TermuxAudioRecorder() + recorder.start() + recorder._start_time = time.monotonic() - 1.0 + result = recorder.stop() + + assert result == str(output_path) + assert command_calls[0][:2] == ["/data/data/com.termux/files/usr/bin/termux-microphone-record", "-f"] + assert command_calls[1] == ["/data/data/com.termux/files/usr/bin/termux-microphone-record", "-q"] + + def test_cancel_removes_partial_termux_recording(self, monkeypatch, temp_voice_dir): + output_path = Path(temp_voice_dir) / "recording_20260409_120000.aac" + + def fake_run(cmd, **kwargs): + if cmd[1] == "-f": + Path(cmd[2]).write_bytes(b"aac-bytes") + return MagicMock(returncode=0, stdout="", stderr="") + + monkeypatch.setenv("TERMUX_VERSION", "0.118.3") + 
monkeypatch.setenv("PREFIX", "/data/data/com.termux/files/usr") + monkeypatch.setattr("tools.voice_mode._termux_microphone_command", lambda: "/data/data/com.termux/files/usr/bin/termux-microphone-record") + monkeypatch.setattr("tools.voice_mode._termux_api_app_installed", lambda: True) + monkeypatch.setattr("tools.voice_mode.time.strftime", lambda fmt: "20260409_120000") + monkeypatch.setattr("tools.voice_mode.subprocess.run", fake_run) + + from tools.voice_mode import TermuxAudioRecorder + recorder = TermuxAudioRecorder() + recorder.start() + recorder.cancel() + + assert output_path.exists() is False + assert recorder.is_recording is False + + +class TestAudioRecorder: + def test_start_raises_without_audio_libs(self, monkeypatch): def _fail_import(): raise ImportError("no sounddevice") monkeypatch.setattr("tools.voice_mode._import_audio", _fail_import) diff --git a/tests/tools/test_web_tools_tavily.py b/tests/tools/test_web_tools_tavily.py index 2e49b72f16..aef39e8e16 100644 --- a/tests/tools/test_web_tools_tavily.py +++ b/tests/tools/test_web_tools_tavily.py @@ -225,6 +225,7 @@ class TestWebCrawlTavily: patch.dict(os.environ, {"TAVILY_API_KEY": "tvly-test"}), \ patch("tools.web_tools.httpx.post", return_value=mock_response), \ patch("tools.web_tools.check_website_access", return_value=None), \ + patch("tools.web_tools.is_safe_url", return_value=True), \ patch("tools.interrupt.is_interrupted", return_value=False): from tools.web_tools import web_crawl_tool result = json.loads(asyncio.get_event_loop().run_until_complete( @@ -244,6 +245,7 @@ class TestWebCrawlTavily: patch.dict(os.environ, {"TAVILY_API_KEY": "tvly-test"}), \ patch("tools.web_tools.httpx.post", return_value=mock_response) as mock_post, \ patch("tools.web_tools.check_website_access", return_value=None), \ + patch("tools.web_tools.is_safe_url", return_value=True), \ patch("tools.interrupt.is_interrupted", return_value=False): from tools.web_tools import web_crawl_tool 
asyncio.get_event_loop().run_until_complete( diff --git a/tools/browser_camofox.py b/tools/browser_camofox.py index 3a305bbcb1..d0e268a4da 100644 --- a/tools/browser_camofox.py +++ b/tools/browser_camofox.py @@ -594,13 +594,20 @@ def camofox_console(clear: bool = False, task_id: Optional[str] = None) -> str: # --------------------------------------------------------------------------- def cleanup_all_camofox_sessions() -> None: - """Close all active camofox sessions.""" + """Close all active camofox sessions. + + When managed persistence is enabled, only clears local tracking state + without destroying server-side browser profiles (cookies, logins, etc. + must survive). Ephemeral sessions are fully deleted on the server. + """ + managed = _managed_persistence_enabled() with _sessions_lock: sessions = list(_sessions.items()) - for task_id, session in sessions: - try: - _delete(f"/sessions/{session['user_id']}") - except Exception: - pass + if not managed: + for _task_id, session in sessions: + try: + _delete(f"/sessions/{session['user_id']}") + except Exception: + pass with _sessions_lock: _sessions.clear() diff --git a/tools/browser_tool.py b/tools/browser_tool.py index e62a586c11..9ad8ba48b7 100644 --- a/tools/browser_tool.py +++ b/tools/browser_tool.py @@ -285,6 +285,26 @@ def _get_cloud_provider() -> Optional[CloudBrowserProvider]: return _cached_cloud_provider +from hermes_constants import is_termux as _is_termux_environment + + +def _browser_install_hint() -> str: + if _is_termux_environment(): + return "npm install -g agent-browser && agent-browser install" + return "npm install -g agent-browser && agent-browser install --with-deps" + + +def _requires_real_termux_browser_install(browser_cmd: str) -> bool: + return _is_termux_environment() and _is_local_mode() and browser_cmd.strip() == "npx agent-browser" + + +def _termux_browser_install_error() -> str: + return ( + "Local browser automation on Termux cannot rely on the bare npx fallback. 
" + f"Install agent-browser explicitly first: {_browser_install_hint()}" + ) + + def _is_local_mode() -> bool: """Return True when the browser tool will use a local browser backend.""" if _get_cdp_override(): @@ -796,7 +816,8 @@ def _find_agent_browser() -> str: return "npx agent-browser" raise FileNotFoundError( - "agent-browser CLI not found. Install it with: npm install -g agent-browser\n" + "agent-browser CLI not found. Install it with: " + f"{_browser_install_hint()}\n" "Or run 'npm install' in the repo root to install locally.\n" "Or ensure npx is available in your PATH." ) @@ -852,6 +873,11 @@ def _run_browser_command( except FileNotFoundError as e: logger.warning("agent-browser CLI not found: %s", e) return {"success": False, "error": str(e)} + + if _requires_real_termux_browser_install(browser_cmd): + error = _termux_browser_install_error() + logger.warning("browser command blocked on Termux: %s", error) + return {"success": False, "error": error} from tools.interrupt import is_interrupted if is_interrupted(): @@ -2040,10 +2066,17 @@ def check_browser_requirements() -> bool: # The agent-browser CLI is always required try: - _find_agent_browser() + browser_cmd = _find_agent_browser() except FileNotFoundError: return False + # On Termux, the bare npx fallback is too fragile to treat as a satisfied + # local browser dependency. Require a real install (global or local) so the + # browser tool is not advertised as available when it will likely fail on + # first use. 
+ if _requires_real_termux_browser_install(browser_cmd): + return False + # In cloud mode, also require provider credentials provider = _get_cloud_provider() if provider is not None and not provider.is_configured(): @@ -2073,10 +2106,13 @@ if __name__ == "__main__": else: print("❌ Missing requirements:") try: - _find_agent_browser() + browser_cmd = _find_agent_browser() + if _requires_real_termux_browser_install(browser_cmd): + print(" - bare npx fallback found (insufficient on Termux local mode)") + print(f" Install: {_browser_install_hint()}") except FileNotFoundError: print(" - agent-browser CLI not found") - print(" Install: npm install -g agent-browser && agent-browser install --with-deps") + print(f" Install: {_browser_install_hint()}") if _cp is not None and not _cp.is_configured(): print(f" - {_cp.provider_name()} credentials not configured") print(" Tip: set browser.cloud_provider to 'local' to use free local mode instead") diff --git a/tools/code_execution_tool.py b/tools/code_execution_tool.py index f0d61210ff..2b9e329a3e 100644 --- a/tools/code_execution_tool.py +++ b/tools/code_execution_tool.py @@ -33,6 +33,7 @@ import json import logging import os import platform +import shlex import signal import socket import subprocess @@ -246,9 +247,9 @@ def _call(tool_name, args): _FILE_TRANSPORT_HEADER = '''\ """Auto-generated Hermes tools RPC stubs (file-based transport).""" -import json, os, shlex, time +import json, os, shlex, tempfile, time -_RPC_DIR = os.environ.get("HERMES_RPC_DIR", "/tmp/hermes_rpc") +_RPC_DIR = os.environ.get("HERMES_RPC_DIR") or os.path.join(tempfile.gettempdir(), "hermes_rpc") _seq = 0 ''' + _COMMON_HELPERS + '''\ @@ -536,13 +537,30 @@ def _ship_file_to_remote(env, remote_path: str, content: str) -> None: quotes are fine. 
""" encoded = base64.b64encode(content.encode("utf-8")).decode("ascii") + quoted_remote_path = shlex.quote(remote_path) env.execute( - f"echo '{encoded}' | base64 -d > {remote_path}", + f"echo '{encoded}' | base64 -d > {quoted_remote_path}", cwd="/", timeout=30, ) +def _env_temp_dir(env: Any) -> str: + """Return a writable temp dir for env-backed execute_code sandboxes.""" + get_temp_dir = getattr(env, "get_temp_dir", None) + if callable(get_temp_dir): + try: + temp_dir = get_temp_dir() + if isinstance(temp_dir, str) and temp_dir.startswith("/"): + return temp_dir.rstrip("/") or "/" + except Exception as exc: + logger.debug("Could not resolve execute_code env temp dir: %s", exc) + candidate = tempfile.gettempdir() + if isinstance(candidate, str) and candidate.startswith("/"): + return candidate.rstrip("/") or "/" + return "/tmp" + + def _rpc_poll_loop( env, rpc_dir: str, @@ -563,11 +581,12 @@ def _rpc_poll_loop( poll_interval = 0.1 # 100 ms + quoted_rpc_dir = shlex.quote(rpc_dir) while not stop_event.is_set(): try: # List pending request files (skip .tmp partials) ls_result = env.execute( - f"ls -1 {rpc_dir}/req_* 2>/dev/null || true", + f"ls -1 {quoted_rpc_dir}/req_* 2>/dev/null || true", cwd="/", timeout=10, ) @@ -589,9 +608,10 @@ def _rpc_poll_loop( call_start = time.monotonic() + quoted_req_file = shlex.quote(req_file) # Read request read_result = env.execute( - f"cat {req_file}", + f"cat {quoted_req_file}", cwd="/", timeout=10, ) @@ -600,7 +620,7 @@ def _rpc_poll_loop( except (json.JSONDecodeError, ValueError): logger.debug("Malformed RPC request in %s", req_file) # Remove bad request to avoid infinite retry - env.execute(f"rm -f {req_file}", cwd="/", timeout=5) + env.execute(f"rm -f {quoted_req_file}", cwd="/", timeout=5) continue tool_name = request.get("tool", "") @@ -608,6 +628,7 @@ def _rpc_poll_loop( seq = request.get("seq", 0) seq_str = f"{seq:06d}" res_file = f"{rpc_dir}/res_{seq_str}" + quoted_res_file = shlex.quote(res_file) # Enforce allow-list if 
tool_name not in allowed_tools: @@ -665,14 +686,14 @@ def _rpc_poll_loop( tool_result.encode("utf-8") ).decode("ascii") env.execute( - f"echo '{encoded_result}' | base64 -d > {res_file}.tmp" - f" && mv {res_file}.tmp {res_file}", + f"echo '{encoded_result}' | base64 -d > {quoted_res_file}.tmp" + f" && mv {quoted_res_file}.tmp {quoted_res_file}", cwd="/", timeout=60, ) # Remove the request file - env.execute(f"rm -f {req_file}", cwd="/", timeout=5) + env.execute(f"rm -f {quoted_req_file}", cwd="/", timeout=5) except Exception as e: if not stop_event.is_set(): @@ -707,7 +728,10 @@ def _execute_remote( env, env_type = _get_or_create_env(effective_task_id) sandbox_id = uuid.uuid4().hex[:12] - sandbox_dir = f"/tmp/hermes_exec_{sandbox_id}" + temp_dir = _env_temp_dir(env) + sandbox_dir = f"{temp_dir}/hermes_exec_{sandbox_id}" + quoted_sandbox_dir = shlex.quote(sandbox_dir) + quoted_rpc_dir = shlex.quote(f"{sandbox_dir}/rpc") tool_call_log: list = [] tool_call_counter = [0] @@ -735,7 +759,7 @@ def _execute_remote( # Create sandbox directory on remote env.execute( - f"mkdir -p {sandbox_dir}/rpc", cwd="/", timeout=10, + f"mkdir -p {quoted_rpc_dir}", cwd="/", timeout=10, ) # Generate and ship files @@ -759,7 +783,7 @@ def _execute_remote( # Build environment variable prefix for the script env_prefix = ( - f"HERMES_RPC_DIR={sandbox_dir}/rpc " + f"HERMES_RPC_DIR={shlex.quote(f'{sandbox_dir}/rpc')} " f"PYTHONDONTWRITEBYTECODE=1" ) tz = os.getenv("HERMES_TIMEZONE", "").strip() @@ -770,7 +794,7 @@ def _execute_remote( logger.info("Executing code on %s backend (task %s)...", env_type, effective_task_id[:8]) script_result = env.execute( - f"cd {sandbox_dir} && {env_prefix} python3 script.py", + f"cd {quoted_sandbox_dir} && {env_prefix} python3 script.py", timeout=timeout, ) @@ -807,7 +831,7 @@ def _execute_remote( # Clean up remote sandbox dir try: env.execute( - f"rm -rf {sandbox_dir}", cwd="/", timeout=15, + f"rm -rf {quoted_sandbox_dir}", cwd="/", timeout=15, ) except Exception: 
logger.debug("Failed to clean up remote sandbox %s", sandbox_dir) diff --git a/tools/environments/base.py b/tools/environments/base.py index 31ce0e17de..d2963e4acc 100644 --- a/tools/environments/base.py +++ b/tools/environments/base.py @@ -226,14 +226,24 @@ class BaseEnvironment(ABC): # Snapshot creation timeout (override for slow cold-starts). _snapshot_timeout: int = 30 + def get_temp_dir(self) -> str: + """Return the backend temp directory used for session artifacts. + + Most sandboxed backends use ``/tmp`` inside the target environment. + LocalEnvironment overrides this on platforms like Termux where ``/tmp`` + may be missing and ``TMPDIR`` is the portable writable location. + """ + return "/tmp" + def __init__(self, cwd: str, timeout: int, env: dict = None): self.cwd = cwd self.timeout = timeout self.env = env or {} self._session_id = uuid.uuid4().hex[:12] - self._snapshot_path = f"/tmp/hermes-snap-{self._session_id}.sh" - self._cwd_file = f"/tmp/hermes-cwd-{self._session_id}.txt" + temp_dir = self.get_temp_dir().rstrip("/") or "/" + self._snapshot_path = f"{temp_dir}/hermes-snap-{self._session_id}.sh" + self._cwd_file = f"{temp_dir}/hermes-cwd-{self._session_id}.txt" self._cwd_marker = _cwd_marker(self._session_id) self._snapshot_ready = False self._last_sync_time: float | None = ( diff --git a/tools/environments/local.py b/tools/environments/local.py index d3bb344829..bf5b37f95f 100644 --- a/tools/environments/local.py +++ b/tools/environments/local.py @@ -5,6 +5,7 @@ import platform import shutil import signal import subprocess +import tempfile from tools.environments.base import BaseEnvironment, _pipe_stdin @@ -209,6 +210,32 @@ class LocalEnvironment(BaseEnvironment): super().__init__(cwd=cwd or os.getcwd(), timeout=timeout, env=env) self.init_session() + def get_temp_dir(self) -> str: + """Return a shell-safe writable temp dir for local execution. + + Termux does not provide /tmp by default, but exposes a POSIX TMPDIR. 
+ Prefer POSIX-style env vars when available, keep using /tmp on regular + Unix systems, and only fall back to tempfile.gettempdir() when it also + resolves to a POSIX path. + + Check the environment configured for this backend first so callers can + override the temp root explicitly (for example via terminal.env or a + custom TMPDIR), then fall back to the host process environment. + """ + for env_var in ("TMPDIR", "TMP", "TEMP"): + candidate = self.env.get(env_var) or os.environ.get(env_var) + if candidate and candidate.startswith("/"): + return candidate.rstrip("/") or "/" + + if os.path.isdir("/tmp") and os.access("/tmp", os.W_OK | os.X_OK): + return "/tmp" + + candidate = tempfile.gettempdir() + if candidate.startswith("/"): + return candidate.rstrip("/") or "/" + + return "/tmp" + def _run_bash(self, cmd_string: str, *, login: bool = False, timeout: int = 120, stdin_data: str | None = None) -> subprocess.Popen: diff --git a/tools/process_registry.py b/tools/process_registry.py index 2adad9e470..6e612fe0ec 100644 --- a/tools/process_registry.py +++ b/tools/process_registry.py @@ -183,6 +183,19 @@ class ProcessRegistry: # ----- Spawn ----- + @staticmethod + def _env_temp_dir(env: Any) -> str: + """Return the writable sandbox temp dir for env-backed background tasks.""" + get_temp_dir = getattr(env, "get_temp_dir", None) + if callable(get_temp_dir): + try: + temp_dir = get_temp_dir() + if isinstance(temp_dir, str) and temp_dir.startswith("/"): + return temp_dir.rstrip("/") or "/" + except Exception as exc: + logger.debug("Could not resolve environment temp dir: %s", exc) + return "/tmp" + def spawn_local( self, command: str, @@ -327,12 +340,20 @@ class ProcessRegistry: ) # Run the command in the sandbox with output capture - log_path = f"/tmp/hermes_bg_{session.id}.log" - pid_path = f"/tmp/hermes_bg_{session.id}.pid" + temp_dir = self._env_temp_dir(env) + log_path = f"{temp_dir}/hermes_bg_{session.id}.log" + pid_path = f"{temp_dir}/hermes_bg_{session.id}.pid" + 
exit_path = f"{temp_dir}/hermes_bg_{session.id}.exit" quoted_command = shlex.quote(command) + quoted_temp_dir = shlex.quote(temp_dir) + quoted_log_path = shlex.quote(log_path) + quoted_pid_path = shlex.quote(pid_path) + quoted_exit_path = shlex.quote(exit_path) bg_command = ( - f"nohup bash -c {quoted_command} > {log_path} 2>&1 & " - f"echo $! > {pid_path} && cat {pid_path}" + f"mkdir -p {quoted_temp_dir} && " + f"( nohup bash -lc {quoted_command} > {quoted_log_path} 2>&1; " + f"rc=$?; printf '%s\\n' \"$rc\" > {quoted_exit_path} ) & " + f"echo $! > {quoted_pid_path} && cat {quoted_pid_path}" ) try: @@ -353,7 +374,7 @@ class ProcessRegistry: # Start a poller thread that periodically reads the log file reader = threading.Thread( target=self._env_poller_loop, - args=(session, env, log_path, pid_path), + args=(session, env, log_path, pid_path, exit_path), daemon=True, name=f"proc-poller-{session.id}", ) @@ -397,14 +418,17 @@ class ProcessRegistry: self._move_to_finished(session) def _env_poller_loop( - self, session: ProcessSession, env: Any, log_path: str, pid_path: str + self, session: ProcessSession, env: Any, log_path: str, pid_path: str, exit_path: str ): """Background thread: poll a sandbox log file for non-local backends.""" + quoted_log_path = shlex.quote(log_path) + quoted_pid_path = shlex.quote(pid_path) + quoted_exit_path = shlex.quote(exit_path) while not session.exited: time.sleep(2) # Poll every 2 seconds try: # Read new output from the log file - result = env.execute(f"cat {log_path} 2>/dev/null", timeout=10) + result = env.execute(f"cat {quoted_log_path} 2>/dev/null", timeout=10) new_output = result.get("output", "") if new_output: with session._lock: @@ -414,14 +438,14 @@ class ProcessRegistry: # Check if process is still running check = env.execute( - f"kill -0 $(cat {pid_path} 2>/dev/null) 2>/dev/null; echo $?", + f"kill -0 \"$(cat {quoted_pid_path} 2>/dev/null)\" 2>/dev/null; echo $?", timeout=5, ) check_output = check.get("output", "").strip() if 
check_output and check_output.splitlines()[-1].strip() != "0": - # Process has exited -- get exit code + # Process has exited -- get exit code captured by the wrapper shell. exit_result = env.execute( - f"wait $(cat {pid_path} 2>/dev/null) 2>/dev/null; echo $?", + f"cat {quoted_exit_path} 2>/dev/null", timeout=5, ) exit_str = exit_result.get("output", "").strip() @@ -711,6 +735,29 @@ class ProcessRegistry: """Send data + newline to a running process's stdin (like pressing Enter).""" return self.write_stdin(session_id, data + "\n") + def close_stdin(self, session_id: str) -> dict: + """Close a running process's stdin / send EOF without killing the process.""" + session = self.get(session_id) + if session is None: + return {"status": "not_found", "error": f"No process with ID {session_id}"} + if session.exited: + return {"status": "already_exited", "error": "Process has already finished"} + + if hasattr(session, '_pty') and session._pty: + try: + session._pty.sendeof() + return {"status": "ok", "message": "EOF sent"} + except Exception as e: + return {"status": "error", "error": str(e)} + + if not session.process or not session.process.stdin: + return {"status": "error", "error": "Process stdin not available (non-local backend or stdin closed)"} + try: + session.process.stdin.close() + return {"status": "ok", "message": "stdin closed"} + except Exception as e: + return {"status": "error", "error": str(e)} + def list_sessions(self, task_id: str = None) -> list: """List all running and recently-finished processes.""" with self._lock: @@ -926,14 +973,14 @@ PROCESS_SCHEMA = { "Actions: 'list' (show all), 'poll' (check status + new output), " "'log' (full output with pagination), 'wait' (block until done or timeout), " "'kill' (terminate), 'write' (send raw stdin data without newline), " - "'submit' (send data + Enter, for answering prompts)." + "'submit' (send data + Enter, for answering prompts), 'close' (close stdin/send EOF)." 
), "parameters": { "type": "object", "properties": { "action": { "type": "string", - "enum": ["list", "poll", "log", "wait", "kill", "write", "submit"], + "enum": ["list", "poll", "log", "wait", "kill", "write", "submit", "close"], "description": "Action to perform on background processes" }, "session_id": { @@ -973,7 +1020,7 @@ def _handle_process(args, **kw): if action == "list": return _json.dumps({"processes": process_registry.list_sessions(task_id=task_id)}, ensure_ascii=False) - elif action in ("poll", "log", "wait", "kill", "write", "submit"): + elif action in ("poll", "log", "wait", "kill", "write", "submit", "close"): if not session_id: return tool_error(f"session_id is required for {action}") if action == "poll": @@ -989,7 +1036,9 @@ def _handle_process(args, **kw): return _json.dumps(process_registry.write_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False) elif action == "submit": return _json.dumps(process_registry.submit_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False) - return tool_error(f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit") + elif action == "close": + return _json.dumps(process_registry.close_stdin(session_id), ensure_ascii=False) + return tool_error(f"Unknown process action: {action}. 
Use: list, poll, log, wait, kill, write, submit, close") registry.register( diff --git a/tools/send_message_tool.py b/tools/send_message_tool.py index 76b3e15820..2700231e95 100644 --- a/tools/send_message_tool.py +++ b/tools/send_message_tool.py @@ -322,6 +322,13 @@ async def _send_to_platform(platform, pconfig, chat_id, message, thread_id=None, media_files = media_files or [] + if platform == Platform.SLACK and message: + try: + slack_adapter = SlackAdapter.__new__(SlackAdapter) + message = slack_adapter.format_message(message) + except Exception: + logger.debug("Failed to apply Slack mrkdwn formatting in _send_to_platform", exc_info=True) + # Platform message length limits (from adapter class attributes) _MAX_LENGTHS = { Platform.TELEGRAM: TelegramAdapter.MAX_MESSAGE_LENGTH, @@ -548,10 +555,13 @@ async def _send_discord(token, chat_id, message): except ImportError: return {"error": "aiohttp not installed. Run: pip install aiohttp"} try: + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp + _proxy = resolve_proxy_url(platform_env_var="DISCORD_PROXY") + _sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy) url = f"https://discord.com/api/v10/channels/{chat_id}/messages" headers = {"Authorization": f"Bot {token}", "Content-Type": "application/json"} - async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session: - async with session.post(url, headers=headers, json={"content": message}) as resp: + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session: + async with session.post(url, headers=headers, json={"content": message}, **_req_kw) as resp: if resp.status not in (200, 201): body = await resp.text() return _error(f"Discord API error ({resp.status}): {body}") @@ -568,10 +578,14 @@ async def _send_slack(token, chat_id, message): except ImportError: return {"error": "aiohttp not installed. 
Run: pip install aiohttp"} try: + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp + _proxy = resolve_proxy_url() + _sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy) url = "https://slack.com/api/chat.postMessage" headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} - async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session: - async with session.post(url, headers=headers, json={"channel": chat_id, "text": message}) as resp: + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session: + payload = {"channel": chat_id, "text": message, "mrkdwn": True} + async with session.post(url, headers=headers, json=payload, **_req_kw) as resp: data = await resp.json() if data.get("ok"): return {"success": True, "platform": "slack", "chat_id": chat_id, "message_id": data.get("ts")} @@ -704,18 +718,21 @@ async def _send_sms(auth_token, chat_id, message): message = message.strip() try: + from gateway.platforms.base import resolve_proxy_url, proxy_kwargs_for_aiohttp + _proxy = resolve_proxy_url() + _sess_kw, _req_kw = proxy_kwargs_for_aiohttp(_proxy) creds = f"{account_sid}:{auth_token}" encoded = base64.b64encode(creds.encode("ascii")).decode("ascii") url = f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Messages.json" headers = {"Authorization": f"Basic {encoded}"} - async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session: + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30), **_sess_kw) as session: form_data = aiohttp.FormData() form_data.add_field("From", from_number) form_data.add_field("To", chat_id) form_data.add_field("Body", message) - async with session.post(url, data=form_data, headers=headers) as resp: + async with session.post(url, data=form_data, headers=headers, **_req_kw) as resp: body = await resp.json() if resp.status >= 400: error_msg = body.get("message", str(body)) diff --git 
a/tools/terminal_tool.py b/tools/terminal_tool.py index 0dc0fd5872..af35771c8c 100644 --- a/tools/terminal_tool.py +++ b/tools/terminal_tool.py @@ -1112,6 +1112,21 @@ def _interpret_exit_code(command: str, exit_code: int) -> str | None: return None +def _command_requires_pipe_stdin(command: str) -> bool: + """Return True when PTY mode would break stdin-driven commands. + + Some CLIs change behavior when stdin is a TTY. In particular, + `gh auth login --with-token` expects the token to arrive via piped stdin and + waits for EOF; when we launch it under a PTY, `process.submit()` only sends a + newline, so the command appears to hang forever with no visible progress. + """ + normalized = " ".join(command.lower().split()) + return ( + normalized.startswith("gh auth login") + and "--with-token" in normalized + ) + + def terminal_tool( command: str, background: bool = False, @@ -1332,6 +1347,17 @@ def terminal_tool( }, ensure_ascii=False) # Prepare command for execution + pty_disabled_reason = None + effective_pty = pty + if pty and _command_requires_pipe_stdin(command): + effective_pty = False + pty_disabled_reason = ( + "PTY disabled for this command because it expects piped stdin/EOF " + "(for example gh auth login --with-token). For local background " + "processes, call process(action='close') after writing so it receives " + "EOF." + ) + if background: # Spawn a tracked background process via the process registry. # For local backends: uses subprocess.Popen with output buffering. 
@@ -1349,7 +1375,7 @@ def terminal_tool( task_id=effective_task_id, session_key=session_key, env_vars=env.env if hasattr(env, 'env') else None, - use_pty=pty, + use_pty=effective_pty, ) else: proc_session = process_registry.spawn_via_env( @@ -1369,6 +1395,8 @@ def terminal_tool( } if approval_note: result_data["approval"] = approval_note + if pty_disabled_reason: + result_data["pty_note"] = pty_disabled_reason # Transparent timeout clamping note max_timeout = effective_timeout diff --git a/tools/tool_result_storage.py b/tools/tool_result_storage.py index 076d37ae07..a8ec5440bc 100644 --- a/tools/tool_result_storage.py +++ b/tools/tool_result_storage.py @@ -9,9 +9,11 @@ Defense against context-window overflow operates at three levels: 2. **Per-result persistence** (maybe_persist_tool_result): After a tool returns, if its output exceeds the tool's registered threshold (registry.get_max_result_size), the full output is written INTO THE - SANDBOX at /tmp/hermes-results/{tool_use_id}.txt via env.execute(). - The in-context content is replaced with a preview + file path reference. - The model can read_file to access the full output on any backend. + SANDBOX temp dir (for example /tmp/hermes-results/{tool_use_id}.txt on + standard Linux, or $TMPDIR/hermes-results/{tool_use_id}.txt on Termux) + via env.execute(). The in-context content is replaced with a preview + + file path reference. The model can read_file to access the full output + on any backend. 3. 
**Per-turn aggregate budget** (enforce_turn_budget): After all tool results in a single assistant turn are collected, if the total exceeds @@ -21,6 +23,7 @@ Defense against context-window overflow operates at three levels: """ import logging +import os import uuid from tools.budget_config import ( @@ -37,6 +40,22 @@ HEREDOC_MARKER = "HERMES_PERSIST_EOF" _BUDGET_TOOL_NAME = "__budget_enforcement__" +def _resolve_storage_dir(env) -> str: + """Return the best temp-backed storage dir for this environment.""" + if env is not None: + get_temp_dir = getattr(env, "get_temp_dir", None) + if callable(get_temp_dir): + try: + temp_dir = get_temp_dir() + except Exception as exc: + logger.debug("Could not resolve env temp dir: %s", exc) + else: + if temp_dir: + temp_dir = temp_dir.rstrip("/") or "/" + return f"{temp_dir}/hermes-results" + return STORAGE_DIR + + def generate_preview(content: str, max_chars: int = DEFAULT_PREVIEW_SIZE_CHARS) -> tuple[str, bool]: """Truncate at last newline within max_chars. Returns (preview, has_more).""" if len(content) <= max_chars: @@ -58,8 +77,9 @@ def _heredoc_marker(content: str) -> str: def _write_to_sandbox(content: str, remote_path: str, env) -> bool: """Write content into the sandbox via env.execute(). 
Returns True on success.""" marker = _heredoc_marker(content) + storage_dir = os.path.dirname(remote_path) cmd = ( - f"mkdir -p {STORAGE_DIR} && cat > {remote_path} << '{marker}'\n" + f"mkdir -p {storage_dir} && cat > {remote_path} << '{marker}'\n" f"{content}\n" f"{marker}" ) @@ -125,7 +145,8 @@ def maybe_persist_tool_result( if len(content) <= effective_threshold: return content - remote_path = f"{STORAGE_DIR}/{tool_use_id}.txt" + storage_dir = _resolve_storage_dir(env) + remote_path = f"{storage_dir}/{tool_use_id}.txt" preview, has_more = generate_preview(content, max_chars=config.preview_size) if env is not None: diff --git a/tools/voice_mode.py b/tools/voice_mode.py index 1b09a178c6..b6f0df29a0 100644 --- a/tools/voice_mode.py +++ b/tools/voice_mode.py @@ -48,6 +48,47 @@ def _audio_available() -> bool: return False +from hermes_constants import is_termux as _is_termux_environment + + +def _voice_capture_install_hint() -> str: + if _is_termux_environment(): + return "pkg install python-numpy portaudio && python -m pip install sounddevice" + return "pip install sounddevice numpy" + + +def _termux_microphone_command() -> Optional[str]: + if not _is_termux_environment(): + return None + return shutil.which("termux-microphone-record") + + +def _termux_media_player_command() -> Optional[str]: + if not _is_termux_environment(): + return None + return shutil.which("termux-media-player") + + +def _termux_api_app_installed() -> bool: + if not _is_termux_environment(): + return False + try: + result = subprocess.run( + ["pm", "list", "packages", "com.termux.api"], + capture_output=True, + text=True, + timeout=5, + check=False, + ) + return "package:com.termux.api" in (result.stdout or "") + except Exception: + return False + + +def _termux_voice_capture_available() -> bool: + return _termux_microphone_command() is not None and _termux_api_app_installed() + + def detect_audio_environment() -> dict: """Detect if the current environment supports audio I/O. 
@@ -57,6 +98,9 @@ def detect_audio_environment() -> dict: """ warnings = [] # hard-fail: these block voice mode notices = [] # informational: logged but don't block + termux_mic_cmd = _termux_microphone_command() + termux_app_installed = _termux_api_app_installed() + termux_capture = bool(termux_mic_cmd and termux_app_installed) # SSH detection if any(os.environ.get(v) for v in ('SSH_CLIENT', 'SSH_TTY', 'SSH_CONNECTION')): @@ -89,23 +133,48 @@ def detect_audio_environment() -> dict: try: devices = sd.query_devices() if not devices: - warnings.append("No audio input/output devices detected") + if termux_capture: + notices.append("No PortAudio devices detected, but Termux:API microphone capture is available") + else: + warnings.append("No audio input/output devices detected") except Exception: # In WSL with PulseAudio, device queries can fail even though # recording/playback works fine. Don't block if PULSE_SERVER is set. if os.environ.get('PULSE_SERVER'): notices.append("Audio device query failed but PULSE_SERVER is set -- continuing") + elif termux_capture: + notices.append("PortAudio device query failed, but Termux:API microphone capture is available") else: warnings.append("Audio subsystem error (PortAudio cannot query devices)") except ImportError: - warnings.append("Audio libraries not installed (pip install sounddevice numpy)") + if termux_capture: + notices.append("Termux:API microphone recording available (sounddevice not required)") + elif termux_mic_cmd and not termux_app_installed: + warnings.append( + "Termux:API Android app is not installed. Install/update the Termux:API app to use termux-microphone-record." + ) + else: + warnings.append(f"Audio libraries not installed ({_voice_capture_install_hint()})") except OSError: - warnings.append( - "PortAudio system library not found -- install it first:\n" - " Linux: sudo apt-get install libportaudio2\n" - " macOS: brew install portaudio\n" - "Then retry /voice on." 
- ) + if termux_capture: + notices.append("Termux:API microphone recording available (PortAudio not required)") + elif termux_mic_cmd and not termux_app_installed: + warnings.append( + "Termux:API Android app is not installed. Install/update the Termux:API app to use termux-microphone-record." + ) + elif _is_termux_environment(): + warnings.append( + "PortAudio system library not found -- install it first:\n" + " Termux: pkg install portaudio\n" + "Then retry /voice on." + ) + else: + warnings.append( + "PortAudio system library not found -- install it first:\n" + " Linux: sudo apt-get install libportaudio2\n" + " macOS: brew install portaudio\n" + "Then retry /voice on." + ) return { "available": not warnings, @@ -174,6 +243,134 @@ def play_beep(frequency: int = 880, duration: float = 0.12, count: int = 1) -> N logger.debug("Beep playback failed: %s", e) +# ============================================================================ +# Termux Audio Recorder +# ============================================================================ +class TermuxAudioRecorder: + """Recorder backend that uses Termux:API microphone capture commands.""" + + supports_silence_autostop = False + + def __init__(self) -> None: + self._lock = threading.Lock() + self._recording = False + self._start_time = 0.0 + self._recording_path: Optional[str] = None + self._current_rms = 0 + + @property + def is_recording(self) -> bool: + return self._recording + + @property + def elapsed_seconds(self) -> float: + if not self._recording: + return 0.0 + return time.monotonic() - self._start_time + + @property + def current_rms(self) -> int: + return self._current_rms + + def start(self, on_silence_stop=None) -> None: + del on_silence_stop # Termux:API does not expose live silence callbacks. 
+ mic_cmd = _termux_microphone_command() + if not mic_cmd: + raise RuntimeError( + "Termux voice capture requires the termux-api package and app.\n" + "Install with: pkg install termux-api\n" + "Then install/update the Termux:API Android app." + ) + if not _termux_api_app_installed(): + raise RuntimeError( + "Termux voice capture requires the Termux:API Android app.\n" + "Install/update the Termux:API app, then retry /voice on." + ) + + with self._lock: + if self._recording: + return + os.makedirs(_TEMP_DIR, exist_ok=True) + timestamp = time.strftime("%Y%m%d_%H%M%S") + self._recording_path = os.path.join(_TEMP_DIR, f"recording_{timestamp}.aac") + + command = [ + mic_cmd, + "-f", self._recording_path, + "-l", "0", + "-e", "aac", + "-r", str(SAMPLE_RATE), + "-c", str(CHANNELS), + ] + try: + subprocess.run(command, capture_output=True, text=True, timeout=15, check=True) + except subprocess.CalledProcessError as e: + details = (e.stderr or e.stdout or str(e)).strip() + raise RuntimeError(f"Termux microphone start failed: {details}") from e + except Exception as e: + raise RuntimeError(f"Termux microphone start failed: {e}") from e + + with self._lock: + self._start_time = time.monotonic() + self._recording = True + self._current_rms = 0 + logger.info("Termux voice recording started") + + def _stop_termux_recording(self) -> None: + mic_cmd = _termux_microphone_command() + if not mic_cmd: + return + subprocess.run([mic_cmd, "-q"], capture_output=True, text=True, timeout=15, check=False) + + def stop(self) -> Optional[str]: + with self._lock: + if not self._recording: + return None + self._recording = False + path = self._recording_path + self._recording_path = None + started_at = self._start_time + self._current_rms = 0 + + self._stop_termux_recording() + if not path or not os.path.isfile(path): + return None + if time.monotonic() - started_at < 0.3: + try: + os.unlink(path) + except OSError: + pass + return None + if os.path.getsize(path) <= 0: + try: + os.unlink(path) 
+ except OSError: + pass + return None + logger.info("Termux voice recording stopped: %s", path) + return path + + def cancel(self) -> None: + with self._lock: + path = self._recording_path + self._recording = False + self._recording_path = None + self._current_rms = 0 + try: + self._stop_termux_recording() + except Exception: + pass + if path and os.path.isfile(path): + try: + os.unlink(path) + except OSError: + pass + logger.info("Termux voice recording cancelled") + + def shutdown(self) -> None: + self.cancel() + + # ============================================================================ # AudioRecorder # ============================================================================ @@ -193,6 +390,8 @@ class AudioRecorder: the user is silent for ``silence_duration`` seconds and calls the callback. """ + supports_silence_autostop = True + def __init__(self) -> None: self._lock = threading.Lock() self._stream: Any = None @@ -526,6 +725,13 @@ class AudioRecorder: return wav_path +def create_audio_recorder() -> AudioRecorder | TermuxAudioRecorder: + """Return the best recorder backend for the current environment.""" + if _termux_voice_capture_available(): + return TermuxAudioRecorder() + return AudioRecorder() + + # ============================================================================ # Whisper hallucination filter # ============================================================================ @@ -734,7 +940,8 @@ def check_voice_requirements() -> Dict[str, Any]: stt_available = stt_enabled and stt_provider != "none" missing: List[str] = [] - has_audio = _audio_available() + termux_capture = _termux_voice_capture_available() + has_audio = _audio_available() or termux_capture if not has_audio: missing.extend(["sounddevice", "numpy"]) @@ -745,10 +952,12 @@ def check_voice_requirements() -> Dict[str, Any]: available = has_audio and stt_available and env_check["available"] details_parts = [] - if has_audio: + if termux_capture: + details_parts.append("Audio 
capture: OK (Termux:API microphone)") + elif has_audio: details_parts.append("Audio capture: OK") else: - details_parts.append("Audio capture: MISSING (pip install sounddevice numpy)") + details_parts.append(f"Audio capture: MISSING ({_voice_capture_install_hint()})") if not stt_enabled: details_parts.append("STT provider: DISABLED in config (stt.enabled: false)") diff --git a/ui-tui/README.md b/ui-tui/README.md index f9fbcd3f2d..9992cd340c 100644 --- a/ui-tui/README.md +++ b/ui-tui/README.md @@ -101,8 +101,10 @@ Current input behavior is split across `app.tsx`, `components/textInput.tsx`, an | modified `Left/Right` | Move by word when the terminal sends `Ctrl` or `Meta` with the arrow key | | `Home` / `Ctrl+A` | Start of line | | `End` / `Ctrl+E` | End of line | -| `Backspace` / `Delete` | Delete the character to the left of the cursor | -| modified `Backspace` / `Delete` | Delete the previous word | +| `Backspace` | Delete the character to the left of the cursor | +| `Delete` | Delete the character to the right of the cursor | +| modified `Backspace` | Delete the previous word | +| modified `Delete` | Delete the next word | | `Ctrl+W` | Delete the previous word | | `Ctrl+U` | Delete from the cursor back to the start of the line | | `Ctrl+K` | Delete from the cursor to the end of the line | diff --git a/ui-tui/package-lock.json b/ui-tui/package-lock.json index 18a63d6880..81b44fc537 100644 --- a/ui-tui/package-lock.json +++ b/ui-tui/package-lock.json @@ -33,8 +33,6 @@ }, "node_modules/@alcalzone/ansi-tokenize": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/@alcalzone/ansi-tokenize/-/ansi-tokenize-0.2.5.tgz", - "integrity": "sha512-3NX/MpTdroi0aKz134A6RC2Gb2iXVECN4QaAXnvCIxxIm3C3AVB1mkUe8NaaiyvOpDfsrqWhYtj+Q6a62RrTsw==", "license": "MIT", "dependencies": { "ansi-styles": "^6.2.1", @@ -44,22 +42,8 @@ "node": ">=18" } }, - "node_modules/@alcalzone/ansi-tokenize/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/@babel/code-frame": { "version": "7.29.0", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", - "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", "dev": true, "license": "MIT", "dependencies": { @@ -73,8 +57,6 @@ }, "node_modules/@babel/compat-data": { "version": "7.29.0", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", - "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", "dev": true, "license": "MIT", "engines": { @@ -83,10 +65,9 @@ }, "node_modules/@babel/core": { "version": "7.29.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", - "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -114,8 +95,6 @@ }, "node_modules/@babel/core/node_modules/semver": { "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", "bin": { @@ -124,8 +103,6 @@ }, "node_modules/@babel/generator": { "version": "7.29.1", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", - "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", "dev": true, "license": "MIT", "dependencies": { @@ -141,8 +118,6 
@@ }, "node_modules/@babel/helper-compilation-targets": { "version": "7.28.6", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", - "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", "dev": true, "license": "MIT", "dependencies": { @@ -158,8 +133,6 @@ }, "node_modules/@babel/helper-compilation-targets/node_modules/semver": { "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", "bin": { @@ -168,8 +141,6 @@ }, "node_modules/@babel/helper-globals": { "version": "7.28.0", - "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", - "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", "dev": true, "license": "MIT", "engines": { @@ -178,8 +149,6 @@ }, "node_modules/@babel/helper-module-imports": { "version": "7.28.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", - "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", "dev": true, "license": "MIT", "dependencies": { @@ -192,8 +161,6 @@ }, "node_modules/@babel/helper-module-transforms": { "version": "7.28.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", - "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", "dev": true, "license": "MIT", "dependencies": { @@ -210,8 +177,6 @@ }, "node_modules/@babel/helper-string-parser": { "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", - "integrity": 
"sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, "license": "MIT", "engines": { @@ -220,8 +185,6 @@ }, "node_modules/@babel/helper-validator-identifier": { "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", - "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", "dev": true, "license": "MIT", "engines": { @@ -230,8 +193,6 @@ }, "node_modules/@babel/helper-validator-option": { "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", - "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", "dev": true, "license": "MIT", "engines": { @@ -240,8 +201,6 @@ }, "node_modules/@babel/helpers": { "version": "7.29.2", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", - "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", "dev": true, "license": "MIT", "dependencies": { @@ -254,8 +213,6 @@ }, "node_modules/@babel/parser": { "version": "7.29.2", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", - "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", "dev": true, "license": "MIT", "dependencies": { @@ -270,8 +227,6 @@ }, "node_modules/@babel/template": { "version": "7.28.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", - "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", "dev": true, "license": "MIT", "dependencies": { @@ -285,8 +240,6 @@ }, "node_modules/@babel/traverse": { "version": "7.29.0", - "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", - "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", "dev": true, "license": "MIT", "dependencies": { @@ -304,8 +257,6 @@ }, "node_modules/@babel/types": { "version": "7.29.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", - "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", "dev": true, "license": "MIT", "dependencies": { @@ -316,316 +267,8 @@ "node": ">=6.9.0" } }, - "node_modules/@emnapi/core": { - "version": "1.9.2", - "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz", - "integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/wasi-threads": "1.2.1", - "tslib": "^2.4.0" - } - }, - "node_modules/@emnapi/runtime": { - "version": "1.9.2", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz", - "integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@emnapi/wasi-threads": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", - "integrity": "sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", - "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", - "cpu": [ - "ppc64" - ], - "dev": 
true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", - "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", - "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", - "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", - "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", - "integrity": 
"sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", - "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", - "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", - "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", - "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.27.7", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", - "integrity": "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", - "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", - "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", - "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", - "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - 
"node": ">=18" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", - "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, "node_modules/@esbuild/linux-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", - "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "version": "0.27.5", "cpu": [ "x64" ], @@ -639,163 +282,8 @@ "node": ">=18" } }, - "node_modules/@esbuild/netbsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", - "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", - "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", - "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - 
"engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", - "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openharmony-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", - "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", - "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", - "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", - "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", - "cpu": [ - "ia32" 
- ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", - "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, "node_modules/@eslint-community/eslint-utils": { "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", - "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", "dev": true, "license": "MIT", "dependencies": { @@ -811,10 +299,19 @@ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, "node_modules/@eslint-community/regexpp": { "version": "4.12.2", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", - "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", "dev": true, "license": "MIT", "engines": { @@ -823,8 +320,6 @@ }, "node_modules/@eslint/config-array": { "version": "0.21.2", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.2.tgz", - "integrity": "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -836,28 +331,8 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - 
"node_modules/@eslint/config-array/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@eslint/config-array/node_modules/brace-expansion": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", - "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/@eslint/config-array/node_modules/minimatch": { "version": "3.1.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", - "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -867,10 +342,22 @@ "node": "*" } }, + "node_modules/@eslint/config-array/node_modules/minimatch/node_modules/brace-expansion": { + "version": "1.1.13", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch/node_modules/brace-expansion/node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, "node_modules/@eslint/config-helpers": { "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", - "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -882,8 +369,6 @@ }, "node_modules/@eslint/core": { "version": "0.17.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", - 
"integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -895,8 +380,6 @@ }, "node_modules/@eslint/eslintrc": { "version": "3.3.5", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.5.tgz", - "integrity": "sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg==", "dev": true, "license": "MIT", "dependencies": { @@ -917,28 +400,8 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/eslintrc/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", - "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "14.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", - "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "dev": true, "license": "MIT", "engines": { @@ -950,8 +413,6 @@ }, "node_modules/@eslint/eslintrc/node_modules/ignore": { "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", "dev": true, "license": "MIT", "engines": { @@ -960,8 +421,6 @@ }, 
"node_modules/@eslint/eslintrc/node_modules/minimatch": { "version": "3.1.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", - "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -971,10 +430,22 @@ "node": "*" } }, + "node_modules/@eslint/eslintrc/node_modules/minimatch/node_modules/brace-expansion": { + "version": "1.1.13", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch/node_modules/brace-expansion/node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, "node_modules/@eslint/js": { "version": "9.39.4", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.4.tgz", - "integrity": "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw==", "dev": true, "license": "MIT", "engines": { @@ -986,8 +457,6 @@ }, "node_modules/@eslint/object-schema": { "version": "2.1.7", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", - "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -996,8 +465,6 @@ }, "node_modules/@eslint/plugin-kit": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", - "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -1010,8 +477,6 @@ }, "node_modules/@humanfs/core": { "version": "0.19.1", - "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", - "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", "dev": true, 
"license": "Apache-2.0", "engines": { @@ -1020,8 +485,6 @@ }, "node_modules/@humanfs/node": { "version": "0.16.7", - "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", - "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -1034,8 +497,6 @@ }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1048,8 +509,6 @@ }, "node_modules/@humanwhocodes/retry": { "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", - "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1062,8 +521,6 @@ }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", - "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", "dev": true, "license": "MIT", "dependencies": { @@ -1073,8 +530,6 @@ }, "node_modules/@jridgewell/remapping": { "version": "2.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", - "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1084,8 +539,6 @@ }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": 
"sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "dev": true, "license": "MIT", "engines": { @@ -1094,15 +547,11 @@ }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", - "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "dev": true, "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.31", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", - "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "dev": true, "license": "MIT", "dependencies": { @@ -1110,192 +559,16 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/@napi-rs/wasm-runtime": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.3.tgz", - "integrity": "sha512-xK9sGVbJWYb08+mTJt3/YV24WxvxpXcXtP6B172paPZ+Ts69Re9dAr7lKwJoeIx8OoeuimEiRZ7umkiUVClmmQ==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@tybys/wasm-util": "^0.10.1" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Brooooooklyn" - }, - "peerDependencies": { - "@emnapi/core": "^1.7.1", - "@emnapi/runtime": "^1.7.1" - } - }, "node_modules/@oxc-project/types": { - "version": "0.124.0", - "resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.124.0.tgz", - "integrity": "sha512-VBFWMTBvHxS11Z5Lvlr3IWgrwhMTXV+Md+EQF0Xf60+wAdsGFTBx7X7K/hP4pi8N7dcm1RvcHwDxZ16Qx8keUg==", + "version": "0.123.0", "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/Boshen" } }, - "node_modules/@rolldown/binding-android-arm64": { - "version": "1.0.0-rc.15", - "resolved": 
"https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.15.tgz", - "integrity": "sha512-YYe6aWruPZDtHNpwu7+qAHEMbQ/yRl6atqb/AhznLTnD3UY99Q1jE7ihLSahNWkF4EqRPVC4SiR4O0UkLK02tA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-darwin-arm64": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-arm64/-/binding-darwin-arm64-1.0.0-rc.15.tgz", - "integrity": "sha512-oArR/ig8wNTPYsXL+Mzhs0oxhxfuHRfG7Ikw7jXsw8mYOtk71W0OkF2VEVh699pdmzjPQsTjlD1JIOoHkLP1Fg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-darwin-x64": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.15.tgz", - "integrity": "sha512-YzeVqOqjPYvUbJSWJ4EDL8ahbmsIXQpgL3JVipmN+MX0XnXMeWomLN3Fb+nwCmP/jfyqte5I3XRSm7OfQrbyxw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-freebsd-x64": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.15.tgz", - "integrity": "sha512-9Erhx956jeQ0nNTyif1+QWAXDRD38ZNjr//bSHrt6wDwB+QkAfl2q6Mn1k6OBPerznjRmbM10lgRb1Pli4xZPw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-linux-arm-gnueabihf": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.15.tgz", 
- "integrity": "sha512-cVwk0w8QbZJGTnP/AHQBs5yNwmpgGYStL88t4UIaqcvYJWBfS0s3oqVLZPwsPU6M0zlW4GqjP0Zq5MnAGwFeGA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-linux-arm64-gnu": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.15.tgz", - "integrity": "sha512-eBZ/u8iAK9SoHGanqe/jrPnY0JvBN6iXbVOsbO38mbz+ZJsaobExAm1Iu+rxa4S1l2FjG0qEZn4Rc6X8n+9M+w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-linux-arm64-musl": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.15.tgz", - "integrity": "sha512-ZvRYMGrAklV9PEkgt4LQM6MjQX2P58HPAuecwYObY2DhS2t35R0I810bKi0wmaYORt6m/2Sm+Z+nFgb0WhXNcQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-linux-ppc64-gnu": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.15.tgz", - "integrity": "sha512-VDpgGBzgfg5hLg+uBpCLoFG5kVvEyafmfxGUV0UHLcL5irxAK7PKNeC2MwClgk6ZAiNhmo9FLhRYgvMmedLtnQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-linux-s390x-gnu": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.15.tgz", - "integrity": 
"sha512-y1uXY3qQWCzcPgRJATPSOUP4tCemh4uBdY7e3EZbVwCJTY3gLJWnQABgeUetvED+bt1FQ01OeZwvhLS2bpNrAQ==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, "node_modules/@rolldown/binding-linux-x64-gnu": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.15.tgz", - "integrity": "sha512-023bTPBod7J3Y/4fzAN6QtpkSABR0rigtrwaP+qSEabUh5zf6ELr9Nc7GujaROuPY3uwdSIXWrvhn1KxOvurWA==", + "version": "1.0.0-rc.13", "cpu": [ "x64" ], @@ -1310,9 +583,7 @@ } }, "node_modules/@rolldown/binding-linux-x64-musl": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.15.tgz", - "integrity": "sha512-witB2O0/hU4CgfOOKUoeFgQ4GktPi1eEbAhaLAIpgD6+ZnhcPkUtPsoKKHRzmOoWPZue46IThdSgdo4XneOLYw==", + "version": "1.0.0-rc.13", "cpu": [ "x64" ], @@ -1326,105 +597,18 @@ "node": "^20.19.0 || >=22.12.0" } }, - "node_modules/@rolldown/binding-openharmony-arm64": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.15.tgz", - "integrity": "sha512-UCL68NJ0Ud5zRipXZE9dF5PmirzJE4E4BCIOOssEnM7wLDsxjc6Qb0sGDxTNRTP53I6MZpygyCpY8Aa8sPfKPg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-wasm32-wasi": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.15.tgz", - "integrity": "sha512-ApLruZq/ig+nhaE7OJm4lDjayUnOHVUa77zGeqnqZ9pn0ovdVbbNPerVibLXDmWeUZXjIYIT8V3xkT58Rm9u5Q==", - "cpu": [ - "wasm32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/core": "1.9.2", - 
"@emnapi/runtime": "1.9.2", - "@napi-rs/wasm-runtime": "^1.1.3" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/@rolldown/binding-win32-arm64-msvc": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.15.tgz", - "integrity": "sha512-KmoUoU7HnN+Si5YWJigfTws1jz1bKBYDQKdbLspz0UaqjjFkddHsqorgiW1mxcAj88lYUE6NC/zJNwT+SloqtA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, - "node_modules/@rolldown/binding-win32-x64-msvc": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.15.tgz", - "integrity": "sha512-3P2A8L+x75qavWLe/Dll3EYBJLQmtkJN8rfh+U/eR3MqMgL/h98PhYI+JFfXuDPgPeCB7iZAKiqii5vqOvnA0g==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^20.19.0 || >=22.12.0" - } - }, "node_modules/@rolldown/pluginutils": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.15.tgz", - "integrity": "sha512-UromN0peaE53IaBRe9W7CjrZgXl90fqGpK+mIZbA3qSTeYqg3pqpROBdIPvOG3F5ereDHNwoHBI2e50n1BDr1g==", + "version": "1.0.0-rc.13", "dev": true, "license": "MIT" }, "node_modules/@standard-schema/spec": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", - "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", "dev": true, "license": "MIT" }, - "node_modules/@tybys/wasm-util": { - "version": "0.10.1", - "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", - "integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", - "dev": true, - "license": "MIT", - 
"optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@types/chai": { "version": "5.2.3", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", - "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", "dev": true, "license": "MIT", "dependencies": { @@ -1434,57 +618,48 @@ }, "node_modules/@types/deep-eql": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", - "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", "dev": true, "license": "MIT" }, "node_modules/@types/estree": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", "dev": true, "license": "MIT" }, "node_modules/@types/json-schema": { "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", "dev": true, "license": "MIT" }, "node_modules/@types/node": { - "version": "25.5.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.2.tgz", - "integrity": "sha512-tO4ZIRKNC+MDWV4qKVZe3Ql/woTnmHDr5JD8UI5hn2pwBrHEwOEMZK7WlNb5RKB6EoJ02gwmQS9OrjuFnZYdpg==", + "version": "25.5.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.18.0" } }, "node_modules/@types/react": { "version": "19.2.14", - "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", - "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "csstype": "^3.2.2" } }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": 
"8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.58.1.tgz", - "integrity": "sha512-eSkwoemjo76bdXl2MYqtxg51HNwUSkWfODUOQ3PaTLZGh9uIWWFZIjyjaJnex7wXDu+TRx+ATsnSxdN9YWfRTQ==", + "version": "8.58.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/regexpp": "^4.12.2", - "@typescript-eslint/scope-manager": "8.58.1", - "@typescript-eslint/type-utils": "8.58.1", - "@typescript-eslint/utils": "8.58.1", - "@typescript-eslint/visitor-keys": "8.58.1", + "@typescript-eslint/scope-manager": "8.58.0", + "@typescript-eslint/type-utils": "8.58.0", + "@typescript-eslint/utils": "8.58.0", + "@typescript-eslint/visitor-keys": "8.58.0", "ignore": "^7.0.5", "natural-compare": "^1.4.0", "ts-api-utils": "^2.5.0" @@ -1497,22 +672,21 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^8.58.1", + "@typescript-eslint/parser": "^8.58.0", "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.1.0" } }, "node_modules/@typescript-eslint/parser": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.58.1.tgz", - "integrity": "sha512-gGkiNMPqerb2cJSVcruigx9eHBlLG14fSdPdqMoOcBfh+vvn4iCq2C8MzUB89PrxOXk0y3GZ1yIWb9aOzL93bw==", + "version": "8.58.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { - "@typescript-eslint/scope-manager": "8.58.1", - "@typescript-eslint/types": "8.58.1", - "@typescript-eslint/typescript-estree": "8.58.1", - "@typescript-eslint/visitor-keys": "8.58.1", + "@typescript-eslint/scope-manager": "8.58.0", + "@typescript-eslint/types": "8.58.0", + "@typescript-eslint/typescript-estree": "8.58.0", + "@typescript-eslint/visitor-keys": "8.58.0", "debug": "^4.4.3" }, "engines": { @@ -1528,14 +702,12 @@ } }, "node_modules/@typescript-eslint/project-service": { - "version": "8.58.1", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.58.1.tgz", - "integrity": "sha512-gfQ8fk6cxhtptek+/8ZIqw8YrRW5048Gug8Ts5IYcMLCw18iUgrZAEY/D7s4hkI0FxEfGakKuPK/XUMPzPxi5g==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.58.1", - "@typescript-eslint/types": "^8.58.1", + "@typescript-eslint/tsconfig-utils": "^8.58.0", + "@typescript-eslint/types": "^8.58.0", "debug": "^4.4.3" }, "engines": { @@ -1550,14 +722,12 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.58.1.tgz", - "integrity": "sha512-TPYUEqJK6avLcEjumWsIuTpuYODTTDAtoMdt8ZZa93uWMTX13Nb8L5leSje1NluammvU+oI3QRr5lLXPgihX3w==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.58.1", - "@typescript-eslint/visitor-keys": "8.58.1" + "@typescript-eslint/types": "8.58.0", + "@typescript-eslint/visitor-keys": "8.58.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1568,9 +738,7 @@ } }, "node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.58.1.tgz", - "integrity": "sha512-JAr2hOIct2Q+qk3G+8YFfqkqi7sC86uNryT+2i5HzMa2MPjw4qNFvtjnw1IiA1rP7QhNKVe21mSSLaSjwA1Olw==", + "version": "8.58.0", "dev": true, "license": "MIT", "engines": { @@ -1585,15 +753,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.58.1.tgz", - "integrity": "sha512-HUFxvTJVroT+0rXVJC7eD5zol6ID+Sn5npVPWoFuHGg9Ncq5Q4EYstqR+UOqaNRFXi5TYkpXXkLhoCHe3G0+7w==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.58.1", - "@typescript-eslint/typescript-estree": "8.58.1", - 
"@typescript-eslint/utils": "8.58.1", + "@typescript-eslint/types": "8.58.0", + "@typescript-eslint/typescript-estree": "8.58.0", + "@typescript-eslint/utils": "8.58.0", "debug": "^4.4.3", "ts-api-utils": "^2.5.0" }, @@ -1610,9 +776,7 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.58.1.tgz", - "integrity": "sha512-io/dV5Aw5ezwzfPBBWLoT+5QfVtP8O7q4Kftjn5azJ88bYyp/ZMCsyW1lpKK46EXJcaYMZ1JtYj+s/7TdzmQMw==", + "version": "8.58.0", "dev": true, "license": "MIT", "engines": { @@ -1624,16 +788,14 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.58.1.tgz", - "integrity": "sha512-w4w7WR7GHOjqqPnvAYbazq+Y5oS68b9CzasGtnd6jIeOIeKUzYzupGTB2T4LTPSv4d+WPeccbxuneTFHYgAAWg==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/project-service": "8.58.1", - "@typescript-eslint/tsconfig-utils": "8.58.1", - "@typescript-eslint/types": "8.58.1", - "@typescript-eslint/visitor-keys": "8.58.1", + "@typescript-eslint/project-service": "8.58.0", + "@typescript-eslint/tsconfig-utils": "8.58.0", + "@typescript-eslint/types": "8.58.0", + "@typescript-eslint/visitor-keys": "8.58.0", "debug": "^4.4.3", "minimatch": "^10.2.2", "semver": "^7.7.3", @@ -1651,17 +813,59 @@ "typescript": ">=4.8.4 <6.1.0" } }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "10.2.5", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.5" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch/node_modules/brace-expansion": { + "version": "5.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + 
"balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch/node_modules/brace-expansion/node_modules/balanced-match": { + "version": "4.0.4", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.4", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/@typescript-eslint/utils": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.58.1.tgz", - "integrity": "sha512-Ln8R0tmWC7pTtLOzgJzYTXSCjJ9rDNHAqTaVONF4FEi2qwce8mD9iSOxOpLFFvWp/wBFlew0mjM1L1ihYWfBdQ==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.9.1", - "@typescript-eslint/scope-manager": "8.58.1", - "@typescript-eslint/types": "8.58.1", - "@typescript-eslint/typescript-estree": "8.58.1" + "@typescript-eslint/scope-manager": "8.58.0", + "@typescript-eslint/types": "8.58.0", + "@typescript-eslint/typescript-estree": "8.58.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1676,13 +880,11 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.58.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.58.1.tgz", - "integrity": "sha512-y+vH7QE8ycjoa0bWciFg7OpFcipUuem1ujhrdLtq1gByKwfbC7bPeKsiny9e0urg93DqwGcHey+bGRKCnF1nZQ==", + "version": "8.58.0", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/types": "8.58.0", "eslint-visitor-keys": "^5.0.0" }, "engines": { @@ -1695,8 +897,6 @@ }, "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-5.0.1.tgz", - "integrity": "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1707,16 +907,14 @@ } }, "node_modules/@vitest/expect": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.1.4.tgz", - "integrity": "sha512-iPBpra+VDuXmBFI3FMKHSFXp3Gx5HfmSCE8X67Dn+bwephCnQCaB7qWK2ldHa+8ncN8hJU8VTMcxjPpyMkUjww==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { "@standard-schema/spec": "^1.1.0", "@types/chai": "^5.2.2", - "@vitest/spy": "4.1.4", - "@vitest/utils": "4.1.4", + "@vitest/spy": "4.1.3", + "@vitest/utils": "4.1.3", "chai": "^6.2.2", "tinyrainbow": "^3.1.0" }, @@ -1725,13 +923,11 @@ } }, "node_modules/@vitest/mocker": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.1.4.tgz", - "integrity": "sha512-R9HTZBhW6yCSGbGQnDnH3QHfJxokKN4KB+Yvk9Q1le7eQNYwiCyKxmLmurSpFy6BzJanSLuEUDrD+j97Q+ZLPg==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "4.1.4", + "@vitest/spy": "4.1.3", "estree-walker": "^3.0.3", "magic-string": "^0.30.21" }, @@ -1752,9 +948,7 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.1.4.tgz", - "integrity": "sha512-ddmDHU0gjEUyEVLxtZa7xamrpIefdEETu3nZjWtHeZX4QxqJ7tRxSteHVXJOcr8jhiLoGAhkK4WJ3WqBpjx42A==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { @@ -1765,13 +959,11 @@ } }, "node_modules/@vitest/runner": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.1.4.tgz", - "integrity": "sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { - "@vitest/utils": "4.1.4", 
+ "@vitest/utils": "4.1.3", "pathe": "^2.0.3" }, "funding": { @@ -1779,14 +971,12 @@ } }, "node_modules/@vitest/snapshot": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.1.4.tgz", - "integrity": "sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.1.4", - "@vitest/utils": "4.1.4", + "@vitest/pretty-format": "4.1.3", + "@vitest/utils": "4.1.3", "magic-string": "^0.30.21", "pathe": "^2.0.3" }, @@ -1795,9 +985,7 @@ } }, "node_modules/@vitest/spy": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.1.4.tgz", - "integrity": "sha512-XxNdAsKW7C+FLydqFJLb5KhJtl3PGCMmYwFRfhvIgxJvLSXhhVI1zM8f1qD3Zg7RCjTSzDVyct6sghs9UEgBEQ==", + "version": "4.1.3", "dev": true, "license": "MIT", "funding": { @@ -1805,13 +993,11 @@ } }, "node_modules/@vitest/utils": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.1.4.tgz", - "integrity": "sha512-13QMT+eysM5uVGa1rG4kegGYNp6cnQcsTc67ELFbhNLQO+vgsygtYJx2khvdt4gVQqSSpC/KT5FZZxUpP3Oatw==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.1.4", + "@vitest/pretty-format": "4.1.3", "convert-source-map": "^2.0.0", "tinyrainbow": "^3.1.0" }, @@ -1821,10 +1007,9 @@ }, "node_modules/acorn": { "version": "8.16.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", - "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -1834,8 +1019,6 @@ }, "node_modules/acorn-jsx": { "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": 
true, "license": "MIT", "peerDependencies": { @@ -1844,8 +1027,6 @@ }, "node_modules/ajv": { "version": "6.14.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", - "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", "dev": true, "license": "MIT", "dependencies": { @@ -1861,8 +1042,6 @@ }, "node_modules/ansi-escapes": { "version": "7.3.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.3.0.tgz", - "integrity": "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==", "license": "MIT", "dependencies": { "environment": "^1.0.0" @@ -1876,8 +1055,6 @@ }, "node_modules/ansi-regex": { "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", "license": "MIT", "engines": { "node": ">=12" @@ -1887,16 +1064,10 @@ } }, "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, + "version": "6.2.3", "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, "engines": { - "node": ">=8" + "node": ">=12" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" @@ -1904,15 +1075,11 @@ }, "node_modules/argparse": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", "dev": true, "license": "Python-2.0" }, "node_modules/array-buffer-byte-length": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", - "integrity": 
"sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", "dev": true, "license": "MIT", "dependencies": { @@ -1928,8 +1095,6 @@ }, "node_modules/array-includes": { "version": "3.1.9", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", - "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1951,8 +1116,6 @@ }, "node_modules/array.prototype.findlast": { "version": "1.2.5", - "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", - "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1972,8 +1135,6 @@ }, "node_modules/array.prototype.flat": { "version": "1.3.3", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz", - "integrity": "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==", "dev": true, "license": "MIT", "dependencies": { @@ -1991,8 +1152,6 @@ }, "node_modules/array.prototype.flatmap": { "version": "1.3.3", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", - "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", "dev": true, "license": "MIT", "dependencies": { @@ -2010,8 +1169,6 @@ }, "node_modules/array.prototype.tosorted": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", - "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", "dev": true, "license": "MIT", "dependencies": { @@ -2027,8 +1184,6 @@ }, "node_modules/arraybuffer.prototype.slice": { "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", - "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2049,8 +1204,6 @@ }, "node_modules/assertion-error": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, "license": "MIT", "engines": { @@ -2059,8 +1212,6 @@ }, "node_modules/async-function": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", - "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", "dev": true, "license": "MIT", "engines": { @@ -2069,8 +1220,6 @@ }, "node_modules/auto-bind": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/auto-bind/-/auto-bind-5.0.1.tgz", - "integrity": "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg==", "license": "MIT", "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" @@ -2081,8 +1230,6 @@ }, "node_modules/available-typed-arrays": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", - "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2095,20 +1242,8 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/balanced-match": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", - "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "18 || 20 || 
>=22" - } - }, "node_modules/baseline-browser-mapping": { - "version": "2.10.17", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.17.tgz", - "integrity": "sha512-HdrkN8eVG2CXxeifv/VdJ4A4RSra1DTW8dc/hdxzhGHN8QePs6gKaWM9pHPcpCoxYZJuOZ8drHmbdpLHjCYjLA==", + "version": "2.10.13", "dev": true, "license": "Apache-2.0", "bin": { @@ -2118,23 +1253,8 @@ "node": ">=6.0.0" } }, - "node_modules/brace-expansion": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", - "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^4.0.2" - }, - "engines": { - "node": "18 || 20 || >=22" - } - }, "node_modules/browserslist": { "version": "4.28.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", - "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", "dev": true, "funding": [ { @@ -2151,6 +1271,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.10.12", "caniuse-lite": "^1.0.30001782", @@ -2166,15 +1287,13 @@ } }, "node_modules/call-bind": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.9.tgz", - "integrity": "sha512-a/hy+pNsFUTR+Iz8TCJvXudKVLAnz/DyeSUo10I5yvFDQJBFU2s9uqQpoSrJlroHUKoKqzg+epxyP9lqFdzfBQ==", + "version": "1.0.8", "dev": true, "license": "MIT", "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "get-intrinsic": "^1.3.0", + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", "set-function-length": "^1.2.2" }, "engines": { @@ -2186,8 +1305,6 @@ }, "node_modules/call-bind-apply-helpers": { "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2200,8 +1317,6 @@ }, "node_modules/call-bound": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", - "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", "dev": true, "license": "MIT", "dependencies": { @@ -2217,8 +1332,6 @@ }, "node_modules/callsites": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, "license": "MIT", "engines": { @@ -2226,9 +1339,7 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001787", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001787.tgz", - "integrity": "sha512-mNcrMN9KeI68u7muanUpEejSLghOKlVhRqS/Za2IeyGllJ9I9otGpR9g3nsw7n4W378TE/LyIteA0+/FOZm4Kg==", + "version": "1.0.30001784", "dev": true, "funding": [ { @@ -2248,35 +1359,14 @@ }, "node_modules/chai": { "version": "6.2.2", - "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", - "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", "dev": true, "license": "MIT", "engines": { "node": ">=18" } }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, 
"node_modules/cli-boxes": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", "license": "MIT", "engines": { "node": ">=10" @@ -2287,8 +1377,6 @@ }, "node_modules/cli-cursor": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", - "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", "license": "MIT", "dependencies": { "restore-cursor": "^4.0.0" @@ -2302,8 +1390,6 @@ }, "node_modules/cli-truncate": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.2.0.tgz", - "integrity": "sha512-xRwvIOMGrfOAnM1JYtqQImuaNtDEv9v6oIYAs4LIHwTiKee8uwvIi363igssOC0O5U04i4AlENs79LQLu9tEMw==", "license": "MIT", "dependencies": { "slice-ansi": "^8.0.0", @@ -2316,10 +1402,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/cli-truncate/node_modules/string-width": { + "version": "8.2.0", + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.5.0", + "strip-ansi": "^7.1.2" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/code-excerpt": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/code-excerpt/-/code-excerpt-4.0.0.tgz", - "integrity": "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA==", "license": "MIT", "dependencies": { "convert-to-spaces": "^2.0.1" @@ -2330,8 +1428,6 @@ }, "node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2343,29 +1439,21 @@ }, 
"node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "dev": true, "license": "MIT" }, "node_modules/concat-map": { "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true, "license": "MIT" }, "node_modules/convert-source-map": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true, "license": "MIT" }, "node_modules/convert-to-spaces": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/convert-to-spaces/-/convert-to-spaces-2.0.1.tgz", - "integrity": "sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ==", "license": "MIT", "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" @@ -2373,8 +1461,6 @@ }, "node_modules/cross-spawn": { "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "license": "MIT", "dependencies": { @@ -2388,15 +1474,11 @@ }, "node_modules/csstype": { "version": "3.2.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", - "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", "devOptional": true, "license": "MIT" }, "node_modules/data-view-buffer": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", - "integrity": 
"sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2413,8 +1495,6 @@ }, "node_modules/data-view-byte-length": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", - "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2431,8 +1511,6 @@ }, "node_modules/data-view-byte-offset": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", - "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2449,8 +1527,6 @@ }, "node_modules/debug": { "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "dev": true, "license": "MIT", "dependencies": { @@ -2467,15 +1543,11 @@ }, "node_modules/deep-is": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true, "license": "MIT" }, "node_modules/define-data-property": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", "dev": true, "license": "MIT", "dependencies": { @@ -2492,8 +1564,6 @@ }, "node_modules/define-properties": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": 
"sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", "dev": true, "license": "MIT", "dependencies": { @@ -2510,8 +1580,6 @@ }, "node_modules/detect-libc": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", - "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -2520,8 +1588,6 @@ }, "node_modules/doctrine": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -2533,8 +1599,6 @@ }, "node_modules/dunder-proto": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", "dev": true, "license": "MIT", "dependencies": { @@ -2547,22 +1611,16 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.5.334", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.334.tgz", - "integrity": "sha512-mgjZAz7Jyx1SRCwEpy9wefDS7GvNPazLthHg8eQMJ76wBdGQQDW33TCrUTvQ4wzpmOrv2zrFoD3oNufMdyMpog==", + "version": "1.5.331", "dev": true, "license": "ISC" }, "node_modules/emoji-regex": { "version": "10.6.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", - "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", "license": "MIT" }, "node_modules/environment": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", - "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", "license": "MIT", 
"engines": { "node": ">=18" @@ -2572,9 +1630,7 @@ } }, "node_modules/es-abstract": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.2.tgz", - "integrity": "sha512-2FpH9Q5i2RRwyEP1AylXe6nYLR5OhaJTZwmlcP0dL/+JCbgg7yyEo/sEK6HeGZRf3dFpWwThaRHVApXSkW3xeg==", + "version": "1.24.1", "dev": true, "license": "MIT", "dependencies": { @@ -2642,8 +1698,6 @@ }, "node_modules/es-define-property": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "dev": true, "license": "MIT", "engines": { @@ -2652,8 +1706,6 @@ }, "node_modules/es-errors": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", "dev": true, "license": "MIT", "engines": { @@ -2662,8 +1714,6 @@ }, "node_modules/es-iterator-helpers": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.3.1.tgz", - "integrity": "sha512-zWwRvqWiuBPr0muUG/78cW3aHROFCNIQ3zpmYDpwdbnt2m+xlNyRWpHBpa2lJjSBit7BQ+RXA1iwbSmu5yJ/EQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2691,15 +1741,11 @@ }, "node_modules/es-module-lexer": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", - "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==", "dev": true, "license": "MIT" }, "node_modules/es-object-atoms": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "dev": true, "license": "MIT", "dependencies": { @@ -2711,8 +1757,6 
@@ }, "node_modules/es-set-tostringtag": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", "dev": true, "license": "MIT", "dependencies": { @@ -2727,8 +1771,6 @@ }, "node_modules/es-shim-unscopables": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", - "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", "dev": true, "license": "MIT", "dependencies": { @@ -2740,8 +1782,6 @@ }, "node_modules/es-to-primitive": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", - "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", "dev": true, "license": "MIT", "dependencies": { @@ -2758,8 +1798,6 @@ }, "node_modules/es-toolkit": { "version": "1.45.1", - "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.45.1.tgz", - "integrity": "sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw==", "license": "MIT", "workspaces": [ "docs", @@ -2767,9 +1805,7 @@ ] }, "node_modules/esbuild": { - "version": "0.27.7", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", - "integrity": "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", + "version": "0.27.5", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -2780,63 +1816,47 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.27.7", - "@esbuild/android-arm": "0.27.7", - "@esbuild/android-arm64": "0.27.7", - "@esbuild/android-x64": "0.27.7", - "@esbuild/darwin-arm64": "0.27.7", - "@esbuild/darwin-x64": "0.27.7", - "@esbuild/freebsd-arm64": "0.27.7", - "@esbuild/freebsd-x64": 
"0.27.7", - "@esbuild/linux-arm": "0.27.7", - "@esbuild/linux-arm64": "0.27.7", - "@esbuild/linux-ia32": "0.27.7", - "@esbuild/linux-loong64": "0.27.7", - "@esbuild/linux-mips64el": "0.27.7", - "@esbuild/linux-ppc64": "0.27.7", - "@esbuild/linux-riscv64": "0.27.7", - "@esbuild/linux-s390x": "0.27.7", - "@esbuild/linux-x64": "0.27.7", - "@esbuild/netbsd-arm64": "0.27.7", - "@esbuild/netbsd-x64": "0.27.7", - "@esbuild/openbsd-arm64": "0.27.7", - "@esbuild/openbsd-x64": "0.27.7", - "@esbuild/openharmony-arm64": "0.27.7", - "@esbuild/sunos-x64": "0.27.7", - "@esbuild/win32-arm64": "0.27.7", - "@esbuild/win32-ia32": "0.27.7", - "@esbuild/win32-x64": "0.27.7" + "@esbuild/aix-ppc64": "0.27.5", + "@esbuild/android-arm": "0.27.5", + "@esbuild/android-arm64": "0.27.5", + "@esbuild/android-x64": "0.27.5", + "@esbuild/darwin-arm64": "0.27.5", + "@esbuild/darwin-x64": "0.27.5", + "@esbuild/freebsd-arm64": "0.27.5", + "@esbuild/freebsd-x64": "0.27.5", + "@esbuild/linux-arm": "0.27.5", + "@esbuild/linux-arm64": "0.27.5", + "@esbuild/linux-ia32": "0.27.5", + "@esbuild/linux-loong64": "0.27.5", + "@esbuild/linux-mips64el": "0.27.5", + "@esbuild/linux-ppc64": "0.27.5", + "@esbuild/linux-riscv64": "0.27.5", + "@esbuild/linux-s390x": "0.27.5", + "@esbuild/linux-x64": "0.27.5", + "@esbuild/netbsd-arm64": "0.27.5", + "@esbuild/netbsd-x64": "0.27.5", + "@esbuild/openbsd-arm64": "0.27.5", + "@esbuild/openbsd-x64": "0.27.5", + "@esbuild/openharmony-arm64": "0.27.5", + "@esbuild/sunos-x64": "0.27.5", + "@esbuild/win32-arm64": "0.27.5", + "@esbuild/win32-ia32": "0.27.5", + "@esbuild/win32-x64": "0.27.5" } }, "node_modules/escalade": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "dev": true, "license": "MIT", "engines": { "node": ">=6" } }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/eslint": { "version": "9.39.4", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.4.tgz", - "integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -2893,8 +1913,6 @@ }, "node_modules/eslint-plugin-perfectionist": { "version": "5.8.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-perfectionist/-/eslint-plugin-perfectionist-5.8.0.tgz", - "integrity": "sha512-k8uIptWIxkUclonCFGyDzgYs9NI+Qh0a7cUXS3L7IYZDEsjXuimFBVbxXPQQngWqMiaxJRwbtYB4smMGMqF+cw==", "dev": true, "license": "MIT", "dependencies": { @@ -2910,8 +1928,6 @@ }, "node_modules/eslint-plugin-react": { "version": "7.37.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", - "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", "dev": true, "license": "MIT", "dependencies": { @@ -2943,8 +1959,6 @@ }, "node_modules/eslint-plugin-react-hooks": { "version": "7.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", - "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", "dev": true, "license": "MIT", "dependencies": { @@ -2961,28 +1975,8 @@ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, - "node_modules/eslint-plugin-react/node_modules/balanced-match": { - "version": "1.0.2", - 
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/eslint-plugin-react/node_modules/brace-expansion": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", - "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/eslint-plugin-react/node_modules/minimatch": { "version": "3.1.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", - "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -2992,10 +1986,22 @@ "node": "*" } }, + "node_modules/eslint-plugin-react/node_modules/minimatch/node_modules/brace-expansion": { + "version": "1.1.13", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint-plugin-react/node_modules/minimatch/node_modules/brace-expansion/node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, "node_modules/eslint-plugin-react/node_modules/semver": { "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", "bin": { @@ -3004,8 +2010,6 @@ }, "node_modules/eslint-plugin-unused-imports": { "version": "4.4.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-4.4.1.tgz", - "integrity": 
"sha512-oZGYUz1X3sRMGUB+0cZyK2VcvRX5lm/vB56PgNNcU+7ficUCKm66oZWKUubXWnOuPjQ8PvmXtCViXBMONPe7tQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -3020,8 +2024,6 @@ }, "node_modules/eslint-scope": { "version": "8.4.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", - "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -3035,30 +2037,27 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", "dev": true, - "license": "Apache-2.0", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">=8" }, "funding": { - "url": "https://opencollective.com/eslint" + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, "node_modules/eslint/node_modules/balanced-match": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true, "license": "MIT" }, "node_modules/eslint/node_modules/brace-expansion": { "version": "1.1.13", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", - "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", "dev": true, "license": "MIT", "dependencies": { @@ -3066,10 +2065,34 @@ "concat-map": "0.0.1" } }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/eslint/node_modules/eslint-visitor-keys": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", - "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3081,8 +2104,6 @@ }, "node_modules/eslint/node_modules/ignore": { "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", "dev": true, "license": "MIT", "engines": { @@ -3091,8 +2112,6 @@ }, "node_modules/eslint/node_modules/minimatch": { "version": "3.1.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", - "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -3104,8 +2123,6 @@ }, "node_modules/espree": { "version": "10.4.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", - "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -3122,8 +2139,6 @@ }, "node_modules/espree/node_modules/eslint-visitor-keys": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", - "integrity": 
"sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3135,8 +2150,6 @@ }, "node_modules/esquery": { "version": "1.7.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", - "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", "dev": true, "license": "BSD-3-Clause", "dependencies": { @@ -3148,8 +2161,6 @@ }, "node_modules/esrecurse": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -3161,8 +2172,6 @@ }, "node_modules/estraverse": { "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, "license": "BSD-2-Clause", "engines": { @@ -3171,8 +2180,6 @@ }, "node_modules/estree-walker": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dev": true, "license": "MIT", "dependencies": { @@ -3181,8 +2188,6 @@ }, "node_modules/esutils": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, "license": "BSD-2-Clause", "engines": { @@ -3191,8 +2196,6 @@ }, "node_modules/expect-type": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", - "integrity": 
"sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3201,29 +2204,21 @@ }, "node_modules/fast-deep-equal": { "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true, "license": "MIT" }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true, "license": "MIT" }, "node_modules/fast-levenshtein": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true, "license": "MIT" }, "node_modules/fdir": { "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", "dev": true, "license": "MIT", "engines": { @@ -3240,8 +2235,6 @@ }, "node_modules/file-entry-cache": { "version": "8.0.0", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", - "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3253,8 +2246,6 @@ }, "node_modules/find-up": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, "license": "MIT", "dependencies": { @@ -3270,8 
+2261,6 @@ }, "node_modules/flat-cache": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", - "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, "license": "MIT", "dependencies": { @@ -3284,15 +2273,11 @@ }, "node_modules/flatted": { "version": "3.4.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", - "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", "dev": true, "license": "ISC" }, "node_modules/for-each": { "version": "0.3.5", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", - "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", "dev": true, "license": "MIT", "dependencies": { @@ -3305,25 +2290,8 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, "node_modules/function-bind": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, "license": "MIT", "funding": { @@ -3332,8 +2300,6 @@ }, "node_modules/function.prototype.name": { "version": "1.1.8", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", - "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", "dev": true, "license": "MIT", 
"dependencies": { @@ -3353,8 +2319,6 @@ }, "node_modules/functions-have-names": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", - "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", "dev": true, "license": "MIT", "funding": { @@ -3363,8 +2327,6 @@ }, "node_modules/generator-function": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", - "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", "dev": true, "license": "MIT", "engines": { @@ -3373,8 +2335,6 @@ }, "node_modules/gensync": { "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, "license": "MIT", "engines": { @@ -3383,8 +2343,6 @@ }, "node_modules/get-east-asian-width": { "version": "1.5.0", - "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", - "integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==", "license": "MIT", "engines": { "node": ">=18" @@ -3395,8 +2353,6 @@ }, "node_modules/get-intrinsic": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3420,8 +2376,6 @@ }, "node_modules/get-proto": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", "dev": true, "license": "MIT", "dependencies": { @@ -3434,8 
+2388,6 @@ }, "node_modules/get-symbol-description": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", - "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", "dev": true, "license": "MIT", "dependencies": { @@ -3452,8 +2404,6 @@ }, "node_modules/get-tsconfig": { "version": "4.13.7", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.7.tgz", - "integrity": "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==", "dev": true, "license": "MIT", "dependencies": { @@ -3465,8 +2415,6 @@ }, "node_modules/glob-parent": { "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, "license": "ISC", "dependencies": { @@ -3478,8 +2426,6 @@ }, "node_modules/globals": { "version": "16.5.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", - "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", "dev": true, "license": "MIT", "engines": { @@ -3491,8 +2437,6 @@ }, "node_modules/globalthis": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", - "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3508,8 +2452,6 @@ }, "node_modules/gopd": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "dev": true, "license": "MIT", "engines": { @@ -3521,8 +2463,6 @@ }, "node_modules/has-bigints": { "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", - "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", "dev": true, "license": "MIT", "engines": { @@ -3534,8 +2474,6 @@ }, "node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, "license": "MIT", "engines": { @@ -3544,8 +2482,6 @@ }, "node_modules/has-property-descriptors": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", - "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dev": true, "license": "MIT", "dependencies": { @@ -3557,8 +2493,6 @@ }, "node_modules/has-proto": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", - "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3573,8 +2507,6 @@ }, "node_modules/has-symbols": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "dev": true, "license": "MIT", "engines": { @@ -3586,8 +2518,6 @@ }, "node_modules/has-tostringtag": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "dev": true, "license": "MIT", "dependencies": { @@ -3602,8 +2532,6 @@ }, "node_modules/hasown": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": 
"sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3615,15 +2543,11 @@ }, "node_modules/hermes-estree": { "version": "0.25.1", - "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", - "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", "dev": true, "license": "MIT" }, "node_modules/hermes-parser": { "version": "0.25.1", - "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", - "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", "dev": true, "license": "MIT", "dependencies": { @@ -3632,8 +2556,6 @@ }, "node_modules/ignore": { "version": "7.0.5", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", - "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", "dev": true, "license": "MIT", "engines": { @@ -3642,8 +2564,6 @@ }, "node_modules/import-fresh": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", - "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3659,8 +2579,6 @@ }, "node_modules/imurmurhash": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, "license": "MIT", "engines": { @@ -3669,8 +2587,6 @@ }, "node_modules/indent-string": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", - "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", "license": "MIT", "engines": { 
"node": ">=12" @@ -3681,8 +2597,6 @@ }, "node_modules/ink": { "version": "6.8.0", - "resolved": "https://registry.npmjs.org/ink/-/ink-6.8.0.tgz", - "integrity": "sha512-sbl1RdLOgkO9isK42WCZlJCFN9hb++sX9dsklOvfd1YQ3bQ2AiFu12Q6tFlr0HvEUvzraJntQCCpfEoUe9DSzA==", "license": "MIT", "dependencies": { "@alcalzone/ansi-tokenize": "^0.2.4", @@ -3730,8 +2644,6 @@ }, "node_modules/ink-text-input": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/ink-text-input/-/ink-text-input-6.0.0.tgz", - "integrity": "sha512-Fw64n7Yha5deb1rHY137zHTAbSTNelUKuB5Kkk2HACXEtwIHBCf9OH2tP/LQ9fRYTl1F0dZgbW0zPnZk6FA9Lw==", "license": "MIT", "dependencies": { "chalk": "^5.3.0", @@ -3747,8 +2659,6 @@ }, "node_modules/ink-text-input/node_modules/chalk": { "version": "5.6.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", - "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", "license": "MIT", "engines": { "node": "^12.17.0 || ^14.13 || >=16.0.0" @@ -3759,8 +2669,6 @@ }, "node_modules/ink-text-input/node_modules/type-fest": { "version": "4.41.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", - "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=16" @@ -3769,22 +2677,8 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ink/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/ink/node_modules/chalk": { "version": "5.6.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", - "integrity": 
"sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", "license": "MIT", "engines": { "node": "^12.17.0 || ^14.13 || >=16.0.0" @@ -3793,10 +2687,35 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/ink/node_modules/string-width": { + "version": "8.2.0", + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.5.0", + "strip-ansi": "^7.1.2" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ink/node_modules/type-fest": { + "version": "5.5.0", + "license": "(MIT OR CC0-1.0)", + "dependencies": { + "tagged-tag": "^1.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/internal-slot": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", - "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", "dev": true, "license": "MIT", "dependencies": { @@ -3810,8 +2729,6 @@ }, "node_modules/is-array-buffer": { "version": "3.0.5", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", - "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", "dev": true, "license": "MIT", "dependencies": { @@ -3828,8 +2745,6 @@ }, "node_modules/is-async-function": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", - "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3848,8 +2763,6 @@ }, "node_modules/is-bigint": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", - "integrity": 
"sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3864,8 +2777,6 @@ }, "node_modules/is-boolean-object": { "version": "1.2.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", - "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", "dev": true, "license": "MIT", "dependencies": { @@ -3881,8 +2792,6 @@ }, "node_modules/is-callable": { "version": "1.2.7", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", - "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", "dev": true, "license": "MIT", "engines": { @@ -3894,8 +2803,6 @@ }, "node_modules/is-core-module": { "version": "2.16.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", - "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dev": true, "license": "MIT", "dependencies": { @@ -3910,8 +2817,6 @@ }, "node_modules/is-data-view": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", - "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", "dev": true, "license": "MIT", "dependencies": { @@ -3928,8 +2833,6 @@ }, "node_modules/is-date-object": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", - "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", "dev": true, "license": "MIT", "dependencies": { @@ -3945,8 +2848,6 @@ }, "node_modules/is-extglob": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": 
"sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, "license": "MIT", "engines": { @@ -3955,8 +2856,6 @@ }, "node_modules/is-finalizationregistry": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", - "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", "dev": true, "license": "MIT", "dependencies": { @@ -3971,8 +2870,6 @@ }, "node_modules/is-fullwidth-code-point": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", - "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", "license": "MIT", "dependencies": { "get-east-asian-width": "^1.3.1" @@ -3986,8 +2883,6 @@ }, "node_modules/is-generator-function": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", - "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", "dev": true, "license": "MIT", "dependencies": { @@ -4006,8 +2901,6 @@ }, "node_modules/is-glob": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, "license": "MIT", "dependencies": { @@ -4019,8 +2912,6 @@ }, "node_modules/is-in-ci": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-in-ci/-/is-in-ci-2.0.0.tgz", - "integrity": "sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w==", "license": "MIT", "bin": { "is-in-ci": "cli.js" @@ -4034,8 +2925,6 @@ }, "node_modules/is-map": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", - "integrity": 
"sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", "dev": true, "license": "MIT", "engines": { @@ -4047,8 +2936,6 @@ }, "node_modules/is-negative-zero": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", - "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", "dev": true, "license": "MIT", "engines": { @@ -4060,8 +2947,6 @@ }, "node_modules/is-number-object": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", - "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", "dev": true, "license": "MIT", "dependencies": { @@ -4077,8 +2962,6 @@ }, "node_modules/is-regex": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", - "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", "dev": true, "license": "MIT", "dependencies": { @@ -4096,8 +2979,6 @@ }, "node_modules/is-set": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", - "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", "dev": true, "license": "MIT", "engines": { @@ -4109,8 +2990,6 @@ }, "node_modules/is-shared-array-buffer": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", - "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", "dev": true, "license": "MIT", "dependencies": { @@ -4125,8 +3004,6 @@ }, "node_modules/is-string": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", - "integrity": 
"sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", "dev": true, "license": "MIT", "dependencies": { @@ -4142,8 +3019,6 @@ }, "node_modules/is-symbol": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", - "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", "dev": true, "license": "MIT", "dependencies": { @@ -4160,8 +3035,6 @@ }, "node_modules/is-typed-array": { "version": "1.1.15", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", - "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4176,8 +3049,6 @@ }, "node_modules/is-weakmap": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", - "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", "dev": true, "license": "MIT", "engines": { @@ -4189,8 +3060,6 @@ }, "node_modules/is-weakref": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", - "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", "dev": true, "license": "MIT", "dependencies": { @@ -4205,8 +3074,6 @@ }, "node_modules/is-weakset": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", - "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4222,22 +3089,16 @@ }, "node_modules/isarray": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", "dev": 
true, "license": "MIT" }, "node_modules/isexe": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true, "license": "ISC" }, "node_modules/iterator.prototype": { "version": "1.1.5", - "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", - "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", "dev": true, "license": "MIT", "dependencies": { @@ -4254,15 +3115,11 @@ }, "node_modules/js-tokens": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", "dev": true, "license": "MIT" }, "node_modules/js-yaml": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", - "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4274,8 +3131,6 @@ }, "node_modules/jsesc": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true, "license": "MIT", "bin": { @@ -4287,29 +3142,21 @@ }, "node_modules/json-buffer": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", "dev": true, "license": "MIT" }, "node_modules/json-schema-traverse": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true, "license": "MIT" }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true, "license": "MIT" }, "node_modules/json5": { "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, "license": "MIT", "bin": { @@ -4321,8 +3168,6 @@ }, "node_modules/jsx-ast-utils": { "version": "3.3.5", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", - "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4337,8 +3182,6 @@ }, "node_modules/keyv": { "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, "license": "MIT", "dependencies": { @@ -4347,8 +3190,6 @@ }, "node_modules/levn": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4361,8 +3202,6 @@ }, "node_modules/lightningcss": { "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz", - "integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==", "dev": true, "license": "MPL-2.0", "dependencies": { 
@@ -4389,157 +3228,8 @@ "lightningcss-win32-x64-msvc": "1.32.0" } }, - "node_modules/lightningcss-android-arm64": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz", - "integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-darwin-arm64": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz", - "integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-darwin-x64": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz", - "integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-freebsd-x64": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz", - "integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==", - "cpu": [ - "x64" 
- ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-linux-arm-gnueabihf": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz", - "integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-linux-arm64-gnu": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz", - "integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-linux-arm64-musl": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz", - "integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, "node_modules/lightningcss-linux-x64-gnu": { "version": "1.32.0", - "resolved": 
"https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz", - "integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==", "cpu": [ "x64" ], @@ -4559,8 +3249,6 @@ }, "node_modules/lightningcss-linux-x64-musl": { "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz", - "integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==", "cpu": [ "x64" ], @@ -4578,52 +3266,8 @@ "url": "https://opencollective.com/parcel" } }, - "node_modules/lightningcss-win32-arm64-msvc": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz", - "integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/lightningcss-win32-x64-msvc": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz", - "integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MPL-2.0", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 12.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, "node_modules/locate-path": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, 
"license": "MIT", "dependencies": { @@ -4638,15 +3282,11 @@ }, "node_modules/lodash.merge": { "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true, "license": "MIT" }, "node_modules/loose-envify": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", "dev": true, "license": "MIT", "dependencies": { @@ -4658,8 +3298,6 @@ }, "node_modules/lru-cache": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, "license": "ISC", "dependencies": { @@ -4668,8 +3306,6 @@ }, "node_modules/magic-string": { "version": "0.30.21", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", - "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4678,8 +3314,6 @@ }, "node_modules/math-intrinsics": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", "dev": true, "license": "MIT", "engines": { @@ -4688,40 +3322,18 @@ }, "node_modules/mimic-fn": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "license": "MIT", "engines": { "node": ">=6" } }, - "node_modules/minimatch": { - "version": "10.2.5", - "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", - "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "brace-expansion": "^5.0.5" - }, - "engines": { - "node": "18 || 20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/ms": { "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true, "license": "MIT" }, "node_modules/nanoid": { "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "dev": true, "funding": [ { @@ -4739,15 +3351,11 @@ }, "node_modules/natural-compare": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true, "license": "MIT" }, "node_modules/natural-orderby": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/natural-orderby/-/natural-orderby-5.0.0.tgz", - "integrity": "sha512-kKHJhxwpR/Okycz4HhQKKlhWe4ASEfPgkSWNmKFHd7+ezuQlxkA5cM3+XkBPvm1gmHen3w53qsYAv+8GwRrBlg==", "dev": true, "license": "MIT", "engines": { @@ -4756,8 +3364,6 @@ }, "node_modules/node-exports-info": { "version": "1.6.0", - "resolved": "https://registry.npmjs.org/node-exports-info/-/node-exports-info-1.6.0.tgz", - "integrity": "sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw==", "dev": true, "license": "MIT", "dependencies": { @@ -4775,8 +3381,6 @@ }, "node_modules/node-exports-info/node_modules/semver": { "version": "6.3.1", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", "bin": { @@ -4785,15 +3389,11 @@ }, "node_modules/node-releases": { "version": "2.0.37", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.37.tgz", - "integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==", "dev": true, "license": "MIT" }, "node_modules/object-assign": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", "dev": true, "license": "MIT", "engines": { @@ -4802,8 +3402,6 @@ }, "node_modules/object-inspect": { "version": "1.13.4", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", - "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", "dev": true, "license": "MIT", "engines": { @@ -4815,8 +3413,6 @@ }, "node_modules/object-keys": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", "dev": true, "license": "MIT", "engines": { @@ -4825,8 +3421,6 @@ }, "node_modules/object.assign": { "version": "4.1.7", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", - "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", "dev": true, "license": "MIT", "dependencies": { @@ -4846,8 +3440,6 @@ }, "node_modules/object.entries": { "version": "1.1.9", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", - "integrity": 
"sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", "dev": true, "license": "MIT", "dependencies": { @@ -4862,8 +3454,6 @@ }, "node_modules/object.fromentries": { "version": "2.0.8", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", - "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4881,8 +3471,6 @@ }, "node_modules/object.values": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", - "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", "dev": true, "license": "MIT", "dependencies": { @@ -4900,8 +3488,6 @@ }, "node_modules/obug": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", - "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", "dev": true, "funding": [ "https://github.com/sponsors/sxzz", @@ -4911,8 +3497,6 @@ }, "node_modules/onetime": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "license": "MIT", "dependencies": { "mimic-fn": "^2.1.0" @@ -4926,8 +3510,6 @@ }, "node_modules/optionator": { "version": "0.9.4", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", - "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "license": "MIT", "dependencies": { @@ -4944,8 +3526,6 @@ }, "node_modules/own-keys": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", - "integrity": 
"sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", "dev": true, "license": "MIT", "dependencies": { @@ -4962,8 +3542,6 @@ }, "node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, "license": "MIT", "dependencies": { @@ -4978,8 +3556,6 @@ }, "node_modules/p-locate": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, "license": "MIT", "dependencies": { @@ -4994,8 +3570,6 @@ }, "node_modules/parent-module": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, "license": "MIT", "dependencies": { @@ -5007,8 +3581,6 @@ }, "node_modules/patch-console": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/patch-console/-/patch-console-2.0.0.tgz", - "integrity": "sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA==", "license": "MIT", "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" @@ -5016,8 +3588,6 @@ }, "node_modules/path-exists": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, "license": "MIT", "engines": { @@ -5026,8 +3596,6 @@ }, "node_modules/path-key": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": 
"sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, "license": "MIT", "engines": { @@ -5036,31 +3604,24 @@ }, "node_modules/path-parse": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true, "license": "MIT" }, "node_modules/pathe": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", - "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", "dev": true, "license": "MIT" }, "node_modules/picocolors": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "dev": true, "license": "ISC" }, "node_modules/picomatch": { "version": "4.0.4", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", - "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -5070,8 +3631,6 @@ }, "node_modules/possible-typed-array-names": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", - "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", "dev": true, "license": "MIT", "engines": { @@ -5080,8 +3639,6 @@ }, "node_modules/postcss": { "version": "8.5.9", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.9.tgz", - "integrity": "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==", "dev": true, "funding": [ { @@ -5109,8 +3666,6 @@ }, "node_modules/prelude-ls": { 
"version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true, "license": "MIT", "engines": { @@ -5119,8 +3674,6 @@ }, "node_modules/prettier": { "version": "3.8.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", - "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", "dev": true, "license": "MIT", "bin": { @@ -5135,8 +3688,6 @@ }, "node_modules/prop-types": { "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", "dev": true, "license": "MIT", "dependencies": { @@ -5147,8 +3698,6 @@ }, "node_modules/punycode": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "license": "MIT", "engines": { @@ -5156,25 +3705,20 @@ } }, "node_modules/react": { - "version": "19.2.5", - "resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz", - "integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==", + "version": "19.2.4", "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } }, "node_modules/react-is": { "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "dev": true, "license": "MIT" }, "node_modules/react-reconciler": { "version": "0.33.0", - "resolved": "https://registry.npmjs.org/react-reconciler/-/react-reconciler-0.33.0.tgz", - "integrity": 
"sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA==", "license": "MIT", "dependencies": { "scheduler": "^0.27.0" @@ -5188,8 +3732,6 @@ }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", - "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", "dev": true, "license": "MIT", "dependencies": { @@ -5211,8 +3753,6 @@ }, "node_modules/regexp.prototype.flags": { "version": "1.5.4", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", - "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", "dev": true, "license": "MIT", "dependencies": { @@ -5232,8 +3772,6 @@ }, "node_modules/resolve": { "version": "2.0.0-next.6", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.6.tgz", - "integrity": "sha512-3JmVl5hMGtJ3kMmB3zi3DL25KfkCEyy3Tw7Gmw7z5w8M9WlwoPFnIvwChzu1+cF3iaK3sp18hhPz8ANeimdJfA==", "dev": true, "license": "MIT", "dependencies": { @@ -5256,8 +3794,6 @@ }, "node_modules/resolve-from": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, "license": "MIT", "engines": { @@ -5266,8 +3802,6 @@ }, "node_modules/resolve-pkg-maps": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", - "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", "dev": true, "license": "MIT", "funding": { @@ -5276,8 +3810,6 @@ }, "node_modules/restore-cursor": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", - 
"integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", "license": "MIT", "dependencies": { "onetime": "^5.1.0", @@ -5291,14 +3823,12 @@ } }, "node_modules/rolldown": { - "version": "1.0.0-rc.15", - "resolved": "https://registry.npmjs.org/rolldown/-/rolldown-1.0.0-rc.15.tgz", - "integrity": "sha512-Ff31guA5zT6WjnGp0SXw76X6hzGRk/OQq2hE+1lcDe+lJdHSgnSX6nK3erbONHyCbpSj9a9E+uX/OvytZoWp2g==", + "version": "1.0.0-rc.13", "dev": true, "license": "MIT", "dependencies": { - "@oxc-project/types": "=0.124.0", - "@rolldown/pluginutils": "1.0.0-rc.15" + "@oxc-project/types": "=0.123.0", + "@rolldown/pluginutils": "1.0.0-rc.13" }, "bin": { "rolldown": "bin/cli.mjs" @@ -5307,27 +3837,25 @@ "node": "^20.19.0 || >=22.12.0" }, "optionalDependencies": { - "@rolldown/binding-android-arm64": "1.0.0-rc.15", - "@rolldown/binding-darwin-arm64": "1.0.0-rc.15", - "@rolldown/binding-darwin-x64": "1.0.0-rc.15", - "@rolldown/binding-freebsd-x64": "1.0.0-rc.15", - "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.15", - "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.15", - "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.15", - "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.15", - "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.15", - "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.15", - "@rolldown/binding-linux-x64-musl": "1.0.0-rc.15", - "@rolldown/binding-openharmony-arm64": "1.0.0-rc.15", - "@rolldown/binding-wasm32-wasi": "1.0.0-rc.15", - "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.15", - "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.15" + "@rolldown/binding-android-arm64": "1.0.0-rc.13", + "@rolldown/binding-darwin-arm64": "1.0.0-rc.13", + "@rolldown/binding-darwin-x64": "1.0.0-rc.13", + "@rolldown/binding-freebsd-x64": "1.0.0-rc.13", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.13", + "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.13", + "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.13", + 
"@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.13", + "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.13", + "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.13", + "@rolldown/binding-linux-x64-musl": "1.0.0-rc.13", + "@rolldown/binding-openharmony-arm64": "1.0.0-rc.13", + "@rolldown/binding-wasm32-wasi": "1.0.0-rc.13", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.13", + "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.13" } }, "node_modules/safe-array-concat": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", - "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", "dev": true, "license": "MIT", "dependencies": { @@ -5346,8 +3874,6 @@ }, "node_modules/safe-push-apply": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", - "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", "dev": true, "license": "MIT", "dependencies": { @@ -5363,8 +3889,6 @@ }, "node_modules/safe-regex-test": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", - "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", "dev": true, "license": "MIT", "dependencies": { @@ -5381,27 +3905,10 @@ }, "node_modules/scheduler": { "version": "0.27.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", - "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", "license": "MIT" }, - "node_modules/semver": { - "version": "7.7.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", - "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, 
- "engines": { - "node": ">=10" - } - }, "node_modules/set-function-length": { "version": "1.2.2", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", - "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", "dev": true, "license": "MIT", "dependencies": { @@ -5418,8 +3925,6 @@ }, "node_modules/set-function-name": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", - "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", "dev": true, "license": "MIT", "dependencies": { @@ -5434,8 +3939,6 @@ }, "node_modules/set-proto": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", - "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", "dev": true, "license": "MIT", "dependencies": { @@ -5449,8 +3952,6 @@ }, "node_modules/shebang-command": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "license": "MIT", "dependencies": { @@ -5462,8 +3963,6 @@ }, "node_modules/shebang-regex": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, "license": "MIT", "engines": { @@ -5472,8 +3971,6 @@ }, "node_modules/side-channel": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", - "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", "dev": true, "license": "MIT", "dependencies": { @@ -5491,14 +3988,12 @@ 
} }, "node_modules/side-channel-list": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.1.tgz", - "integrity": "sha512-mjn/0bi/oUURjc5Xl7IaWi/OJJJumuoJFQJfDDyO46+hBWsfaVM65TBHq2eoZBhzl9EchxOijpkbRC8SVBQU0w==", + "version": "1.0.0", "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0", - "object-inspect": "^1.13.4" + "object-inspect": "^1.13.3" }, "engines": { "node": ">= 0.4" @@ -5509,8 +4004,6 @@ }, "node_modules/side-channel-map": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", - "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", "dev": true, "license": "MIT", "dependencies": { @@ -5528,8 +4021,6 @@ }, "node_modules/side-channel-weakmap": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", - "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", "dev": true, "license": "MIT", "dependencies": { @@ -5548,21 +4039,15 @@ }, "node_modules/siginfo": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", "dev": true, "license": "ISC" }, "node_modules/signal-exit": { "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "license": "ISC" }, "node_modules/slice-ansi": { "version": "8.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-8.0.0.tgz", - "integrity": "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg==", "license": "MIT", "dependencies": { "ansi-styles": "^6.2.3", @@ 
-5575,22 +4060,8 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, - "node_modules/slice-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/source-map-js": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "dev": true, "license": "BSD-3-Clause", "engines": { @@ -5599,8 +4070,6 @@ }, "node_modules/stack-utils": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", "license": "MIT", "dependencies": { "escape-string-regexp": "^2.0.0" @@ -5611,8 +4080,6 @@ }, "node_modules/stack-utils/node_modules/escape-string-regexp": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "license": "MIT", "engines": { "node": ">=8" @@ -5620,22 +4087,16 @@ }, "node_modules/stackback": { "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", "dev": true, "license": "MIT" }, "node_modules/std-env": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-4.0.0.tgz", - "integrity": 
"sha512-zUMPtQ/HBY3/50VbpkupYHbRroTRZJPRLvreamgErJVys0ceuzMkD44J/QjqhHjOzK42GQ3QZIeFG1OYfOtKqQ==", "dev": true, "license": "MIT" }, "node_modules/stop-iteration-iterator": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", - "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", "dev": true, "license": "MIT", "dependencies": { @@ -5646,26 +4107,8 @@ "node": ">= 0.4" } }, - "node_modules/string-width": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.2.0.tgz", - "integrity": "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==", - "license": "MIT", - "dependencies": { - "get-east-asian-width": "^1.5.0", - "strip-ansi": "^7.1.2" - }, - "engines": { - "node": ">=20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/string.prototype.matchall": { "version": "4.0.12", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", - "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", "dev": true, "license": "MIT", "dependencies": { @@ -5692,8 +4135,6 @@ }, "node_modules/string.prototype.repeat": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", - "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", "dev": true, "license": "MIT", "dependencies": { @@ -5703,8 +4144,6 @@ }, "node_modules/string.prototype.trim": { "version": "1.2.10", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", - "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", "dev": true, "license": "MIT", 
"dependencies": { @@ -5725,8 +4164,6 @@ }, "node_modules/string.prototype.trimend": { "version": "1.0.9", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", - "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", "dev": true, "license": "MIT", "dependencies": { @@ -5744,8 +4181,6 @@ }, "node_modules/string.prototype.trimstart": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", - "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", "dev": true, "license": "MIT", "dependencies": { @@ -5762,8 +4197,6 @@ }, "node_modules/strip-ansi": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", - "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", "license": "MIT", "dependencies": { "ansi-regex": "^6.2.2" @@ -5777,8 +4210,6 @@ }, "node_modules/strip-json-comments": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, "license": "MIT", "engines": { @@ -5790,8 +4221,6 @@ }, "node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, "license": "MIT", "dependencies": { @@ -5803,8 +4232,6 @@ }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": 
"sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, "license": "MIT", "engines": { @@ -5816,8 +4243,6 @@ }, "node_modules/tagged-tag": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/tagged-tag/-/tagged-tag-1.0.0.tgz", - "integrity": "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==", "license": "MIT", "engines": { "node": ">=20" @@ -5828,8 +4253,6 @@ }, "node_modules/terminal-size": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/terminal-size/-/terminal-size-4.0.1.tgz", - "integrity": "sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ==", "license": "MIT", "engines": { "node": ">=18" @@ -5840,15 +4263,11 @@ }, "node_modules/tinybench": { "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", "dev": true, "license": "MIT" }, "node_modules/tinyexec": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.1.1.tgz", - "integrity": "sha512-VKS/ZaQhhkKFMANmAOhhXVoIfBXblQxGX1myCQ2faQrfmobMftXeJPcZGp0gS07ocvGJWDLZGyOZDadDBqYIJg==", "dev": true, "license": "MIT", "engines": { @@ -5856,14 +4275,12 @@ } }, "node_modules/tinyglobby": { - "version": "0.2.16", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", - "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "version": "0.2.15", "dev": true, "license": "MIT", "dependencies": { "fdir": "^6.5.0", - "picomatch": "^4.0.4" + "picomatch": "^4.0.3" }, "engines": { "node": ">=12.0.0" @@ -5874,8 +4291,6 @@ }, "node_modules/tinyrainbow": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.1.0.tgz", - "integrity": 
"sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==", "dev": true, "license": "MIT", "engines": { @@ -5884,8 +4299,6 @@ }, "node_modules/ts-api-utils": { "version": "2.5.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.5.0.tgz", - "integrity": "sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA==", "dev": true, "license": "MIT", "engines": { @@ -5895,20 +4308,11 @@ "typescript": ">=4.8.4" } }, - "node_modules/tslib": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "dev": true, - "license": "0BSD", - "optional": true - }, "node_modules/tsx": { "version": "4.21.0", - "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", - "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" @@ -5925,8 +4329,6 @@ }, "node_modules/type-check": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dev": true, "license": "MIT", "dependencies": { @@ -5936,25 +4338,8 @@ "node": ">= 0.8.0" } }, - "node_modules/type-fest": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-5.5.0.tgz", - "integrity": "sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==", - "license": "(MIT OR CC0-1.0)", - "dependencies": { - "tagged-tag": "^1.0.0" - }, - "engines": { - "node": ">=20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/typed-array-buffer": { "version": "1.0.3", - 
"resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", - "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", "dev": true, "license": "MIT", "dependencies": { @@ -5968,8 +4353,6 @@ }, "node_modules/typed-array-byte-length": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", - "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", "dev": true, "license": "MIT", "dependencies": { @@ -5988,8 +4371,6 @@ }, "node_modules/typed-array-byte-offset": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", - "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", "dev": true, "license": "MIT", "dependencies": { @@ -6010,8 +4391,6 @@ }, "node_modules/typed-array-length": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", - "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", "dev": true, "license": "MIT", "dependencies": { @@ -6031,10 +4410,9 @@ }, "node_modules/typescript": { "version": "5.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", - "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -6045,8 +4423,6 @@ }, "node_modules/unbox-primitive": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", - "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", "dev": true, "license": "MIT", 
"dependencies": { @@ -6064,15 +4440,11 @@ }, "node_modules/undici-types": { "version": "7.18.2", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", - "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", "dev": true, "license": "MIT" }, "node_modules/unicode-animations": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/unicode-animations/-/unicode-animations-1.0.3.tgz", - "integrity": "sha512-+klB2oWwcYZjYWhwP4Pr8UZffWDFVx6jKeIahE6z0QYyM2dwDeDPyn5nevCYbyotxvtT9lh21cVURO1RX0+YMg==", "hasInstallScript": true, "license": "MIT", "dependencies": { @@ -6084,8 +4456,6 @@ }, "node_modules/update-browserslist-db": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", - "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", "dev": true, "funding": [ { @@ -6115,8 +4485,6 @@ }, "node_modules/uri-js": { "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -6124,16 +4492,15 @@ } }, "node_modules/vite": { - "version": "8.0.8", - "resolved": "https://registry.npmjs.org/vite/-/vite-8.0.8.tgz", - "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", + "version": "8.0.7", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", "postcss": "^8.5.8", - "rolldown": "1.0.0-rc.15", + "rolldown": "1.0.0-rc.13", "tinyglobby": "^0.2.15" }, "bin": { @@ -6202,19 +4569,17 @@ } }, "node_modules/vitest": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.1.4.tgz", - "integrity": 
"sha512-tFuJqTxKb8AvfyqMfnavXdzfy3h3sWZRWwfluGbkeR7n0HUev+FmNgZ8SDrRBTVrVCjgH5cA21qGbCffMNtWvg==", + "version": "4.1.3", "dev": true, "license": "MIT", "dependencies": { - "@vitest/expect": "4.1.4", - "@vitest/mocker": "4.1.4", - "@vitest/pretty-format": "4.1.4", - "@vitest/runner": "4.1.4", - "@vitest/snapshot": "4.1.4", - "@vitest/spy": "4.1.4", - "@vitest/utils": "4.1.4", + "@vitest/expect": "4.1.3", + "@vitest/mocker": "4.1.3", + "@vitest/pretty-format": "4.1.3", + "@vitest/runner": "4.1.3", + "@vitest/snapshot": "4.1.3", + "@vitest/spy": "4.1.3", + "@vitest/utils": "4.1.3", "es-module-lexer": "^2.0.0", "expect-type": "^1.3.0", "magic-string": "^0.30.21", @@ -6242,12 +4607,12 @@ "@edge-runtime/vm": "*", "@opentelemetry/api": "^1.9.0", "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", - "@vitest/browser-playwright": "4.1.4", - "@vitest/browser-preview": "4.1.4", - "@vitest/browser-webdriverio": "4.1.4", - "@vitest/coverage-istanbul": "4.1.4", - "@vitest/coverage-v8": "4.1.4", - "@vitest/ui": "4.1.4", + "@vitest/browser-playwright": "4.1.3", + "@vitest/browser-preview": "4.1.3", + "@vitest/browser-webdriverio": "4.1.3", + "@vitest/coverage-istanbul": "4.1.3", + "@vitest/coverage-v8": "4.1.3", + "@vitest/ui": "4.1.3", "happy-dom": "*", "jsdom": "*", "vite": "^6.0.0 || ^7.0.0 || ^8.0.0" @@ -6293,8 +4658,6 @@ }, "node_modules/which": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "license": "ISC", "dependencies": { @@ -6309,8 +4672,6 @@ }, "node_modules/which-boxed-primitive": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", - "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", "dev": true, "license": "MIT", "dependencies": { @@ -6329,8 +4690,6 @@ }, 
"node_modules/which-builtin-type": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", - "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", "dev": true, "license": "MIT", "dependencies": { @@ -6357,8 +4716,6 @@ }, "node_modules/which-collection": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", - "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", "dev": true, "license": "MIT", "dependencies": { @@ -6376,8 +4733,6 @@ }, "node_modules/which-typed-array": { "version": "1.1.20", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz", - "integrity": "sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==", "dev": true, "license": "MIT", "dependencies": { @@ -6398,8 +4753,6 @@ }, "node_modules/why-is-node-running": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", "dev": true, "license": "MIT", "dependencies": { @@ -6415,8 +4768,6 @@ }, "node_modules/widest-line": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-6.0.0.tgz", - "integrity": "sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA==", "license": "MIT", "dependencies": { "string-width": "^8.1.0" @@ -6428,10 +4779,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/widest-line/node_modules/string-width": { + "version": "8.2.0", + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.5.0", + "strip-ansi": "^7.1.2" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, "node_modules/word-wrap": { "version": "1.2.5", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", - "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, "license": "MIT", "engines": { @@ -6440,8 +4803,6 @@ }, "node_modules/wrap-ansi": { "version": "9.0.2", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", - "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", "license": "MIT", "dependencies": { "ansi-styles": "^6.2.1", @@ -6455,22 +4816,8 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/wrap-ansi/node_modules/string-width": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", "license": "MIT", "dependencies": { "emoji-regex": "^10.3.0", @@ -6486,8 +4833,6 @@ }, "node_modules/ws": { "version": "8.20.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", - "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", "license": "MIT", "engines": { "node": ">=10.0.0" @@ -6507,15 +4852,11 @@ }, "node_modules/yallist": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": 
"sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true, "license": "ISC" }, "node_modules/yocto-queue": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, "license": "MIT", "engines": { @@ -6527,24 +4868,19 @@ }, "node_modules/yoga-layout": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/yoga-layout/-/yoga-layout-3.2.1.tgz", - "integrity": "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==", "license": "MIT" }, "node_modules/zod": { "version": "4.3.6", - "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", - "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", "dev": true, "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } }, "node_modules/zod-validation-error": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", - "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", "dev": true, "license": "MIT", "engines": { diff --git a/ui-tui/src/components/textInput.tsx b/ui-tui/src/components/textInput.tsx index 9ec083c9dd..f5deb9c49c 100644 --- a/ui-tui/src/components/textInput.tsx +++ b/ui-tui/src/components/textInput.tsx @@ -1,4 +1,4 @@ -import { Text, useInput } from 'ink' +import { Text, useInput, useStdin } from 'ink' import { useEffect, useRef, useState } from 'react' function wordLeft(s: string, p: number) { @@ -29,6 +29,29 @@ function wordRight(s: string, p: number) { return i } +const FWD_DELETE_RE = /\x1b\[3[~$^]|\x1b\[3;/ + +function useForwardDeleteRef(isActive: boolean) { + const ref = useRef(false) + const { internal_eventEmitter: ee 
} = useStdin() + + useEffect(() => { + if (!isActive) return + + const onInput = (data: string) => { + ref.current = FWD_DELETE_RE.test(data) + } + + ee.prependListener('input', onInput) + + return () => { + ee.removeListener('input', onInput) + } + }, [isActive, ee]) + + return ref +} + const ESC = '\x1b' const INV = ESC + '[7m' const INV_OFF = ESC + '[27m' @@ -56,6 +79,7 @@ interface Props { export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = '', focus = true }: Props) { const [cur, setCur] = useState(value.length) + const isFwdDelete = useForwardDeleteRef(focus) const curRef = useRef(cur) const vRef = useRef(value) @@ -211,7 +235,7 @@ export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = '' c = mod ? wordLeft(v, c) : Math.max(0, c - 1) } else if (k.rightArrow) { c = mod ? wordRight(v, c) : Math.min(v.length, c + 1) - } else if ((k.backspace || k.delete) && c > 0) { + } else if ((k.backspace || k.delete) && !isFwdDelete.current && c > 0) { if (mod) { const t = wordLeft(v, c) v = v.slice(0, t) + v.slice(c) @@ -220,6 +244,13 @@ export function TextInput({ value, onChange, onPaste, onSubmit, placeholder = '' v = v.slice(0, c - 1) + v.slice(c) c-- } + } else if (k.delete && isFwdDelete.current && c < v.length) { + if (mod) { + const t = wordRight(v, c) + v = v.slice(0, c) + v.slice(t) + } else { + v = v.slice(0, c) + v.slice(c + 1) + } } else if (k.ctrl && inp === 'w' && c > 0) { const t = wordLeft(v, c) v = v.slice(0, t) + v.slice(c) diff --git a/uv.lock b/uv.lock index 8bad8b3857..7691ea984d 100644 --- a/uv.lock +++ b/uv.lock @@ -1772,6 +1772,15 @@ slack = [ sms = [ { name = "aiohttp" }, ] +termux = [ + { name = "agent-client-protocol" }, + { name = "croniter" }, + { name = "honcho-ai" }, + { name = "mcp" }, + { name = "ptyprocess", marker = "sys_platform != 'win32'" }, + { name = "pywinpty", marker = "sys_platform == 'win32'" }, + { name = "simple-term-menu" }, +] tts-premium = [ { name = "elevenlabs" }, ] @@ 
-1806,19 +1815,25 @@ requires-dist = [ { name = "fire", specifier = ">=0.7.1,<1" }, { name = "firecrawl-py", specifier = ">=4.16.0,<5" }, { name = "hermes-agent", extras = ["acp"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["acp"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["cli"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["cli"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["cron"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["cron"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["daytona"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["dev"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["dingtalk"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["feishu"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["homeassistant"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["honcho"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["honcho"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["mcp"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["mcp"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["messaging"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["mistral"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["modal"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["pty"], marker = "extra == 'all'" }, + { name = "hermes-agent", extras = ["pty"], marker = "extra == 'termux'" }, { name = "hermes-agent", extras = ["slack"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["sms"], marker = "extra == 'all'" }, { name = "hermes-agent", extras = ["tts-premium"], marker = "extra == 'all'" }, @@ -1861,7 +1876,7 @@ requires-dist = [ { name = "wandb", marker = "extra == 'rl'", specifier = ">=0.15.0,<1" }, { name = 
"yc-bench", marker = "python_full_version >= '3.12' and extra == 'yc-bench'", git = "https://github.com/collinear-ai/yc-bench.git" }, ] -provides-extras = ["modal", "daytona", "dev", "messaging", "cron", "slack", "matrix", "cli", "tts-premium", "voice", "pty", "honcho", "mcp", "homeassistant", "sms", "acp", "mistral", "dingtalk", "feishu", "rl", "yc-bench", "all"] +provides-extras = ["modal", "daytona", "dev", "messaging", "cron", "slack", "matrix", "cli", "tts-premium", "voice", "pty", "honcho", "mcp", "homeassistant", "sms", "acp", "mistral", "termux", "dingtalk", "feishu", "rl", "yc-bench", "all"] [[package]] name = "hf-transfer" diff --git a/website/docs/getting-started/installation.md b/website/docs/getting-started/installation.md index e3282fa8da..5bdb6809e7 100644 --- a/website/docs/getting-started/installation.md +++ b/website/docs/getting-started/installation.md @@ -1,7 +1,7 @@ --- sidebar_position: 2 title: "Installation" -description: "Install Hermes Agent on Linux, macOS, or WSL2" +description: "Install Hermes Agent on Linux, macOS, WSL2, or Android via Termux" --- # Installation @@ -16,6 +16,23 @@ Get Hermes Agent up and running in under two minutes with the one-line installer curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash ``` +### Android / Termux + +Hermes now ships a Termux-aware installer path too: + +```bash +curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash +``` + +The installer detects Termux automatically and switches to a tested Android flow: +- uses Termux `pkg` for system dependencies (`git`, `python`, `nodejs`, `ripgrep`, `ffmpeg`, build tools) +- creates the virtualenv with `python -m venv` +- exports `ANDROID_API_LEVEL` automatically for Android wheel builds +- installs a curated `.[termux]` extra with `pip` +- skips the untested browser / WhatsApp bootstrap by default + +If you want the fully explicit path, follow the dedicated 
[Termux guide](./termux.md). + :::warning Windows Native Windows is **not supported**. Please install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and run Hermes Agent from there. The install command above works inside WSL2. ::: @@ -125,6 +142,7 @@ uv pip install -e "." | `tts-premium` | ElevenLabs premium voices | `uv pip install -e ".[tts-premium]"` | | `voice` | CLI microphone input + audio playback | `uv pip install -e ".[voice]"` | | `pty` | PTY terminal support | `uv pip install -e ".[pty]"` | +| `termux` | Tested Android / Termux bundle (`cron`, `cli`, `pty`, `mcp`, `honcho`, `acp`) | `python -m pip install -e ".[termux]" -c constraints-termux.txt` | | `honcho` | AI-native memory (Honcho integration) | `uv pip install -e ".[honcho]"` | | `mcp` | Model Context Protocol support | `uv pip install -e ".[mcp]"` | | `homeassistant` | Home Assistant integration | `uv pip install -e ".[homeassistant]"` | @@ -134,6 +152,10 @@ uv pip install -e "." You can combine extras: `uv pip install -e ".[messaging,cron]"` +:::tip Termux users +`.[all]` is not currently available on Android because the `voice` extra pulls `faster-whisper`, which depends on `ctranslate2` wheels that are not published for Android. Use `.[termux]` for the tested mobile install path, then add individual extras only as needed. 
+::: + ### Step 4: Install Optional Submodules (if needed) diff --git a/website/docs/getting-started/quickstart.md b/website/docs/getting-started/quickstart.md index 7ed83e8198..bd26f1eebb 100644 --- a/website/docs/getting-started/quickstart.md +++ b/website/docs/getting-started/quickstart.md @@ -13,10 +13,14 @@ This guide walks you through installing Hermes Agent, setting up a provider, and Run the one-line installer: ```bash -# Linux / macOS / WSL2 +# Linux / macOS / WSL2 / Android (Termux) curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash ``` +:::tip Android / Termux +If you're installing on a phone, see the dedicated [Termux guide](./termux.md) for the tested manual path, supported extras, and current Android-specific limitations. +::: + :::tip Windows Users Install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) first, then run the command above inside your WSL2 terminal. ::: diff --git a/website/docs/getting-started/termux.md b/website/docs/getting-started/termux.md new file mode 100644 index 0000000000..1ad71e5313 --- /dev/null +++ b/website/docs/getting-started/termux.md @@ -0,0 +1,237 @@ +--- +sidebar_position: 3 +title: "Android / Termux" +description: "Run Hermes Agent directly on an Android phone with Termux" +--- + +# Hermes on Android with Termux + +This is the tested path for running Hermes Agent directly on an Android phone through [Termux](https://termux.dev/). + +It gives you a working local CLI on the phone, plus the core extras that are currently known to install cleanly on Android. + +## What is supported in the tested path? + +The tested Termux bundle installs: +- the Hermes CLI +- cron support +- PTY/background terminal support +- MCP support +- Honcho memory support +- ACP support + +Concretely, it maps to: + +```bash +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +## What is not part of the tested path yet? 
+ +A few features still need desktop/server-style dependencies that are not published for Android, or have not been validated on phones yet: + +- `.[all]` is not supported on Android today +- the `voice` extra is blocked by `faster-whisper -> ctranslate2`, and `ctranslate2` does not publish Android wheels +- automatic browser / Playwright bootstrap is skipped in the Termux installer +- Docker-based terminal isolation is not available inside Termux + +That does not stop Hermes from working well as a phone-native CLI agent — it just means the recommended mobile install is intentionally narrower than the desktop/server install. + +--- + +## Option 1: One-line installer + +Hermes now ships a Termux-aware installer path: + +```bash +curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash +``` + +On Termux, the installer automatically: +- uses `pkg` for system packages +- creates the venv with `python -m venv` +- installs `.[termux]` with `pip` +- links `hermes` into `$PREFIX/bin` so it stays on your Termux PATH +- skips the untested browser / WhatsApp bootstrap + +If you want the explicit commands or need to debug a failed install, use the manual path below. + +--- + +## Option 2: Manual install (fully explicit) + +### 1. Update Termux and install system packages + +```bash +pkg update +pkg install -y git python clang rust make pkg-config libffi openssl nodejs ripgrep ffmpeg +``` + +Why these packages? +- `python` — runtime + venv support +- `git` — clone/update the repo +- `clang`, `rust`, `make`, `pkg-config`, `libffi`, `openssl` — needed to build a few Python dependencies on Android +- `nodejs` — optional Node runtime for experiments beyond the tested core path +- `ripgrep` — fast file search +- `ffmpeg` — media / TTS conversions + +### 2. 
Clone Hermes + +```bash +git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git +cd hermes-agent +``` + +If you already cloned without submodules: + +```bash +git submodule update --init --recursive +``` + +### 3. Create a virtual environment + +```bash +python -m venv venv +source venv/bin/activate +export ANDROID_API_LEVEL="$(getprop ro.build.version.sdk)" +python -m pip install --upgrade pip setuptools wheel +``` + +`ANDROID_API_LEVEL` is important for Rust / maturin-based packages such as `jiter`. + +### 4. Install the tested Termux bundle + +```bash +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +If you only want the minimal core agent, this also works: + +```bash +python -m pip install -e '.' -c constraints-termux.txt +``` + +### 5. Put `hermes` on your Termux PATH + +```bash +ln -sf "$PWD/venv/bin/hermes" "$PREFIX/bin/hermes" +``` + +`$PREFIX/bin` is already on PATH in Termux, so this makes the `hermes` command persist across new shells without re-activating the venv every time. + +### 6. Verify the install + +```bash +hermes version +hermes doctor +``` + +### 7. Start Hermes + +```bash +hermes +``` + +--- + +## Recommended follow-up setup + +### Configure a model + +```bash +hermes model +``` + +Or set keys directly in `~/.hermes/.env`. + +### Re-run the full interactive setup wizard later + +```bash +hermes setup +``` + +### Install optional Node dependencies manually + +The tested Termux path skips Node/browser bootstrap on purpose. If you want to experiment later: + +```bash +npm install +``` + +Treat browser / WhatsApp tooling on Android as experimental until documented otherwise. 
+ +--- + +## Troubleshooting + +### `No solution found` when installing `.[all]` + +Use the tested Termux bundle instead: + +```bash +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +The blocker is currently the `voice` extra: +- `voice` pulls `faster-whisper` +- `faster-whisper` depends on `ctranslate2` +- `ctranslate2` does not publish Android wheels + +### `uv pip install` fails on Android + +Use the Termux path with the stdlib venv + `pip` instead: + +```bash +python -m venv venv +source venv/bin/activate +export ANDROID_API_LEVEL="$(getprop ro.build.version.sdk)" +python -m pip install --upgrade pip setuptools wheel +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +### `jiter` / `maturin` complains about `ANDROID_API_LEVEL` + +Set the API level explicitly before installing: + +```bash +export ANDROID_API_LEVEL="$(getprop ro.build.version.sdk)" +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +### `hermes doctor` says ripgrep or Node is missing + +Install them with Termux packages: + +```bash +pkg install ripgrep nodejs +``` + +### Build failures while installing Python packages + +Make sure the build toolchain is installed: + +```bash +pkg install clang rust make pkg-config libffi openssl +``` + +Then retry: + +```bash +python -m pip install -e '.[termux]' -c constraints-termux.txt +``` + +--- + +## Known limitations on phones + +- Docker backend is unavailable +- local voice transcription via `faster-whisper` is unavailable in the tested path +- browser automation setup is intentionally skipped by the installer +- some optional extras may work, but only `.[termux]` is currently documented as the tested Android bundle + +If you hit a new Android-specific issue, please open a GitHub issue with: +- your Android version +- `termux-info` +- `python --version` +- `hermes doctor` +- the exact install command and full error output diff --git a/website/docs/integrations/providers.md 
b/website/docs/integrations/providers.md index fbfa69ade6..133990b442 100644 --- a/website/docs/integrations/providers.md +++ b/website/docs/integrations/providers.md @@ -657,8 +657,8 @@ model: #### Responses get cut off mid-sentence **Possible causes:** -1. **Low `max_tokens` on the server** — SGLang defaults to 128 tokens per response. Set `--default-max-tokens` on the server or configure Hermes with `model.max_tokens` in config.yaml. -2. **Context exhaustion** — The model filled its context window. Increase context length or enable [context compression](/docs/user-guide/configuration#context-compression) in Hermes. +1. **Low output cap (`max_tokens`) on the server** — SGLang defaults to 128 tokens per response. Set `--default-max-tokens` on the server or configure Hermes with `model.max_tokens` in config.yaml. Note: `max_tokens` controls response length only — it is unrelated to how long your conversation history can be (that is `context_length`). +2. **Context exhaustion** — The model filled its context window. Increase `model.context_length` or enable [context compression](/docs/user-guide/configuration#context-compression) in Hermes. --- @@ -751,6 +751,15 @@ model: ### Context Length Detection +:::note Two settings, easy to confuse +**`context_length`** is the **total context window** — the combined budget for input *and* output tokens (e.g. 200,000 for Claude Opus 4.6). Hermes uses this to decide when to compress history and to validate API requests. + +**`model.max_tokens`** is the **output cap** — the maximum number of tokens the model may generate in a *single response*. It has nothing to do with how long your conversation history can be. The industry-standard name `max_tokens` is a common source of confusion; Anthropic's native SDK has since renamed it `max_output_tokens` for clarity. + +Set `context_length` when auto-detection gets the window size wrong. +Set `model.max_tokens` only when you need to limit how long individual responses can be. 
+::: + Hermes uses a multi-source resolution chain to detect the correct context window for your model and provider: 1. **Config override** — `model.context_length` in config.yaml (highest priority) diff --git a/website/docs/reference/faq.md b/website/docs/reference/faq.md index e8e6fe435e..0ec0abd409 100644 --- a/website/docs/reference/faq.md +++ b/website/docs/reference/faq.md @@ -36,6 +36,20 @@ Set your provider with `hermes model` or by editing `~/.hermes/.env`. See the [E curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash ``` +### Does it work on Android / Termux? + +Yes — Hermes now has a tested Termux install path for Android phones. + +Quick install: + +```bash +curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash +``` + +For the fully explicit manual steps, supported extras, and current limitations, see the [Termux guide](../getting-started/termux.md). + +Important caveat: the full `.[all]` extra is not currently available on Android because the `voice` extra depends on `faster-whisper` → `ctranslate2`, and `ctranslate2` does not publish Android wheels. Use the tested `.[termux]` extra instead. + ### Is my data sent anywhere? API calls go **only to the LLM provider you configure** (e.g., OpenRouter, your local Ollama instance). Hermes Agent does not collect telemetry, usage data, or analytics. Your conversations, memory, and skills are stored locally in `~/.hermes/`. diff --git a/website/docs/reference/slash-commands.md b/website/docs/reference/slash-commands.md index 89a30c46b6..a695d8dc12 100644 --- a/website/docs/reference/slash-commands.md +++ b/website/docs/reference/slash-commands.md @@ -46,7 +46,6 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in | `/config` | Show current configuration | | `/model [model-name]` | Show or change the current model. 
Supports: `/model claude-sonnet-4`, `/model provider:model` (switch providers), `/model custom:model` (custom endpoint), `/model custom:name:model` (named custom provider), `/model custom` (auto-detect from endpoint) | | `/provider` | Show available providers and current provider | -| `/prompt` | View/set custom system prompt | | `/personality` | Set a predefined personality | | `/verbose` | Cycle tool progress display: off → new → all → verbose. Can be [enabled for messaging](#notes) via config. | | `/reasoning` | Manage reasoning effort and display (usage: /reasoning [level\|show\|hide]) | @@ -144,7 +143,7 @@ The messaging gateway supports the following built-in commands inside Telegram, ## Notes -- `/skin`, `/tools`, `/toolsets`, `/browser`, `/config`, `/prompt`, `/cron`, `/skills`, `/platforms`, `/paste`, `/statusbar`, and `/plugins` are **CLI-only** commands. +- `/skin`, `/tools`, `/toolsets`, `/browser`, `/config`, `/cron`, `/skills`, `/platforms`, `/paste`, `/statusbar`, and `/plugins` are **CLI-only** commands. - `/verbose` is **CLI-only by default**, but can be enabled for messaging platforms by setting `display.tool_progress_command: true` in `config.yaml`. When enabled, it cycles the `display.tool_progress` mode and saves to config. - `/status`, `/sethome`, `/update`, `/approve`, `/deny`, and `/commands` are **messaging-only** commands. - `/background`, `/voice`, `/reload-mcp`, `/rollback`, and `/yolo` work in **both** the CLI and the messaging gateway. diff --git a/website/docs/user-guide/configuration.md b/website/docs/user-guide/configuration.md index 0ac24db184..819a379eb1 100644 --- a/website/docs/user-guide/configuration.md +++ b/website/docs/user-guide/configuration.md @@ -747,7 +747,7 @@ Control how much "thinking" the model does before responding: ```yaml agent: - reasoning_effort: "" # empty = medium (default). Options: xhigh (max), high, medium, low, minimal, none + reasoning_effort: "" # empty = medium (default). 
Options: none, minimal, low, medium, high, xhigh (max) ``` When unset (default), reasoning effort defaults to "medium" — a balanced level that works well for most tasks. Setting a value overrides it — higher reasoning effort gives better results on complex tasks at the cost of more tokens and latency. diff --git a/website/docs/user-guide/features/batch-processing.md b/website/docs/user-guide/features/batch-processing.md index 3cab1eba22..59554e34df 100644 --- a/website/docs/user-guide/features/batch-processing.md +++ b/website/docs/user-guide/features/batch-processing.md @@ -79,7 +79,7 @@ Entries can optionally include: | Parameter | Description | |-----------|-------------| -| `--reasoning_effort` | Effort level: `xhigh`, `high`, `medium`, `low`, `minimal`, `none` | +| `--reasoning_effort` | Effort level: `none`, `minimal`, `low`, `medium`, `high`, `xhigh` | | `--reasoning_disabled` | Completely disable reasoning/thinking tokens | ### Advanced Options diff --git a/website/sidebars.ts b/website/sidebars.ts index 39b60d88e9..720ccafd52 100644 --- a/website/sidebars.ts +++ b/website/sidebars.ts @@ -9,6 +9,7 @@ const sidebars: SidebarsConfig = { items: [ 'getting-started/quickstart', 'getting-started/installation', + 'getting-started/termux', 'getting-started/nix-setup', 'getting-started/updating', 'getting-started/learning-path',