diff --git a/.env.example b/.env.example
index bcb5708d6..3df76497e 100644
--- a/.env.example
+++ b/.env.example
@@ -231,6 +231,21 @@ VOICE_TOOLS_OPENAI_KEY=
 
 # Slack allowed users (comma-separated Slack user IDs)
 # SLACK_ALLOWED_USERS=
+# =============================================================================
+# TELEGRAM INTEGRATION
+# =============================================================================
+# Telegram Bot Token - From @BotFather (https://t.me/BotFather)
+# TELEGRAM_BOT_TOKEN=
+# TELEGRAM_ALLOWED_USERS=        # Comma-separated user IDs
+# TELEGRAM_HOME_CHANNEL=         # Default chat for cron delivery
+# TELEGRAM_HOME_CHANNEL_NAME=    # Display name for home channel
+
+# Webhook mode (optional — for cloud deployments like Fly.io/Railway)
+# Default is long polling. Setting TELEGRAM_WEBHOOK_URL switches to webhook mode.
+# TELEGRAM_WEBHOOK_URL=https://my-app.fly.dev/telegram
+# TELEGRAM_WEBHOOK_PORT=8443
+# TELEGRAM_WEBHOOK_SECRET=       # Recommended for production
+
 # WhatsApp (built-in Baileys bridge — run `hermes whatsapp` to pair)
 # WHATSAPP_ENABLED=false
 # WHATSAPP_ALLOWED_USERS=15551234567
diff --git a/.github/workflows/deploy-site.yml b/.github/workflows/deploy-site.yml
index 89e031e58..3c21e8a00 100644
--- a/.github/workflows/deploy-site.yml
+++ b/.github/workflows/deploy-site.yml
@@ -19,6 +19,8 @@ concurrency:
 
 jobs:
   build-and-deploy:
+    # Only run on the upstream repository, not on forks
+    if: github.repository == 'NousResearch/hermes-agent'
    runs-on: ubuntu-latest
    environment:
      name: github-pages
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 1f83913b2..6c1bb6eaa 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -14,6 +14,8 @@ concurrency:
 
 jobs:
   build-and-push:
+    # Only run on the upstream repository, not on forks
+    if: github.repository == 'NousResearch/hermes-agent'
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000..876aeeb7d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+graft skills
+graft optional-skills
+global-exclude __pycache__
+global-exclude *.py[cod]
diff --git a/agent/anthropic_adapter.py b/agent/anthropic_adapter.py
index a81736496..2fae12dde 100644
--- a/agent/anthropic_adapter.py
+++ b/agent/anthropic_adapter.py
@@ -162,6 +162,21 @@ def _is_oauth_token(key: str) -> bool:
     return True
 
 
+def _is_third_party_anthropic_endpoint(base_url: str | None) -> bool:
+    """Return True for non-Anthropic endpoints using the Anthropic Messages API.
+
+    Third-party proxies (Azure AI Foundry, AWS Bedrock, self-hosted) authenticate
+    with their own API keys via x-api-key, not Anthropic OAuth tokens. OAuth
+    detection should be skipped for these endpoints.
+    """
+    if not base_url:
+        return False  # No base_url = direct Anthropic API
+    normalized = base_url.rstrip("/").lower()
+    if "anthropic.com" in normalized:
+        return False  # Direct Anthropic API — OAuth applies
+    return True  # Any other endpoint is a third-party proxy
+
+
 def _requires_bearer_auth(base_url: str | None) -> bool:
     """Return True for Anthropic-compatible providers that require Bearer auth.
 
@@ -205,6 +220,14 @@ def build_anthropic_client(api_key: str, base_url: str = None):
         kwargs["auth_token"] = api_key
         if _COMMON_BETAS:
             kwargs["default_headers"] = {"anthropic-beta": ",".join(_COMMON_BETAS)}
+    elif _is_third_party_anthropic_endpoint(base_url):
+        # Third-party proxies (Azure AI Foundry, AWS Bedrock, etc.) use their
+        # own API keys with x-api-key auth.
Skip OAuth detection — their keys + # don't follow Anthropic's sk-ant-* prefix convention and would be + # misclassified as OAuth tokens. + kwargs["api_key"] = api_key + if _COMMON_BETAS: + kwargs["default_headers"] = {"anthropic-beta": ",".join(_COMMON_BETAS)} elif _is_oauth_token(api_key): # OAuth access token / setup-token → Bearer auth + Claude Code identity. # Anthropic routes OAuth requests based on user-agent and headers; @@ -284,71 +307,105 @@ def is_claude_code_token_valid(creds: Dict[str, Any]) -> bool: return now_ms < (expires_at - 60_000) -def _refresh_oauth_token(creds: Dict[str, Any]) -> Optional[str]: - """Attempt to refresh an expired Claude Code OAuth token. - - Uses the same token endpoint and client_id as Claude Code / OpenCode. - Only works for credentials that have a refresh token (from claude /login - or claude setup-token with OAuth flow). - - Tries the new platform.claude.com endpoint first (Claude Code >=2.1.81), - then falls back to console.anthropic.com for older tokens. - - Returns the new access token, or None if refresh fails. - """ +def refresh_anthropic_oauth_pure(refresh_token: str, *, use_json: bool = False) -> Dict[str, Any]: + """Refresh an Anthropic OAuth token without mutating local credential files.""" import time + import urllib.parse import urllib.request + if not refresh_token: + raise ValueError("refresh_token is required") + + client_id = "9d1c250a-e61b-44d9-88ed-5944d1962f5e" + if use_json: + data = json.dumps({ + "grant_type": "refresh_token", + "refresh_token": refresh_token, + "client_id": client_id, + }).encode() + content_type = "application/json" + else: + data = urllib.parse.urlencode({ + "grant_type": "refresh_token", + "refresh_token": refresh_token, + "client_id": client_id, + }).encode() + content_type = "application/x-www-form-urlencoded" + + token_endpoints = [ + "https://platform.claude.com/v1/oauth/token", + "https://console.anthropic.com/v1/oauth/token", + ] + last_error = None + for endpoint in token_endpoints: + req = urllib.request.Request( + endpoint, + data=data, + headers={ + "Content-Type": content_type, + "User-Agent": f"claude-cli/{_get_claude_code_version()} (external, cli)", + }, + method="POST", + ) + try: + with urllib.request.urlopen(req, timeout=10) as resp: + result = json.loads(resp.read().decode()) + except Exception as exc: + last_error = exc + logger.debug("Anthropic token refresh failed at %s: %s", endpoint, exc) + continue + + access_token = result.get("access_token", "") + if not access_token: + raise ValueError("Anthropic refresh response was missing access_token") + next_refresh = result.get("refresh_token", refresh_token) + expires_in = result.get("expires_in", 3600) + return { + "access_token": access_token, + "refresh_token": next_refresh, + "expires_at_ms": int(time.time() * 1000) + (expires_in * 1000), + } + + if last_error is not None: + raise last_error + raise ValueError("Anthropic token refresh failed") + + +def _refresh_oauth_token(creds: Dict[str, Any]) -> Optional[str]: + """Attempt to refresh an expired Claude Code OAuth token.""" refresh_token = creds.get("refreshToken", "") if not refresh_token: logger.debug("No refresh token available — cannot refresh") return None - # Client ID used by Claude Code's OAuth flow - CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e" - - # Anthropic migrated OAuth from console.anthropic.com to platform.claude.com - # (Claude Code v2.1.81+). Try new endpoint first, fall back to old. 
- token_endpoints = [ - "https://platform.claude.com/v1/oauth/token", - "https://console.anthropic.com/v1/oauth/token", - ] - - payload = json.dumps({ - "grant_type": "refresh_token", - "refresh_token": refresh_token, - "client_id": CLIENT_ID, - }).encode() - - headers = { - "Content-Type": "application/json", - "User-Agent": f"claude-cli/{_get_claude_code_version()} (external, cli)", - } - - for endpoint in token_endpoints: - req = urllib.request.Request( - endpoint, data=payload, headers=headers, method="POST", + try: + refreshed = refresh_anthropic_oauth_pure(refresh_token, use_json=False) + _write_claude_code_credentials( + refreshed["access_token"], + refreshed["refresh_token"], + refreshed["expires_at_ms"], ) - try: - with urllib.request.urlopen(req, timeout=10) as resp: - result = json.loads(resp.read().decode()) - new_access = result.get("access_token", "") - new_refresh = result.get("refresh_token", refresh_token) - expires_in = result.get("expires_in", 3600) - - if new_access: - new_expires_ms = int(time.time() * 1000) + (expires_in * 1000) - _write_claude_code_credentials(new_access, new_refresh, new_expires_ms) - logger.debug("Refreshed Claude Code OAuth token via %s", endpoint) - return new_access - except Exception as e: - logger.debug("Token refresh failed at %s: %s", endpoint, e) - - return None + logger.debug("Successfully refreshed Claude Code OAuth token") + return refreshed["access_token"] + except Exception as e: + logger.debug("Failed to refresh Claude Code token: %s", e) + return None -def _write_claude_code_credentials(access_token: str, refresh_token: str, expires_at_ms: int) -> None: - """Write refreshed credentials back to ~/.claude/.credentials.json.""" +def _write_claude_code_credentials( + access_token: str, + refresh_token: str, + expires_at_ms: int, + *, + scopes: Optional[list] = None, +) -> None: + """Write refreshed credentials back to ~/.claude/.credentials.json. + + The optional *scopes* list (e.g. ``["user:inference", "user:profile", ...]``) + is persisted so that Claude Code's own auth check recognises the credential + as valid. Claude Code >=2.1.81 gates on the presence of ``"user:inference"`` + in the stored scopes before it will use the token. + """ cred_path = Path.home() / ".claude" / ".credentials.json" try: # Read existing file to preserve other fields @@ -356,11 +413,19 @@ def _write_claude_code_credentials(access_token: str, refresh_token: str, expire if cred_path.exists(): existing = json.loads(cred_path.read_text(encoding="utf-8")) - existing["claudeAiOauth"] = { + oauth_data: Dict[str, Any] = { "accessToken": access_token, "refreshToken": refresh_token, "expiresAt": expires_at_ms, } + if scopes is not None: + oauth_data["scopes"] = scopes + elif "claudeAiOauth" in existing and "scopes" in existing["claudeAiOauth"]: + # Preserve previously-stored scopes when the refresh response + # does not include a scope field. + oauth_data["scopes"] = existing["claudeAiOauth"]["scopes"] + + existing["claudeAiOauth"] = oauth_data cred_path.parent.mkdir(parents=True, exist_ok=True) cred_path.write_text(json.dumps(existing, indent=2), encoding="utf-8") @@ -520,10 +585,208 @@ def run_oauth_setup_token() -> Optional[str]: return None +# ── Hermes-native PKCE OAuth flow ──────────────────────────────────────── +# Mirrors the flow used by Claude Code, pi-ai, and OpenCode. +# Stores credentials in ~/.hermes/.anthropic_oauth.json (our own file). 
+_OAUTH_CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
+_OAUTH_TOKEN_URL = "https://console.anthropic.com/v1/oauth/token"
+_OAUTH_REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback"
+_OAUTH_SCOPES = "org:create_api_key user:profile user:inference"
+_HERMES_OAUTH_FILE = get_hermes_home() / ".anthropic_oauth.json"
+
+
+def _generate_pkce() -> tuple:
+    """Generate PKCE code_verifier and code_challenge (S256)."""
+    import base64
+    import hashlib
+    import secrets
+
+    verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode()
+    challenge = base64.urlsafe_b64encode(
+        hashlib.sha256(verifier.encode()).digest()
+    ).rstrip(b"=").decode()
+    return verifier, challenge
+
+
+def run_hermes_oauth_login_pure() -> Optional[Dict[str, Any]]:
+    """Run Hermes-native OAuth PKCE flow and return credential state."""
+    import time
+    import webbrowser
+
+    verifier, challenge = _generate_pkce()
+
+    params = {
+        "code": "true",
+        "client_id": _OAUTH_CLIENT_ID,
+        "response_type": "code",
+        "redirect_uri": _OAUTH_REDIRECT_URI,
+        "scope": _OAUTH_SCOPES,
+        "code_challenge": challenge,
+        "code_challenge_method": "S256",
+        "state": verifier,
+    }
+    from urllib.parse import urlencode
+
+    auth_url = f"https://claude.ai/oauth/authorize?{urlencode(params)}"
+
+    print()
+    print("Authorize Hermes with your Claude Pro/Max subscription.")
+    print()
+    print("╭─ Claude Pro/Max Authorization ────────────────────╮")
+    print("│                                                   │")
+    print("│  Open this link in your browser:                  │")
+    print("╰───────────────────────────────────────────────────╯")
+    print()
+    print(f" {auth_url}")
+    print()
+
+    try:
+        webbrowser.open(auth_url)
+        print(" (Browser opened automatically)")
+    except Exception:
+        pass
+
+    print()
+    print("After authorizing, you'll see a code. Paste it below.")
+    print()
+    try:
+        auth_code = input("Authorization code: ").strip()
+    except (KeyboardInterrupt, EOFError):
+        return None
+
+    if not auth_code:
+        print("No code entered.")
+        return None
+
+    splits = auth_code.split("#")
+    code = splits[0]
+    state = splits[1] if len(splits) > 1 else ""
+
+    try:
+        import urllib.request
+
+        exchange_data = json.dumps({
+            "grant_type": "authorization_code",
+            "client_id": _OAUTH_CLIENT_ID,
+            "code": code,
+            "state": state,
+            "redirect_uri": _OAUTH_REDIRECT_URI,
+            "code_verifier": verifier,
+        }).encode()
+
+        req = urllib.request.Request(
+            _OAUTH_TOKEN_URL,
+            data=exchange_data,
+            headers={
+                "Content-Type": "application/json",
+                "User-Agent": f"claude-cli/{_get_claude_code_version()} (external, cli)",
+            },
+            method="POST",
+        )
+
+        with urllib.request.urlopen(req, timeout=15) as resp:
+            result = json.loads(resp.read().decode())
+    except Exception as e:
+        print(f"Token exchange failed: {e}")
+        return None
+
+    access_token = result.get("access_token", "")
+    refresh_token = result.get("refresh_token", "")
+    expires_in = result.get("expires_in", 3600)
+
+    if not access_token:
+        print("No access token in response.")
+        return None
+
+    expires_at_ms = int(time.time() * 1000) + (expires_in * 1000)
+    return {
+        "access_token": access_token,
+        "refresh_token": refresh_token,
+        "expires_at_ms": expires_at_ms,
+    }
+
+
+def run_hermes_oauth_login() -> Optional[str]:
+    """Run Hermes-native OAuth PKCE flow for Claude Pro/Max subscription.
+
+    Opens a browser to claude.ai for authorization, prompts for the code,
+    exchanges it for tokens, and stores them in ~/.hermes/.anthropic_oauth.json.
+
+    Returns the access token on success, None on failure.
+ """ + result = run_hermes_oauth_login_pure() + if not result: + return None + + access_token = result["access_token"] + refresh_token = result["refresh_token"] + expires_at_ms = result["expires_at_ms"] + + _save_hermes_oauth_credentials(access_token, refresh_token, expires_at_ms) + _write_claude_code_credentials(access_token, refresh_token, expires_at_ms) + + print("Authentication successful!") + return access_token + + +def _save_hermes_oauth_credentials(access_token: str, refresh_token: str, expires_at_ms: int) -> None: + """Save OAuth credentials to ~/.hermes/.anthropic_oauth.json.""" + data = { + "accessToken": access_token, + "refreshToken": refresh_token, + "expiresAt": expires_at_ms, + } + try: + _HERMES_OAUTH_FILE.parent.mkdir(parents=True, exist_ok=True) + _HERMES_OAUTH_FILE.write_text(json.dumps(data, indent=2), encoding="utf-8") + _HERMES_OAUTH_FILE.chmod(0o600) + except (OSError, IOError) as e: + logger.debug("Failed to save Hermes OAuth credentials: %s", e) + + +def read_hermes_oauth_credentials() -> Optional[Dict[str, Any]]: + """Read Hermes-managed OAuth credentials from ~/.hermes/.anthropic_oauth.json.""" + if _HERMES_OAUTH_FILE.exists(): + try: + data = json.loads(_HERMES_OAUTH_FILE.read_text(encoding="utf-8")) + if data.get("accessToken"): + return data + except (json.JSONDecodeError, OSError, IOError) as e: + logger.debug("Failed to read Hermes OAuth credentials: %s", e) + return None + + +def refresh_hermes_oauth_token() -> Optional[str]: + """Refresh the Hermes-managed OAuth token using the stored refresh token. + + Returns the new access token, or None if refresh fails. + """ + creds = read_hermes_oauth_credentials() + if not creds or not creds.get("refreshToken"): + return None + + try: + refreshed = refresh_anthropic_oauth_pure( + creds["refreshToken"], + use_json=True, + ) + _save_hermes_oauth_credentials( + refreshed["access_token"], + refreshed["refresh_token"], + refreshed["expires_at_ms"], + ) + _write_claude_code_credentials( + refreshed["access_token"], + refreshed["refresh_token"], + refreshed["expires_at_ms"], + ) + logger.debug("Successfully refreshed Hermes OAuth token") + return refreshed["access_token"] + except Exception as e: + logger.debug("Failed to refresh Hermes OAuth token: %s", e) + + return None # --------------------------------------------------------------------------- @@ -1056,4 +1319,4 @@ def normalize_anthropic_response( reasoning_details=None, ), finish_reason, - ) + ) \ No newline at end of file diff --git a/agent/auxiliary_client.py b/agent/auxiliary_client.py index 0de263c41..3b05e8d12 100644 --- a/agent/auxiliary_client.py +++ b/agent/auxiliary_client.py @@ -7,7 +7,7 @@ the best available backend without duplicating fallback logic. Resolution order for text tasks (auto mode): 1. OpenRouter (OPENROUTER_API_KEY) 2. Nous Portal (~/.hermes/auth.json active provider) - 3. Custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY) + 3. Custom endpoint (config.yaml model.base_url + OPENAI_API_KEY) 4. Codex OAuth (Responses API via chatgpt.com with gpt-5.3-codex, wrapped to look like a chat.completions client) 5. 
Native Anthropic @@ -47,6 +47,7 @@ from typing import Any, Dict, List, Optional, Tuple from openai import OpenAI +from agent.credential_pool import load_pool from hermes_cli.config import get_hermes_home from hermes_constants import OPENROUTER_BASE_URL @@ -96,6 +97,45 @@ _CODEX_AUX_MODEL = "gpt-5.2-codex" _CODEX_AUX_BASE_URL = "https://chatgpt.com/backend-api/codex" +def _select_pool_entry(provider: str) -> Tuple[bool, Optional[Any]]: + """Return (pool_exists_for_provider, selected_entry).""" + try: + pool = load_pool(provider) + except Exception as exc: + logger.debug("Auxiliary client: could not load pool for %s: %s", provider, exc) + return False, None + if not pool or not pool.has_credentials(): + return False, None + try: + return True, pool.select() + except Exception as exc: + logger.debug("Auxiliary client: could not select pool entry for %s: %s", provider, exc) + return True, None + + +def _pool_runtime_api_key(entry: Any) -> str: + if entry is None: + return "" + # Use the PooledCredential.runtime_api_key property which handles + # provider-specific fallback (e.g. agent_key for nous). + key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "") + return str(key or "").strip() + + +def _pool_runtime_base_url(entry: Any, fallback: str = "") -> str: + if entry is None: + return str(fallback or "").strip().rstrip("/") + # runtime_base_url handles provider-specific logic (e.g. nous prefers inference_base_url). + # Fall back through inference_base_url and base_url for non-PooledCredential entries. + url = ( + getattr(entry, "runtime_base_url", None) + or getattr(entry, "inference_base_url", None) + or getattr(entry, "base_url", None) + or fallback + ) + return str(url or "").strip().rstrip("/") + + # ── Codex Responses → chat.completions adapter ───────────────────────────── # All auxiliary consumers call client.chat.completions.create(**kwargs) and # read response.choices[0].message.content. This adapter translates those @@ -439,6 +479,22 @@ def _read_nous_auth() -> Optional[dict]: Returns the provider state dict if Nous is active with tokens, otherwise None. 
""" + pool_present, entry = _select_pool_entry("nous") + if pool_present: + if entry is None: + return None + return { + "access_token": getattr(entry, "access_token", ""), + "refresh_token": getattr(entry, "refresh_token", None), + "agent_key": getattr(entry, "agent_key", None), + "inference_base_url": _pool_runtime_base_url(entry, _NOUS_DEFAULT_BASE_URL), + "portal_base_url": getattr(entry, "portal_base_url", None), + "client_id": getattr(entry, "client_id", None), + "scope": getattr(entry, "scope", None), + "token_type": getattr(entry, "token_type", "Bearer"), + "source": "pool", + } + try: if not _AUTH_JSON_PATH.is_file(): return None @@ -467,6 +523,11 @@ def _nous_base_url() -> str: def _read_codex_access_token() -> Optional[str]: """Read a valid, non-expired Codex OAuth access token from Hermes auth store.""" + pool_present, entry = _select_pool_entry("openai-codex") + if pool_present: + token = _pool_runtime_api_key(entry) + return token or None + try: from hermes_cli.auth import _read_codex_tokens data = _read_codex_tokens() @@ -513,6 +574,24 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]: if provider_id == "anthropic": return _try_anthropic() + pool_present, entry = _select_pool_entry(provider_id) + if pool_present: + api_key = _pool_runtime_api_key(entry) + if not api_key: + continue + + base_url = _pool_runtime_base_url(entry, pconfig.inference_base_url) or pconfig.inference_base_url + model = _API_KEY_PROVIDER_AUX_MODELS.get(provider_id, "default") + logger.debug("Auxiliary text client: %s (%s) via pool", pconfig.name, model) + extra = {} + if "api.kimi.com" in base_url.lower(): + extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"} + elif "api.githubcopilot.com" in base_url.lower(): + from hermes_cli.models import copilot_default_headers + + extra["default_headers"] = copilot_default_headers() + return OpenAI(api_key=api_key, base_url=base_url, **extra), model + creds = resolve_api_key_provider_credentials(provider_id) api_key = str(creds.get("api_key", "")).strip() if not api_key: @@ -562,6 +641,16 @@ def _get_auxiliary_env_override(task: str, suffix: str) -> Optional[str]: def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]: + pool_present, entry = _select_pool_entry("openrouter") + if pool_present: + or_key = _pool_runtime_api_key(entry) + if not or_key: + return None, None + base_url = _pool_runtime_base_url(entry, OPENROUTER_BASE_URL) or OPENROUTER_BASE_URL + logger.debug("Auxiliary client: OpenRouter via pool") + return OpenAI(api_key=or_key, base_url=base_url, + default_headers=_OR_HEADERS), _OPENROUTER_MODEL + or_key = os.getenv("OPENROUTER_API_KEY") if not or_key: return None, None @@ -577,22 +666,22 @@ def _try_nous() -> Tuple[Optional[OpenAI], Optional[str]]: global auxiliary_is_nous auxiliary_is_nous = True logger.debug("Auxiliary client: Nous Portal") + model = "gemini-3-flash" if nous.get("source") == "pool" else _NOUS_MODEL return ( - OpenAI(api_key=_nous_api_key(nous), base_url=_nous_base_url()), - _NOUS_MODEL, + OpenAI( + api_key=_nous_api_key(nous), + base_url=str(nous.get("inference_base_url") or _nous_base_url()).rstrip("/"), + ), + model, ) def _read_main_model() -> str: - """Read the user's configured main model from config/env. + """Read the user's configured main model from config.yaml. - Falls back through HERMES_MODEL → LLM_MODEL → config.yaml model.default - so the auxiliary client can use the same model as the main agent when no - dedicated auxiliary model is available. 
+ config.yaml model.default is the single source of truth for the active + model. Environment variables are no longer consulted. """ - from_env = os.getenv("OPENAI_MODEL") or os.getenv("HERMES_MODEL") or os.getenv("LLM_MODEL") - if from_env: - return from_env.strip() try: from hermes_cli.config import load_config cfg = load_config() @@ -659,11 +748,19 @@ def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]: def _try_codex() -> Tuple[Optional[Any], Optional[str]]: - codex_token = _read_codex_access_token() - if not codex_token: - return None, None + pool_present, entry = _select_pool_entry("openai-codex") + if pool_present: + codex_token = _pool_runtime_api_key(entry) + if not codex_token: + return None, None + base_url = _pool_runtime_base_url(entry, _CODEX_AUX_BASE_URL) or _CODEX_AUX_BASE_URL + else: + codex_token = _read_codex_access_token() + if not codex_token: + return None, None + base_url = _CODEX_AUX_BASE_URL logger.debug("Auxiliary client: Codex OAuth (%s via Responses API)", _CODEX_AUX_MODEL) - real_client = OpenAI(api_key=codex_token, base_url=_CODEX_AUX_BASE_URL) + real_client = OpenAI(api_key=codex_token, base_url=base_url) return CodexAuxiliaryClient(real_client, _CODEX_AUX_MODEL), _CODEX_AUX_MODEL @@ -673,14 +770,21 @@ def _try_anthropic() -> Tuple[Optional[Any], Optional[str]]: except ImportError: return None, None - token = resolve_anthropic_token() + pool_present, entry = _select_pool_entry("anthropic") + if pool_present: + if entry is None: + return None, None + token = _pool_runtime_api_key(entry) + else: + entry = None + token = resolve_anthropic_token() if not token: return None, None # Allow base URL override from config.yaml model.base_url, but only # when the configured provider is anthropic — otherwise a non-Anthropic # base_url (e.g. Codex endpoint) would leak into Anthropic requests. 
-    base_url = _ANTHROPIC_DEFAULT_BASE_URL
+    base_url = _pool_runtime_base_url(entry, _ANTHROPIC_DEFAULT_BASE_URL) if pool_present else _ANTHROPIC_DEFAULT_BASE_URL
     try:
         from hermes_cli.config import load_config
         cfg = load_config()
diff --git a/agent/credential_pool.py b/agent/credential_pool.py
new file mode 100644
index 000000000..ad4dbcfc1
--- /dev/null
+++ b/agent/credential_pool.py
@@ -0,0 +1,844 @@
+"""Persistent multi-credential pool for same-provider failover."""
+
+from __future__ import annotations
+
+import logging
+import os
+import random
+import threading
+import time
+import uuid
+from dataclasses import dataclass, fields, replace
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+from hermes_constants import OPENROUTER_BASE_URL
+import hermes_cli.auth as auth_mod
+from hermes_cli.auth import (
+    ACCESS_TOKEN_REFRESH_SKEW_SECONDS,
+    CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS,
+    DEFAULT_AGENT_KEY_MIN_TTL_SECONDS,
+    PROVIDER_REGISTRY,
+    _agent_key_is_usable,
+    _codex_access_token_is_expiring,
+    _decode_jwt_claims,
+    _is_expiring,
+    _load_auth_store,
+    _load_provider_state,
+    read_credential_pool,
+    write_credential_pool,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _load_config_safe() -> Optional[dict]:
+    """Load config.yaml, returning None on any error."""
+    try:
+        from hermes_cli.config import load_config
+
+        return load_config()
+    except Exception:
+        return None
+
+
+# --- Status and type constants ---
+
+STATUS_OK = "ok"
+STATUS_EXHAUSTED = "exhausted"
+
+AUTH_TYPE_OAUTH = "oauth"
+AUTH_TYPE_API_KEY = "api_key"
+
+SOURCE_MANUAL = "manual"
+
+STRATEGY_FILL_FIRST = "fill_first"
+STRATEGY_ROUND_ROBIN = "round_robin"
+STRATEGY_RANDOM = "random"
+STRATEGY_LEAST_USED = "least_used"
+SUPPORTED_POOL_STRATEGIES = {
+    STRATEGY_FILL_FIRST,
+    STRATEGY_ROUND_ROBIN,
+    STRATEGY_RANDOM,
+    STRATEGY_LEAST_USED,
+}
+
+# Cooldown before retrying an exhausted credential.
+# 429 (rate-limited) cools down faster since quotas reset frequently.
+# 402 (billing/quota) and other codes use a longer default.
+EXHAUSTED_TTL_429_SECONDS = 60 * 60  # 1 hour
+EXHAUSTED_TTL_DEFAULT_SECONDS = 24 * 60 * 60  # 24 hours
+
+# Pool key prefix for custom OpenAI-compatible endpoints.
+# Custom endpoints all share provider='custom' but are keyed by their
+# custom_providers name: 'custom:<name>'.
+CUSTOM_POOL_PREFIX = "custom:"
+
+
+# Fields that are only round-tripped through JSON — never used for logic as attributes.
+_EXTRA_KEYS = frozenset({ + "token_type", "scope", "client_id", "portal_base_url", "obtained_at", + "expires_in", "agent_key_id", "agent_key_expires_in", "agent_key_reused", + "agent_key_obtained_at", "tls", +}) + + +@dataclass +class PooledCredential: + provider: str + id: str + label: str + auth_type: str + priority: int + source: str + access_token: str + refresh_token: Optional[str] = None + last_status: Optional[str] = None + last_status_at: Optional[float] = None + last_error_code: Optional[int] = None + base_url: Optional[str] = None + expires_at: Optional[str] = None + expires_at_ms: Optional[int] = None + last_refresh: Optional[str] = None + inference_base_url: Optional[str] = None + agent_key: Optional[str] = None + agent_key_expires_at: Optional[str] = None + request_count: int = 0 + extra: Dict[str, Any] = None # type: ignore[assignment] + + def __post_init__(self): + if self.extra is None: + self.extra = {} + + def __getattr__(self, name: str): + if name in _EXTRA_KEYS: + return self.extra.get(name) + raise AttributeError(f"'{type(self).__name__}' object has no attribute {name!r}") + + @classmethod + def from_dict(cls, provider: str, payload: Dict[str, Any]) -> "PooledCredential": + field_names = {f.name for f in fields(cls) if f.name != "provider"} + data = {k: payload.get(k) for k in field_names if k in payload} + extra = {k: payload[k] for k in _EXTRA_KEYS if k in payload and payload[k] is not None} + data["extra"] = extra + data.setdefault("id", uuid.uuid4().hex[:6]) + data.setdefault("label", payload.get("source", provider)) + data.setdefault("auth_type", AUTH_TYPE_API_KEY) + data.setdefault("priority", 0) + data.setdefault("source", SOURCE_MANUAL) + data.setdefault("access_token", "") + return cls(provider=provider, **data) + + def to_dict(self) -> Dict[str, Any]: + _ALWAYS_EMIT = {"last_status", "last_status_at", "last_error_code"} + result: Dict[str, Any] = {} + for field_def in fields(self): + if field_def.name in ("provider", "extra"): + continue + value = getattr(self, field_def.name) + if value is not None or field_def.name in _ALWAYS_EMIT: + result[field_def.name] = value + for k, v in self.extra.items(): + if v is not None: + result[k] = v + return result + + @property + def runtime_api_key(self) -> str: + if self.provider == "nous": + return str(self.agent_key or self.access_token or "") + return str(self.access_token or "") + + @property + def runtime_base_url(self) -> Optional[str]: + if self.provider == "nous": + return self.inference_base_url or self.base_url + return self.base_url + + +def label_from_token(token: str, fallback: str) -> str: + claims = _decode_jwt_claims(token) + for key in ("email", "preferred_username", "upn"): + value = claims.get(key) + if isinstance(value, str) and value.strip(): + return value.strip() + return fallback + + +def _next_priority(entries: List[PooledCredential]) -> int: + return max((entry.priority for entry in entries), default=-1) + 1 + + +def _is_manual_source(source: str) -> bool: + normalized = (source or "").strip().lower() + return normalized == SOURCE_MANUAL or normalized.startswith(f"{SOURCE_MANUAL}:") + + +def _exhausted_ttl(error_code: Optional[int]) -> int: + """Return cooldown seconds based on the HTTP status that caused exhaustion.""" + if error_code == 429: + return EXHAUSTED_TTL_429_SECONDS + return EXHAUSTED_TTL_DEFAULT_SECONDS + + +def _normalize_custom_pool_name(name: str) -> str: + """Normalize a custom provider name for use as a pool key suffix.""" + return name.strip().lower().replace(" ", "-") + + 
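A minimal usage sketch of the pool API defined in this file (illustrative, not part of the patch): it assumes a config.yaml carrying a credential_pool_strategies map, the shape read by get_pool_strategy() below, and the provider names shown are hypothetical examples.

    # config.yaml (hypothetical values)
    # credential_pool_strategies:
    #   openrouter: round_robin
    #   anthropic: fill_first

    from agent.credential_pool import load_pool

    pool = load_pool("openrouter")       # seeds entries from auth.json + env vars
    entry = pool.select()                # applies the configured strategy
    if entry is not None:
        api_key = entry.runtime_api_key  # access_token (agent_key for nous)
        base_url = entry.runtime_base_url
        pool.mark_used(entry.id)         # request accounting for least_used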
+def _iter_custom_providers(config: Optional[dict] = None):
+    """Yield (normalized_name, entry_dict) for each valid custom_providers entry."""
+    if config is None:
+        config = _load_config_safe()
+    if config is None:
+        return
+    custom_providers = config.get("custom_providers")
+    if not isinstance(custom_providers, list):
+        return
+    for entry in custom_providers:
+        if not isinstance(entry, dict):
+            continue
+        name = entry.get("name")
+        if not isinstance(name, str):
+            continue
+        yield _normalize_custom_pool_name(name), entry
+
+
+def get_custom_provider_pool_key(base_url: str) -> Optional[str]:
+    """Look up the custom_providers list in config.yaml and return 'custom:<name>' for a matching base_url.
+
+    Returns None if no match is found.
+    """
+    if not base_url:
+        return None
+    normalized_url = base_url.strip().rstrip("/")
+    for norm_name, entry in _iter_custom_providers():
+        entry_url = str(entry.get("base_url") or "").strip().rstrip("/")
+        if entry_url and entry_url == normalized_url:
+            return f"{CUSTOM_POOL_PREFIX}{norm_name}"
+    return None
+
+
+def list_custom_pool_providers() -> List[str]:
+    """Return all 'custom:*' pool keys that have entries in auth.json."""
+    pool_data = read_credential_pool(None)
+    return sorted(
+        key for key in pool_data
+        if key.startswith(CUSTOM_POOL_PREFIX)
+        and isinstance(pool_data.get(key), list)
+        and pool_data[key]
+    )
+
+
+def _get_custom_provider_config(pool_key: str) -> Optional[Dict[str, Any]]:
+    """Return the custom_providers config entry matching a pool key like 'custom:together.ai'."""
+    if not pool_key.startswith(CUSTOM_POOL_PREFIX):
+        return None
+    suffix = pool_key[len(CUSTOM_POOL_PREFIX):]
+    for norm_name, entry in _iter_custom_providers():
+        if norm_name == suffix:
+            return entry
+    return None
+
+
+def get_pool_strategy(provider: str) -> str:
+    """Return the configured selection strategy for a provider."""
+    config = _load_config_safe()
+    if config is None:
+        return STRATEGY_FILL_FIRST
+
+    strategies = config.get("credential_pool_strategies")
+    if not isinstance(strategies, dict):
+        return STRATEGY_FILL_FIRST
+
+    strategy = str(strategies.get(provider, "") or "").strip().lower()
+    if strategy in SUPPORTED_POOL_STRATEGIES:
+        return strategy
+    return STRATEGY_FILL_FIRST
+
+
+class CredentialPool:
+    def __init__(self, provider: str, entries: List[PooledCredential]):
+        self.provider = provider
+        self._entries = sorted(entries, key=lambda entry: entry.priority)
+        self._current_id: Optional[str] = None
+        self._strategy = get_pool_strategy(provider)
+        self._lock = threading.Lock()
+
+    def has_credentials(self) -> bool:
+        return bool(self._entries)
+
+    def entries(self) -> List[PooledCredential]:
+        return list(self._entries)
+
+    def current(self) -> Optional[PooledCredential]:
+        if not self._current_id:
+            return None
+        return next((entry for entry in self._entries if entry.id == self._current_id), None)
+
+    def _replace_entry(self, old: PooledCredential, new: PooledCredential) -> None:
+        """Swap an entry in-place by id, preserving sort order."""
+        for idx, entry in enumerate(self._entries):
+            if entry.id == old.id:
+                self._entries[idx] = new
+                return
+
+    def _persist(self) -> None:
+        write_credential_pool(
+            self.provider,
+            [entry.to_dict() for entry in self._entries],
+        )
+
+    def _mark_exhausted(self, entry: PooledCredential, status_code: Optional[int]) -> PooledCredential:
+        updated = replace(
+            entry,
+            last_status=STATUS_EXHAUSTED,
+            last_status_at=time.time(),
+            last_error_code=status_code,
+        )
+        self._replace_entry(entry, updated)
+        self._persist()
return updated + + def _refresh_entry(self, entry: PooledCredential, *, force: bool) -> Optional[PooledCredential]: + if entry.auth_type != AUTH_TYPE_OAUTH or not entry.refresh_token: + if force: + self._mark_exhausted(entry, None) + return None + + try: + if self.provider == "anthropic": + from agent.anthropic_adapter import refresh_anthropic_oauth_pure + + refreshed = refresh_anthropic_oauth_pure( + entry.refresh_token, + use_json=entry.source.endswith("hermes_pkce"), + ) + updated = replace( + entry, + access_token=refreshed["access_token"], + refresh_token=refreshed["refresh_token"], + expires_at_ms=refreshed["expires_at_ms"], + ) + elif self.provider == "openai-codex": + refreshed = auth_mod.refresh_codex_oauth_pure( + entry.access_token, + entry.refresh_token, + ) + updated = replace( + entry, + access_token=refreshed["access_token"], + refresh_token=refreshed["refresh_token"], + last_refresh=refreshed.get("last_refresh"), + ) + elif self.provider == "nous": + nous_state = { + "access_token": entry.access_token, + "refresh_token": entry.refresh_token, + "client_id": entry.client_id, + "portal_base_url": entry.portal_base_url, + "inference_base_url": entry.inference_base_url, + "token_type": entry.token_type, + "scope": entry.scope, + "obtained_at": entry.obtained_at, + "expires_at": entry.expires_at, + "agent_key": entry.agent_key, + "agent_key_expires_at": entry.agent_key_expires_at, + "tls": entry.tls, + } + refreshed = auth_mod.refresh_nous_oauth_from_state( + nous_state, + min_key_ttl_seconds=DEFAULT_AGENT_KEY_MIN_TTL_SECONDS, + force_refresh=force, + force_mint=force, + ) + # Apply returned fields: dataclass fields via replace, extras via dict update + field_updates = {} + extra_updates = dict(entry.extra) + _field_names = {f.name for f in fields(entry)} + for k, v in refreshed.items(): + if k in _field_names: + field_updates[k] = v + elif k in _EXTRA_KEYS: + extra_updates[k] = v + updated = replace(entry, extra=extra_updates, **field_updates) + else: + return entry + except Exception as exc: + logger.debug("Credential refresh failed for %s/%s: %s", self.provider, entry.id, exc) + self._mark_exhausted(entry, None) + return None + + updated = replace(updated, last_status=STATUS_OK, last_status_at=None, last_error_code=None) + self._replace_entry(entry, updated) + self._persist() + return updated + + def _entry_needs_refresh(self, entry: PooledCredential) -> bool: + if entry.auth_type != AUTH_TYPE_OAUTH: + return False + if self.provider == "anthropic": + if entry.expires_at_ms is None: + return False + return int(entry.expires_at_ms) <= int(time.time() * 1000) + 120_000 + if self.provider == "openai-codex": + return _codex_access_token_is_expiring( + entry.access_token, + CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS, + ) + if self.provider == "nous": + # Nous refresh/mint can require network access and should happen when + # runtime credentials are actually resolved, not merely when the pool + # is enumerated for listing, migration, or selection. + return False + return False + + def mark_used(self, entry_id: Optional[str] = None) -> None: + """Increment request_count for tracking. 
Used by least_used strategy.""" + target_id = entry_id or self._current_id + if not target_id: + return + with self._lock: + for idx, entry in enumerate(self._entries): + if entry.id == target_id: + self._entries[idx] = replace(entry, request_count=entry.request_count + 1) + return + + def select(self) -> Optional[PooledCredential]: + with self._lock: + return self._select_unlocked() + + def _available_entries(self, *, clear_expired: bool = False, refresh: bool = False) -> List[PooledCredential]: + """Return entries not currently in exhaustion cooldown. + + When *clear_expired* is True, entries whose cooldown has elapsed are + reset to STATUS_OK and persisted. When *refresh* is True, entries + that need a token refresh are refreshed (skipped on failure). + """ + now = time.time() + cleared_any = False + available: List[PooledCredential] = [] + for entry in self._entries: + if entry.last_status == STATUS_EXHAUSTED: + ttl = _exhausted_ttl(entry.last_error_code) + if entry.last_status_at and now - entry.last_status_at < ttl: + continue + if clear_expired: + cleared = replace(entry, last_status=STATUS_OK, last_status_at=None, last_error_code=None) + self._replace_entry(entry, cleared) + entry = cleared + cleared_any = True + if refresh and self._entry_needs_refresh(entry): + refreshed = self._refresh_entry(entry, force=False) + if refreshed is None: + continue + entry = refreshed + available.append(entry) + if cleared_any: + self._persist() + return available + + def _select_unlocked(self) -> Optional[PooledCredential]: + available = self._available_entries(clear_expired=True, refresh=True) + if not available: + self._current_id = None + return None + + if self._strategy == STRATEGY_RANDOM: + entry = random.choice(available) + self._current_id = entry.id + return entry + + if self._strategy == STRATEGY_LEAST_USED and len(available) > 1: + entry = min(available, key=lambda e: e.request_count) + self._current_id = entry.id + return entry + + if self._strategy == STRATEGY_ROUND_ROBIN and len(available) > 1: + entry = available[0] + rotated = [candidate for candidate in self._entries if candidate.id != entry.id] + rotated.append(replace(entry, priority=len(self._entries) - 1)) + self._entries = [replace(candidate, priority=idx) for idx, candidate in enumerate(rotated)] + self._persist() + self._current_id = entry.id + return self.current() or entry + + entry = available[0] + self._current_id = entry.id + return entry + + def peek(self) -> Optional[PooledCredential]: + current = self.current() + if current is not None: + return current + available = self._available_entries() + return available[0] if available else None + + def mark_exhausted_and_rotate(self, *, status_code: Optional[int]) -> Optional[PooledCredential]: + with self._lock: + entry = self.current() or self._select_unlocked() + if entry is None: + return None + self._mark_exhausted(entry, status_code) + self._current_id = None + return self._select_unlocked() + + def try_refresh_current(self) -> Optional[PooledCredential]: + with self._lock: + return self._try_refresh_current_unlocked() + + def _try_refresh_current_unlocked(self) -> Optional[PooledCredential]: + entry = self.current() + if entry is None: + return None + refreshed = self._refresh_entry(entry, force=True) + if refreshed is not None: + self._current_id = refreshed.id + return refreshed + + def reset_statuses(self) -> int: + count = 0 + new_entries = [] + for entry in self._entries: + if entry.last_status or entry.last_status_at or entry.last_error_code: + 
new_entries.append(replace(entry, last_status=None, last_status_at=None, last_error_code=None)) + count += 1 + else: + new_entries.append(entry) + if count: + self._entries = new_entries + self._persist() + return count + + def remove_index(self, index: int) -> Optional[PooledCredential]: + if index < 1 or index > len(self._entries): + return None + removed = self._entries.pop(index - 1) + self._entries = [ + replace(entry, priority=new_priority) + for new_priority, entry in enumerate(self._entries) + ] + self._persist() + if self._current_id == removed.id: + self._current_id = None + return removed + + def add_entry(self, entry: PooledCredential) -> PooledCredential: + entry = replace(entry, priority=_next_priority(self._entries)) + self._entries.append(entry) + self._persist() + return entry + + +def _upsert_entry(entries: List[PooledCredential], provider: str, source: str, payload: Dict[str, Any]) -> bool: + existing_idx = None + for idx, entry in enumerate(entries): + if entry.source == source: + existing_idx = idx + break + + if existing_idx is None: + payload.setdefault("id", uuid.uuid4().hex[:6]) + payload.setdefault("priority", _next_priority(entries)) + payload.setdefault("label", payload.get("label") or source) + entries.append(PooledCredential.from_dict(provider, payload)) + return True + + existing = entries[existing_idx] + field_updates = {} + extra_updates = {} + _field_names = {f.name for f in fields(existing)} + for key, value in payload.items(): + if key in {"id", "priority"} or value is None: + continue + if key == "label" and existing.label: + continue + if key in _field_names: + if getattr(existing, key) != value: + field_updates[key] = value + elif key in _EXTRA_KEYS: + if existing.extra.get(key) != value: + extra_updates[key] = value + if field_updates or extra_updates: + if extra_updates: + field_updates["extra"] = {**existing.extra, **extra_updates} + entries[existing_idx] = replace(existing, **field_updates) + return True + return False + + +def _normalize_pool_priorities(provider: str, entries: List[PooledCredential]) -> bool: + if provider != "anthropic": + return False + + source_rank = { + "env:ANTHROPIC_TOKEN": 0, + "env:CLAUDE_CODE_OAUTH_TOKEN": 1, + "hermes_pkce": 2, + "claude_code": 3, + "env:ANTHROPIC_API_KEY": 4, + } + manual_entries = sorted( + (entry for entry in entries if _is_manual_source(entry.source)), + key=lambda entry: entry.priority, + ) + seeded_entries = sorted( + (entry for entry in entries if not _is_manual_source(entry.source)), + key=lambda entry: ( + source_rank.get(entry.source, len(source_rank)), + entry.priority, + entry.label, + ), + ) + + ordered = [*manual_entries, *seeded_entries] + id_to_idx = {entry.id: idx for idx, entry in enumerate(entries)} + changed = False + for new_priority, entry in enumerate(ordered): + if entry.priority != new_priority: + entries[id_to_idx[entry.id]] = replace(entry, priority=new_priority) + changed = True + return changed + + +def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tuple[bool, Set[str]]: + changed = False + active_sources: Set[str] = set() + auth_store = _load_auth_store() + + if provider == "anthropic": + from agent.anthropic_adapter import read_claude_code_credentials, read_hermes_oauth_credentials + + for source_name, creds in ( + ("hermes_pkce", read_hermes_oauth_credentials()), + ("claude_code", read_claude_code_credentials()), + ): + if creds and creds.get("accessToken"): + active_sources.add(source_name) + changed |= _upsert_entry( + entries, + provider, + 
source_name, + { + "source": source_name, + "auth_type": AUTH_TYPE_OAUTH, + "access_token": creds.get("accessToken", ""), + "refresh_token": creds.get("refreshToken"), + "expires_at_ms": creds.get("expiresAt"), + "label": label_from_token(creds.get("accessToken", ""), source_name), + }, + ) + + elif provider == "nous": + state = _load_provider_state(auth_store, "nous") + if state: + active_sources.add("device_code") + changed |= _upsert_entry( + entries, + provider, + "device_code", + { + "source": "device_code", + "auth_type": AUTH_TYPE_OAUTH, + "access_token": state.get("access_token", ""), + "refresh_token": state.get("refresh_token"), + "expires_at": state.get("expires_at"), + "token_type": state.get("token_type"), + "scope": state.get("scope"), + "client_id": state.get("client_id"), + "portal_base_url": state.get("portal_base_url"), + "inference_base_url": state.get("inference_base_url"), + "agent_key": state.get("agent_key"), + "agent_key_expires_at": state.get("agent_key_expires_at"), + "tls": state.get("tls") if isinstance(state.get("tls"), dict) else None, + "label": label_from_token(state.get("access_token", ""), "device_code"), + }, + ) + + elif provider == "openai-codex": + state = _load_provider_state(auth_store, "openai-codex") + tokens = state.get("tokens") if isinstance(state, dict) else None + if isinstance(tokens, dict) and tokens.get("access_token"): + active_sources.add("device_code") + changed |= _upsert_entry( + entries, + provider, + "device_code", + { + "source": "device_code", + "auth_type": AUTH_TYPE_OAUTH, + "access_token": tokens.get("access_token", ""), + "refresh_token": tokens.get("refresh_token"), + "base_url": "https://chatgpt.com/backend-api/codex", + "last_refresh": state.get("last_refresh"), + "label": label_from_token(tokens.get("access_token", ""), "device_code"), + }, + ) + + return changed, active_sources + + +def _seed_from_env(provider: str, entries: List[PooledCredential]) -> Tuple[bool, Set[str]]: + changed = False + active_sources: Set[str] = set() + if provider == "openrouter": + token = os.getenv("OPENROUTER_API_KEY", "").strip() + if token: + source = "env:OPENROUTER_API_KEY" + active_sources.add(source) + changed |= _upsert_entry( + entries, + provider, + source, + { + "source": source, + "auth_type": AUTH_TYPE_API_KEY, + "access_token": token, + "base_url": OPENROUTER_BASE_URL, + "label": "OPENROUTER_API_KEY", + }, + ) + return changed, active_sources + + pconfig = PROVIDER_REGISTRY.get(provider) + if not pconfig or pconfig.auth_type != AUTH_TYPE_API_KEY: + return changed, active_sources + + env_url = "" + if pconfig.base_url_env_var: + env_url = os.getenv(pconfig.base_url_env_var, "").strip().rstrip("/") + + env_vars = list(pconfig.api_key_env_vars) + if provider == "anthropic": + env_vars = [ + "ANTHROPIC_TOKEN", + "CLAUDE_CODE_OAUTH_TOKEN", + "ANTHROPIC_API_KEY", + ] + + for env_var in env_vars: + token = os.getenv(env_var, "").strip() + if not token: + continue + source = f"env:{env_var}" + active_sources.add(source) + auth_type = AUTH_TYPE_OAUTH if provider == "anthropic" and not token.startswith("sk-ant-api") else AUTH_TYPE_API_KEY + base_url = env_url or pconfig.inference_base_url + changed |= _upsert_entry( + entries, + provider, + source, + { + "source": source, + "auth_type": auth_type, + "access_token": token, + "base_url": base_url, + "label": env_var, + }, + ) + return changed, active_sources + + +def _prune_stale_seeded_entries(entries: List[PooledCredential], active_sources: Set[str]) -> bool: + retained = [ + entry + for 
entry in entries + if _is_manual_source(entry.source) + or entry.source in active_sources + or not ( + entry.source.startswith("env:") + or entry.source in {"claude_code", "hermes_pkce"} + ) + ] + if len(retained) == len(entries): + return False + entries[:] = retained + return True + + +def _seed_custom_pool(pool_key: str, entries: List[PooledCredential]) -> Tuple[bool, Set[str]]: + """Seed a custom endpoint pool from custom_providers config and model config.""" + changed = False + active_sources: Set[str] = set() + + # Seed from the custom_providers config entry's api_key field + cp_config = _get_custom_provider_config(pool_key) + if cp_config: + api_key = str(cp_config.get("api_key") or "").strip() + base_url = str(cp_config.get("base_url") or "").strip().rstrip("/") + name = str(cp_config.get("name") or "").strip() + if api_key: + source = f"config:{name}" + active_sources.add(source) + changed |= _upsert_entry( + entries, + pool_key, + source, + { + "source": source, + "auth_type": AUTH_TYPE_API_KEY, + "access_token": api_key, + "base_url": base_url, + "label": name or source, + }, + ) + + # Seed from model.api_key if model.provider=='custom' and model.base_url matches + try: + config = _load_config_safe() + model_cfg = config.get("model") if config else None + if isinstance(model_cfg, dict): + model_provider = str(model_cfg.get("provider") or "").strip().lower() + model_base_url = str(model_cfg.get("base_url") or "").strip().rstrip("/") + model_api_key = "" + for k in ("api_key", "api"): + v = model_cfg.get(k) + if isinstance(v, str) and v.strip(): + model_api_key = v.strip() + break + if model_provider == "custom" and model_base_url and model_api_key: + # Check if this model's base_url matches our custom provider + matched_key = get_custom_provider_pool_key(model_base_url) + if matched_key == pool_key: + source = "model_config" + active_sources.add(source) + changed |= _upsert_entry( + entries, + pool_key, + source, + { + "source": source, + "auth_type": AUTH_TYPE_API_KEY, + "access_token": model_api_key, + "base_url": model_base_url, + "label": "model_config", + }, + ) + except Exception: + pass + + return changed, active_sources + + +def load_pool(provider: str) -> CredentialPool: + provider = (provider or "").strip().lower() + raw_entries = read_credential_pool(provider) + entries = [PooledCredential.from_dict(provider, payload) for payload in raw_entries] + + if provider.startswith(CUSTOM_POOL_PREFIX): + # Custom endpoint pool — seed from custom_providers config and model config + custom_changed, custom_sources = _seed_custom_pool(provider, entries) + changed = custom_changed + changed |= _prune_stale_seeded_entries(entries, custom_sources) + else: + singleton_changed, singleton_sources = _seed_from_singletons(provider, entries) + env_changed, env_sources = _seed_from_env(provider, entries) + changed = singleton_changed or env_changed + changed |= _prune_stale_seeded_entries(entries, singleton_sources | env_sources) + changed |= _normalize_pool_priorities(provider, entries) + + if changed: + write_credential_pool( + provider, + [entry.to_dict() for entry in sorted(entries, key=lambda item: item.priority)], + ) + return CredentialPool(provider, entries) diff --git a/agent/model_metadata.py b/agent/model_metadata.py index 0c121e6f6..7486afb04 100644 --- a/agent/model_metadata.py +++ b/agent/model_metadata.py @@ -176,6 +176,7 @@ _URL_TO_PROVIDER: Dict[str, str] = { "api.deepseek.com": "deepseek", "api.githubcopilot.com": "copilot", "models.github.ai": "copilot", + 
"api.fireworks.ai": "fireworks", } diff --git a/agent/models_dev.py b/agent/models_dev.py index 283e8018f..b4b699558 100644 --- a/agent/models_dev.py +++ b/agent/models_dev.py @@ -43,6 +43,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = { "opencode-zen": "opencode", "opencode-go": "opencode-go", "kilocode": "kilo", + "fireworks": "fireworks-ai", } diff --git a/cli.py b/cli.py index e01a0e797..978b36091 100644 --- a/cli.py +++ b/cli.py @@ -1124,9 +1124,9 @@ class HermesCLI: self.acp_args: list[str] = [] self.base_url = ( base_url - or os.getenv("OPENAI_BASE_URL") - or os.getenv("OPENROUTER_BASE_URL", CLI_CONFIG["model"]["base_url"]) - ) + or CLI_CONFIG["model"].get("base_url", "") + or os.getenv("OPENROUTER_BASE_URL", "") + ) or None # Match key to resolved base_url: OpenRouter URL → prefer OPENROUTER_API_KEY, # custom endpoint → prefer OPENAI_API_KEY (issue #560). # Note: _ensure_runtime_credentials() re-resolves this before first use. @@ -1955,6 +1955,7 @@ class HermesCLI: resolved_api_mode = runtime.get("api_mode", self.api_mode) resolved_acp_command = runtime.get("command") resolved_acp_args = list(runtime.get("args") or []) + resolved_credential_pool = runtime.get("credential_pool") if not isinstance(api_key, str) or not api_key: # Custom / local endpoints (llama.cpp, ollama, vLLM, etc.) often # don't require authentication. When a base_url IS configured but @@ -1987,6 +1988,7 @@ class HermesCLI: self.api_mode = resolved_api_mode self.acp_command = resolved_acp_command self.acp_args = resolved_acp_args + self._credential_pool = resolved_credential_pool self._provider_source = runtime.get("source") self.api_key = api_key self.base_url = base_url @@ -2088,6 +2090,7 @@ class HermesCLI: "api_mode": self.api_mode, "command": self.acp_command, "args": list(self.acp_args or []), + "credential_pool": getattr(self, "_credential_pool", None), } effective_model = model_override or self.model self.agent = AIAgent( @@ -2098,6 +2101,7 @@ class HermesCLI: api_mode=runtime.get("api_mode"), acp_command=runtime.get("command"), acp_args=runtime.get("args"), + credential_pool=runtime.get("credential_pool"), max_iterations=self.max_turns, enabled_toolsets=self.enabled_toolsets, verbose_logging=self.verbose, @@ -3239,7 +3243,7 @@ class HermesCLI: print(f" {mid}{current_marker}") elif p["id"] == "custom": from hermes_cli.models import _get_custom_base_url - custom_url = _get_custom_base_url() or os.getenv("OPENAI_BASE_URL", "") + custom_url = _get_custom_base_url() if custom_url: print(f" endpoint: {custom_url}") if is_active: @@ -3904,6 +3908,8 @@ class HermesCLI: self._handle_stop_command() elif canonical == "background": self._handle_background_command(cmd_original) + elif canonical == "btw": + self._handle_btw_command(cmd_original) elif canonical == "queue": # Extract prompt after "/queue " or "/q " parts = cmd_original.split(None, 1) @@ -4190,6 +4196,121 @@ class HermesCLI: self._background_tasks[task_id] = thread thread.start() + def _handle_btw_command(self, cmd: str): + """Handle /btw — ephemeral side question using session context. + + Snapshots the current conversation history, spawns a no-tools agent in + a background thread, and prints the answer without persisting anything + to the main session. + """ + parts = cmd.strip().split(maxsplit=1) + if len(parts) < 2 or not parts[1].strip(): + _cprint(" Usage: /btw ") + _cprint(" Example: /btw what module owns session title sanitization?") + _cprint(" Answers using session context. 
No tools, not persisted.") + return + + question = parts[1].strip() + task_id = f"btw_{datetime.now().strftime('%H%M%S')}_{uuid.uuid4().hex[:6]}" + + if not self._ensure_runtime_credentials(): + _cprint(" (>_<) Cannot start /btw: no valid credentials.") + return + + turn_route = self._resolve_turn_agent_config(question) + history_snapshot = list(self.conversation_history) + + preview = question[:60] + ("..." if len(question) > 60 else "") + _cprint(f' 💬 /btw: "{preview}"') + + def run_btw(): + try: + btw_agent = AIAgent( + model=turn_route["model"], + api_key=turn_route["runtime"].get("api_key"), + base_url=turn_route["runtime"].get("base_url"), + provider=turn_route["runtime"].get("provider"), + api_mode=turn_route["runtime"].get("api_mode"), + acp_command=turn_route["runtime"].get("command"), + acp_args=turn_route["runtime"].get("args"), + max_iterations=8, + enabled_toolsets=[], + quiet_mode=True, + verbose_logging=False, + session_id=task_id, + platform="cli", + reasoning_config=self.reasoning_config, + providers_allowed=self._providers_only, + providers_ignored=self._providers_ignore, + providers_order=self._providers_order, + provider_sort=self._provider_sort, + provider_require_parameters=self._provider_require_params, + provider_data_collection=self._provider_data_collection, + fallback_model=self._fallback_model, + session_db=None, + skip_memory=True, + skip_context_files=True, + persist_session=False, + ) + + btw_prompt = ( + "[Ephemeral /btw side question. Answer using the conversation " + "context. No tools available. Be direct and concise.]\n\n" + + question + ) + result = btw_agent.run_conversation( + user_message=btw_prompt, + conversation_history=history_snapshot, + task_id=task_id, + sync_honcho=False, + ) + + response = (result.get("final_response") or "") if result else "" + if not response and result and result.get("error"): + response = f"Error: {result['error']}" + + # TUI refresh before printing + if self._app: + self._app.invalidate() + time.sleep(0.05) + print() + + if response: + try: + from hermes_cli.skin_engine import get_active_skin + _skin = get_active_skin() + _resp_color = _skin.get_color("response_border", "#4F6D4A") + except Exception: + _resp_color = "#4F6D4A" + + ChatConsole().print(Panel( + _rich_text_from_ansi(response), + title=f"[{_resp_color} bold]⚕ /btw[/]", + title_align="left", + border_style=_resp_color, + box=rich_box.HORIZONTALS, + padding=(1, 2), + )) + else: + _cprint(" 💬 /btw: (no response)") + + if self.bell_on_complete: + sys.stdout.write("\a") + sys.stdout.flush() + + except Exception as e: + if self._app: + self._app.invalidate() + time.sleep(0.05) + print() + _cprint(f" ❌ /btw failed: {e}") + finally: + if self._app: + self._invalidate(min_interval=0) + + thread = threading.Thread(target=run_btw, daemon=True, name=f"btw-{task_id}") + thread.start() + @staticmethod def _try_launch_chrome_debug(port: int, system: str) -> bool: """Try to launch Chrome/Chromium with remote debugging enabled. 
@@ -5597,6 +5718,8 @@ class HermesCLI: self.agent = None # Initialize agent if needed + if self.agent is None: + _cprint(f"{_DIM}Initializing agent...{_RST}") if not self._init_agent( model_override=turn_route["model"], runtime_override=turn_route["runtime"], @@ -7445,6 +7568,20 @@ class HermesCLI: # Register atexit cleanup so resources are freed even on unexpected exit atexit.register(_run_cleanup) + # Register signal handlers for graceful shutdown on SSH disconnect / SIGTERM + def _signal_handler(signum, frame): + """Handle SIGHUP/SIGTERM by triggering graceful cleanup.""" + logger.debug("Received signal %s, triggering graceful shutdown", signum) + raise KeyboardInterrupt() + + try: + import signal as _signal + _signal.signal(_signal.SIGTERM, _signal_handler) + if hasattr(_signal, 'SIGHUP'): + _signal.signal(_signal.SIGHUP, _signal_handler) + except Exception: + pass # Signal handlers may fail in restricted environments + # Install a custom asyncio exception handler that suppresses the # "Event loop is closed" RuntimeError from httpx transport cleanup. # This is defense-in-depth — the primary fix is neuter_async_httpx_del @@ -7468,7 +7605,7 @@ class HermesCLI: except Exception: pass app.run() - except (EOFError, KeyboardInterrupt): + except (EOFError, KeyboardInterrupt, BrokenPipeError): pass finally: self._should_exit = True @@ -7507,6 +7644,23 @@ class HermesCLI: self._session_db.end_session(self.agent.session_id, "cli_close") except (Exception, KeyboardInterrupt) as e: logger.debug("Could not close session in DB: %s", e) + # Plugin hook: on_session_end — safety net for interrupted exits. + # run_conversation() already fires this per-turn on normal completion, + # so only fire here if the agent was mid-turn (_agent_running) when + # the exit occurred, meaning run_conversation's hook didn't fire. + if self.agent and getattr(self, '_agent_running', False): + try: + from hermes_cli.plugins import invoke_hook as _invoke_hook + _invoke_hook( + "on_session_end", + session_id=self.agent.session_id, + completed=False, + interrupted=True, + model=getattr(self.agent, 'model', None), + platform=getattr(self.agent, 'platform', None) or "cli", + ) + except Exception: + pass _run_cleanup() self._print_exit_summary() diff --git a/gateway/config.py b/gateway/config.py index 8c7843780..c660bb48e 100644 --- a/gateway/config.py +++ b/gateway/config.py @@ -550,6 +550,8 @@ def load_gateway_config() -> GatewayConfig: os.environ["DISCORD_FREE_RESPONSE_CHANNELS"] = str(frc) if "auto_thread" in discord_cfg and not os.getenv("DISCORD_AUTO_THREAD"): os.environ["DISCORD_AUTO_THREAD"] = str(discord_cfg["auto_thread"]).lower() + if "reactions" in discord_cfg and not os.getenv("DISCORD_REACTIONS"): + os.environ["DISCORD_REACTIONS"] = str(discord_cfg["reactions"]).lower() # Telegram settings → env vars (env vars take precedence) telegram_cfg = yaml_cfg.get("telegram", {}) diff --git a/gateway/platforms/api_server.py b/gateway/platforms/api_server.py index 19fa5f60d..a27408f4c 100644 --- a/gateway/platforms/api_server.py +++ b/gateway/platforms/api_server.py @@ -380,6 +380,7 @@ class APIServerAdapter(BasePlatformAdapter): ephemeral_system_prompt: Optional[str] = None, session_id: Optional[str] = None, stream_delta_callback=None, + tool_progress_callback=None, ) -> Any: """ Create an AIAgent instance using the gateway's runtime config. 
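# Minimal sketch of the signal wiring added above: SIGTERM and SIGHUP are
# translated into KeyboardInterrupt so one cleanup path covers Ctrl-C, `kill`,
# and SSH disconnects alike. Registration is best-effort because
# signal.signal() raises when called off the main thread or in some sandboxes.
import signal

def install_graceful_shutdown() -> None:
    def _handler(signum, frame):
        raise KeyboardInterrupt()

    try:
        signal.signal(signal.SIGTERM, _handler)
        if hasattr(signal, "SIGHUP"):  # SIGHUP does not exist on Windows
            signal.signal(signal.SIGHUP, _handler)
    except (ValueError, OSError):
        pass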
@@ -412,6 +413,7 @@ class APIServerAdapter(BasePlatformAdapter): session_id=session_id, platform="api_server", stream_delta_callback=stream_delta_callback, + tool_progress_callback=tool_progress_callback, ) return agent @@ -514,6 +516,15 @@ class APIServerAdapter(BasePlatformAdapter): if delta is not None: _stream_q.put(delta) + def _on_tool_progress(name, preview, args): + """Inject tool progress into the SSE stream for Open WebUI.""" + if name.startswith("_"): + return # Skip internal events (_thinking) + from agent.display import get_tool_emoji + emoji = get_tool_emoji(name) + label = preview or name + _stream_q.put(f"\n`{emoji} {label}`\n") + # Start agent in background. agent_ref is a mutable container # so the SSE writer can interrupt the agent on client disconnect. agent_ref = [None] @@ -523,6 +534,7 @@ class APIServerAdapter(BasePlatformAdapter): ephemeral_system_prompt=system_prompt, session_id=session_id, stream_delta_callback=_on_delta, + tool_progress_callback=_on_tool_progress, agent_ref=agent_ref, )) @@ -1194,6 +1206,7 @@ class APIServerAdapter(BasePlatformAdapter): ephemeral_system_prompt: Optional[str] = None, session_id: Optional[str] = None, stream_delta_callback=None, + tool_progress_callback=None, agent_ref: Optional[list] = None, ) -> tuple: """ @@ -1214,6 +1227,7 @@ class APIServerAdapter(BasePlatformAdapter): ephemeral_system_prompt=ephemeral_system_prompt, session_id=session_id, stream_delta_callback=stream_delta_callback, + tool_progress_callback=tool_progress_callback, ) if agent_ref is not None: agent_ref[0] = agent diff --git a/gateway/platforms/discord.py b/gateway/platforms/discord.py index 9e0c9c123..168919b09 100644 --- a/gateway/platforms/discord.py +++ b/gateway/platforms/discord.py @@ -683,14 +683,22 @@ class DiscordAdapter(BasePlatformAdapter): logger.debug("[%s] remove_reaction failed (%s): %s", self.name, emoji, e) return False + def _reactions_enabled(self) -> bool: + """Check if message reactions are enabled via config/env.""" + return os.getenv("DISCORD_REACTIONS", "true").lower() not in ("false", "0", "no") + async def on_processing_start(self, event: MessageEvent) -> None: """Add an in-progress reaction for normal Discord message events.""" + if not self._reactions_enabled(): + return message = event.raw_message if hasattr(message, "add_reaction"): await self._add_reaction(message, "👀") async def on_processing_complete(self, event: MessageEvent, success: bool) -> None: """Swap the in-progress reaction for a final success/failure reaction.""" + if not self._reactions_enabled(): + return message = event.raw_message if hasattr(message, "add_reaction"): await self._remove_reaction(message, "👀") diff --git a/gateway/platforms/matrix.py b/gateway/platforms/matrix.py index 309baeee7..c9bcd945a 100644 --- a/gateway/platforms/matrix.py +++ b/gateway/platforms/matrix.py @@ -49,6 +49,14 @@ _STORE_DIR = _get_hermes_dir("platforms/matrix/store", "matrix/store") # Grace period: ignore messages older than this many seconds before startup. _STARTUP_GRACE_SECONDS = 5 +# E2EE key export file for persistence across restarts. +_KEY_EXPORT_FILE = _STORE_DIR / "exported_keys.txt" +_KEY_EXPORT_PASSPHRASE = "hermes-matrix-e2ee-keys" + +# Pending undecrypted events: cap and TTL for retry buffer. 
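# Sketch of the tool-progress relay from the api_server hunk above: tool events
# share one queue with streamed text deltas and are rendered as a short
# inline-code line, so chat frontends such as Open WebUI show them inside the
# answer as it streams. The emoji lookup is simplified here.
import queue

def make_tool_progress_callback(stream_q: "queue.Queue[str]"):
    def on_tool_progress(name: str, preview: str | None, args: dict) -> None:
        if name.startswith("_"):
            return  # internal events like _thinking are not user-visible
        label = preview or name
        stream_q.put(f"\n`🔧 {label}`\n")  # real code picks a per-tool emoji
    return on_tool_progress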
+_MAX_PENDING_EVENTS = 100 +_PENDING_EVENT_TTL = 300 # seconds — stop retrying after 5 min + def check_matrix_requirements() -> bool: """Return True if the Matrix adapter can be used.""" @@ -111,6 +119,10 @@ class MatrixAdapter(BasePlatformAdapter): self._processed_events: deque = deque(maxlen=1000) self._processed_events_set: set = set() + # Buffer for undecrypted events pending key receipt. + # Each entry: (room, event, timestamp) + self._pending_megolm: list = [] + def _is_duplicate_event(self, event_id) -> bool: """Return True if this event was already processed. Tracks the ID otherwise.""" if not event_id: @@ -232,6 +244,16 @@ class MatrixAdapter(BasePlatformAdapter): logger.info("Matrix: E2EE crypto initialized") except Exception as exc: logger.warning("Matrix: crypto init issue: %s", exc) + + # Import previously exported Megolm keys (survives restarts). + if _KEY_EXPORT_FILE.exists(): + try: + await client.import_keys( + str(_KEY_EXPORT_FILE), _KEY_EXPORT_PASSPHRASE, + ) + logger.info("Matrix: imported Megolm keys from backup") + except Exception as exc: + logger.debug("Matrix: could not import keys: %s", exc) elif self._encryption: logger.warning( "Matrix: E2EE requested but crypto store is not loaded; " @@ -286,6 +308,18 @@ class MatrixAdapter(BasePlatformAdapter): except (asyncio.CancelledError, Exception): pass + # Export Megolm keys before closing so the next restart can decrypt + # events that used sessions from this run. + if self._client and self._encryption and getattr(self._client, "olm", None): + try: + _STORE_DIR.mkdir(parents=True, exist_ok=True) + await self._client.export_keys( + str(_KEY_EXPORT_FILE), _KEY_EXPORT_PASSPHRASE, + ) + logger.info("Matrix: exported Megolm keys for next restart") + except Exception as exc: + logger.debug("Matrix: could not export keys on disconnect: %s", exc) + if self._client: await self._client.close() self._client = None @@ -665,17 +699,22 @@ class MatrixAdapter(BasePlatformAdapter): Hermes uses a custom sync loop instead of matrix-nio's sync_forever(), so we need to explicitly drive the key management work that sync_forever() normally handles for encrypted rooms. + + Also auto-trusts all devices (so senders share session keys with us) + and retries decryption for any buffered MegolmEvents. """ client = self._client if not client or not self._encryption or not getattr(client, "olm", None): return + did_query_keys = client.should_query_keys + tasks = [asyncio.create_task(client.send_to_device_messages())] if client.should_upload_keys: tasks.append(asyncio.create_task(client.keys_upload())) - if client.should_query_keys: + if did_query_keys: tasks.append(asyncio.create_task(client.keys_query())) if client.should_claim_keys: @@ -691,6 +730,111 @@ class MatrixAdapter(BasePlatformAdapter): except Exception as exc: logger.warning("Matrix: E2EE maintenance task failed: %s", exc) + # After key queries, auto-trust all devices so senders share keys with + # us. For a bot this is the right default — we want to decrypt + # everything, not enforce manual verification. + if did_query_keys: + self._auto_trust_devices() + + # Retry any buffered undecrypted events now that new keys may have + # arrived (from key requests, key queries, or to-device forwarding). + if self._pending_megolm: + await self._retry_pending_decryptions() + + def _auto_trust_devices(self) -> None: + """Trust/verify all unverified devices we know about. + + When other clients see our device as verified, they proactively share + Megolm session keys with us. 
Without this, many clients will refuse + to include an unverified device in key distributions. + """ + client = self._client + if not client: + return + + device_store = getattr(client, "device_store", None) + if not device_store: + return + + own_device = getattr(client, "device_id", None) + trusted_count = 0 + + try: + # DeviceStore.__iter__ yields OlmDevice objects directly. + for device in device_store: + if getattr(device, "device_id", None) == own_device: + continue + if not getattr(device, "verified", False): + client.verify_device(device) + trusted_count += 1 + except Exception as exc: + logger.debug("Matrix: auto-trust error: %s", exc) + + if trusted_count: + logger.info("Matrix: auto-trusted %d new device(s)", trusted_count) + + async def _retry_pending_decryptions(self) -> None: + """Retry decrypting buffered MegolmEvents after new keys arrive.""" + import nio + + client = self._client + if not client or not self._pending_megolm: + return + + now = time.time() + still_pending: list = [] + + for room, event, ts in self._pending_megolm: + # Drop events that have aged past the TTL. + if now - ts > _PENDING_EVENT_TTL: + logger.debug( + "Matrix: dropping expired pending event %s (age %.0fs)", + getattr(event, "event_id", "?"), now - ts, + ) + continue + + try: + decrypted = client.decrypt_event(event) + except Exception: + # Still missing the key — keep in buffer. + still_pending.append((room, event, ts)) + continue + + if isinstance(decrypted, nio.MegolmEvent): + # decrypt_event returned the same undecryptable event. + still_pending.append((room, event, ts)) + continue + + logger.info( + "Matrix: decrypted buffered event %s (%s)", + getattr(event, "event_id", "?"), + type(decrypted).__name__, + ) + + # Route to the appropriate handler based on decrypted type. + try: + if isinstance(decrypted, nio.RoomMessageText): + await self._on_room_message(room, decrypted) + elif isinstance( + decrypted, + (nio.RoomMessageImage, nio.RoomMessageAudio, + nio.RoomMessageVideo, nio.RoomMessageFile), + ): + await self._on_room_message_media(room, decrypted) + else: + logger.debug( + "Matrix: decrypted event %s has unhandled type %s", + getattr(event, "event_id", "?"), + type(decrypted).__name__, + ) + except Exception as exc: + logger.warning( + "Matrix: error processing decrypted event %s: %s", + getattr(event, "event_id", "?"), exc, + ) + + self._pending_megolm = still_pending + # ------------------------------------------------------------------ # Event callbacks # ------------------------------------------------------------------ @@ -712,13 +856,29 @@ class MatrixAdapter(BasePlatformAdapter): if event_ts and event_ts < self._startup_ts - _STARTUP_GRACE_SECONDS: return - # Handle decrypted MegolmEvents — extract the inner event. + # Handle undecryptable MegolmEvents: request the missing session key + # and buffer the event for retry once the key arrives. if isinstance(event, nio.MegolmEvent): - # Failed to decrypt. logger.warning( - "Matrix: could not decrypt event %s in %s", + "Matrix: could not decrypt event %s in %s — requesting key", event.event_id, room.room_id, ) + + # Ask other devices in the room to forward the session key. + try: + resp = await self._client.request_room_key(event) + if hasattr(resp, "event_id") or not isinstance(resp, Exception): + logger.debug( + "Matrix: room key request sent for session %s", + getattr(event, "session_id", "?"), + ) + except Exception as exc: + logger.debug("Matrix: room key request failed: %s", exc) + + # Buffer for retry on next maintenance cycle. 
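# Condensed sketch of the buffered-decryption flow implemented here: keep
# undecryptable events with a timestamp, retry after each key-management
# cycle, drop entries older than the TTL, and cap the buffer size.
# `try_decrypt` stands in for nio's client.decrypt_event and returns None
# while the Megolm session key is still missing.
import time
from typing import Any, Callable, Optional

MAX_PENDING = 100
PENDING_TTL = 300.0  # seconds

def retry_pending(
    pending: list[tuple[Any, float]],
    try_decrypt: Callable[[Any], Optional[Any]],
    handle: Callable[[Any], None],
) -> list[tuple[Any, float]]:
    now = time.time()
    still_pending: list[tuple[Any, float]] = []
    for event, ts in pending:
        if now - ts > PENDING_TTL:
            continue  # key never arrived; stop retrying this event
        decrypted = try_decrypt(event)
        if decrypted is None:
            still_pending.append((event, ts))  # keep waiting for the key
        else:
            handle(decrypted)
    return still_pending[-MAX_PENDING:]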
+ self._pending_megolm.append((room, event, time.time())) + if len(self._pending_megolm) > _MAX_PENDING_EVENTS: + self._pending_megolm = self._pending_megolm[-_MAX_PENDING_EVENTS:] return # Skip edits (m.replace relation). diff --git a/gateway/platforms/telegram_network.py b/gateway/platforms/telegram_network.py index 93f1f0fb5..9f6d8bb46 100644 --- a/gateway/platforms/telegram_network.py +++ b/gateway/platforms/telegram_network.py @@ -135,6 +135,9 @@ def _normalize_fallback_ips(values: Iterable[str]) -> list[str]: if addr.version != 4: logger.warning("Ignoring non-IPv4 Telegram fallback IP: %s", raw) continue + if addr.is_private or addr.is_loopback or addr.is_link_local or addr.is_unspecified: + logger.warning("Ignoring private/internal Telegram fallback IP: %s", raw) + continue normalized.append(str(addr)) return normalized diff --git a/gateway/run.py b/gateway/run.py index 7638d8a51..2fe929447 100644 --- a/gateway/run.py +++ b/gateway/run.py @@ -298,6 +298,7 @@ def _resolve_runtime_agent_kwargs() -> dict: "api_mode": runtime.get("api_mode"), "command": runtime.get("command"), "args": list(runtime.get("args") or []), + "credential_pool": runtime.get("credential_pool"), } @@ -325,9 +326,9 @@ def _check_unavailable_skill(command_name: str) -> str | None: ) # Check optional skills (shipped with repo but not installed) - from hermes_constants import get_hermes_home + from hermes_constants import get_hermes_home, get_optional_skills_dir repo_root = Path(__file__).resolve().parent.parent - optional_dir = repo_root / "optional-skills" + optional_dir = get_optional_skills_dir(repo_root / "optional-skills") if optional_dir.exists(): for skill_md in optional_dir.rglob("SKILL.md"): name = skill_md.parent.name.lower().replace("_", "-") @@ -364,20 +365,19 @@ def _load_gateway_config() -> dict: def _resolve_gateway_model(config: dict | None = None) -> str: - """Read model from env/config — mirrors the resolution in _run_agent_sync. + """Read model from config.yaml — single source of truth. Without this, temporary AIAgent instances (memory flush, /compress) fall back to the hardcoded default which fails when the active provider is openai-codex. """ - model = os.getenv("HERMES_MODEL") or os.getenv("LLM_MODEL") or "" cfg = config if config is not None else _load_gateway_config() model_cfg = cfg.get("model", {}) if isinstance(model_cfg, str): - model = model_cfg + return model_cfg elif isinstance(model_cfg, dict): - model = model_cfg.get("default") or model_cfg.get("model") or model - return model + return model_cfg.get("default") or model_cfg.get("model") or "" + return "" def _resolve_hermes_bin() -> Optional[list[str]]: @@ -476,12 +476,7 @@ class GatewayRunner: self._honcho_managers: Dict[str, Any] = {} self._honcho_configs: Dict[str, Any] = {} - # Rate-limit compression warning messages sent to users. - # Keyed by chat_id — value is the timestamp of the last warning sent. - # Prevents the warning from firing on every message when a session - # remains above the threshold after compression. - self._compression_warn_sent: Dict[str, float] = {} - self._compression_warn_cooldown: int = 3600 # seconds (1 hour) + # Ensure tirith security scanner is available (downloads if needed) try: @@ -1702,6 +1697,11 @@ class GatewayRunner: # In DMs: offer pairing code. In groups: silently ignore. 
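# Sketch of the fallback-IP check from the telegram_network hunk above: only
# public IPv4 addresses are accepted, so a misconfigured fallback cannot point
# Telegram traffic at loopback or an internal network.
import ipaddress

def is_acceptable_fallback_ip(raw: str) -> bool:
    try:
        addr = ipaddress.ip_address(raw.strip())
    except ValueError:
        return False
    if addr.version != 4:
        return False
    return not (
        addr.is_private
        or addr.is_loopback
        or addr.is_link_local
        or addr.is_unspecified
    )

assert is_acceptable_fallback_ip("149.154.167.220")   # public address accepted
assert not is_acceptable_fallback_ip("127.0.0.1")     # loopback rejected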
if source.chat_type == "dm" and self._get_unauthorized_dm_behavior(source.platform) == "pair": platform_name = source.platform.value if source.platform else "unknown" + # Rate-limit ALL pairing responses (code or rejection) to + # prevent spamming the user with repeated messages when + # multiple DMs arrive in quick succession. + if self.pairing_store._is_rate_limited(platform_name, source.user_id): + return None code = self.pairing_store.generate_code( platform_name, source.user_id, source.user_name or "" ) @@ -1723,6 +1723,8 @@ class GatewayRunner: "Too many pairing requests right now~ " "Please try again later!" ) + # Record rate limit so subsequent messages are silently ignored + self.pairing_store._record_rate_limit(platform_name, source.user_id) return None # PRIORITY handling when an agent is already running for this session. @@ -1960,6 +1962,9 @@ class GatewayRunner: if canonical == "background": return await self._handle_background_command(event) + if canonical == "btw": + return await self._handle_btw_command(event) + if canonical == "voice": return await self._handle_voice_command(event) @@ -2277,6 +2282,29 @@ class GatewayRunner: _hyg_api_key = _hyg_runtime.get("api_key") except Exception: pass + + # Check custom_providers per-model context_length + # (same fallback as run_agent.py lines 1171-1189). + # Must run after runtime resolution so _hyg_base_url is set. + if _hyg_config_context_length is None and _hyg_base_url: + try: + _hyg_custom_providers = _hyg_data.get("custom_providers") + if isinstance(_hyg_custom_providers, list): + for _cp in _hyg_custom_providers: + if not isinstance(_cp, dict): + continue + _cp_url = (_cp.get("base_url") or "").rstrip("/") + if _cp_url and _cp_url == _hyg_base_url.rstrip("/"): + _cp_models = _cp.get("models", {}) + if isinstance(_cp_models, dict): + _cp_model_cfg = _cp_models.get(_hyg_model, {}) + if isinstance(_cp_model_cfg, dict): + _cp_ctx = _cp_model_cfg.get("context_length") + if _cp_ctx is not None: + _hyg_config_context_length = int(_cp_ctx) + break + except (TypeError, ValueError): + pass except Exception: pass @@ -2324,18 +2352,7 @@ class GatewayRunner: f"{_compress_token_threshold:,}", ) - _hyg_adapter = self.adapters.get(source.platform) _hyg_meta = {"thread_id": source.thread_id} if source.thread_id else None - if _hyg_adapter: - try: - await _hyg_adapter.send( - source.chat_id, - f"🗜️ Session is large ({_msg_count} messages, " - f"~{_approx_tokens:,} tokens). Auto-compressing...", - metadata=_hyg_meta, - ) - except Exception: - pass try: from run_agent import AIAgent @@ -2396,70 +2413,17 @@ class GatewayRunner: f"{_approx_tokens:,}", f"{_new_tokens:,}", ) - if _hyg_adapter: - try: - await _hyg_adapter.send( - source.chat_id, - f"🗜️ Compressed: {_msg_count} → " - f"{_new_count} messages, " - f"~{_approx_tokens:,} → " - f"~{_new_tokens:,} tokens", - metadata=_hyg_meta, - ) - except Exception: - pass - - # Still too large after compression — warn user - # Rate-limited to once per cooldown period per - # chat to avoid spamming on every message. 
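# Sketch of the custom_providers context-length fallback added above: given
# the resolved base_url and model id, look up a per-model context_length in
# the YAML config. The comparison ignores trailing slashes, mirroring the hunk.
def custom_context_length(config: dict, base_url: str, model: str):
    providers = config.get("custom_providers")
    if not isinstance(providers, list):
        return None
    want = base_url.rstrip("/")
    for entry in providers:
        if not isinstance(entry, dict):
            continue
        if (entry.get("base_url") or "").rstrip("/") != want:
            continue
        models = entry.get("models")
        model_cfg = models.get(model, {}) if isinstance(models, dict) else {}
        ctx = model_cfg.get("context_length") if isinstance(model_cfg, dict) else None
        try:
            return int(ctx) if ctx is not None else None
        except (TypeError, ValueError):
            return None
    return None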
if _new_tokens >= _warn_token_threshold: logger.warning( "Session hygiene: still ~%s tokens after " - "compression — suggesting /reset", + "compression", f"{_new_tokens:,}", ) - _now = time.time() - _last_warn = self._compression_warn_sent.get(source.chat_id, 0) - if _hyg_adapter and _now - _last_warn >= self._compression_warn_cooldown: - self._compression_warn_sent[source.chat_id] = _now - try: - await _hyg_adapter.send( - source.chat_id, - "⚠️ Session is still very large " - "after compression " - f"(~{_new_tokens:,} tokens). " - "Consider using /reset to start " - "fresh if you experience issues.", - metadata=_hyg_meta, - ) - except Exception: - pass except Exception as e: logger.warning( "Session hygiene auto-compress failed: %s", e ) - # Compression failed and session is dangerously large - if _approx_tokens >= _warn_token_threshold: - _hyg_adapter = self.adapters.get(source.platform) - _hyg_meta = {"thread_id": source.thread_id} if source.thread_id else None - _now = time.time() - _last_warn = self._compression_warn_sent.get(source.chat_id, 0) - if _hyg_adapter and _now - _last_warn >= self._compression_warn_cooldown: - self._compression_warn_sent[source.chat_id] = _now - try: - await _hyg_adapter.send( - source.chat_id, - f"⚠️ Session is very large " - f"({_msg_count} messages, " - f"~{_approx_tokens:,} tokens) and " - "auto-compression failed. Consider " - "using /compress or /reset to avoid " - "issues.", - metadata=_hyg_meta, - ) - except Exception: - pass # First-message onboarding -- only on the very first interaction ever if not history and not self.session_store.has_any_sessions(): @@ -2798,7 +2762,7 @@ class GatewayRunner: { "role": "session_meta", "tools": tool_defs or [], - "model": os.getenv("HERMES_MODEL", ""), + "model": _resolve_gateway_model(), "platform": source.platform.value if source.platform else "", "timestamp": ts, } @@ -3263,9 +3227,11 @@ class GatewayRunner: except Exception: current_provider = "openrouter" - # Detect custom endpoint - if current_provider == "openrouter" and os.getenv("OPENAI_BASE_URL", "").strip(): - current_provider = "custom" + # Detect custom endpoint from config base_url + if current_provider == "openrouter": + _cfg_base = model_cfg.get("base_url", "") if isinstance(model_cfg, dict) else "" + if _cfg_base and "openrouter.ai" not in _cfg_base: + current_provider = "custom" current_label = _PROVIDER_LABELS.get(current_provider, current_provider) @@ -4077,6 +4043,167 @@ class GatewayRunner: except Exception: pass + async def _handle_btw_command(self, event: MessageEvent) -> str: + """Handle /btw — ephemeral side question in the same chat.""" + question = event.get_command_args().strip() + if not question: + return ( + "Usage: /btw \n" + "Example: /btw what module owns session title sanitization?\n\n" + "Answers using session context. No tools, not persisted." + ) + + source = event.source + session_key = self._session_key_for_source(source) + + # Guard: one /btw at a time per session + existing = getattr(self, "_active_btw_tasks", {}).get(session_key) + if existing and not existing.done(): + return "A /btw is already running for this chat. Wait for it to finish." 
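# Sketch of the per-session guard used here: at most one /btw task per chat,
# with the registry entry cleared when the task completes. The session key and
# coroutine factory are placeholders.
import asyncio
from typing import Awaitable, Callable

_active: dict[str, asyncio.Task] = {}

def start_single_flight(session_key: str, factory: Callable[[], Awaitable[None]]) -> bool:
    existing = _active.get(session_key)
    if existing and not existing.done():
        return False  # caller tells the user a /btw is already running
    task = asyncio.create_task(factory())
    _active[session_key] = task

    def _cleanup(t: asyncio.Task) -> None:
        if _active.get(session_key) is t:
            _active.pop(session_key, None)

    task.add_done_callback(_cleanup)
    return True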
+ + if not hasattr(self, "_active_btw_tasks"): + self._active_btw_tasks: dict = {} + + import uuid as _uuid + task_id = f"btw_{datetime.now().strftime('%H%M%S')}_{_uuid.uuid4().hex[:6]}" + _task = asyncio.create_task(self._run_btw_task(question, source, session_key, task_id)) + self._background_tasks.add(_task) + self._active_btw_tasks[session_key] = _task + + def _cleanup(task): + self._background_tasks.discard(task) + if self._active_btw_tasks.get(session_key) is task: + self._active_btw_tasks.pop(session_key, None) + + _task.add_done_callback(_cleanup) + + preview = question[:60] + ("..." if len(question) > 60 else "") + return f'💬 /btw: "{preview}"\nReply will appear here shortly.' + + async def _run_btw_task( + self, question: str, source, session_key: str, task_id: str, + ) -> None: + """Execute an ephemeral /btw side question and deliver the answer.""" + from run_agent import AIAgent + + adapter = self.adapters.get(source.platform) + if not adapter: + logger.warning("No adapter for platform %s in /btw task %s", source.platform, task_id) + return + + _thread_meta = {"thread_id": source.thread_id} if source.thread_id else None + + try: + runtime_kwargs = _resolve_runtime_agent_kwargs() + if not runtime_kwargs.get("api_key"): + await adapter.send( + source.chat_id, + "❌ /btw failed: no provider credentials configured.", + metadata=_thread_meta, + ) + return + + user_config = _load_gateway_config() + model = _resolve_gateway_model(user_config) + platform_key = _platform_config_key(source.platform) + reasoning_config = self._load_reasoning_config() + turn_route = self._resolve_turn_agent_config(question, model, runtime_kwargs) + pr = self._provider_routing + + # Snapshot history from running agent or stored transcript + running_agent = self._running_agents.get(session_key) + if running_agent and running_agent is not _AGENT_PENDING_SENTINEL: + history_snapshot = list(getattr(running_agent, "_session_messages", []) or []) + else: + session_entry = self.session_store.get_or_create_session(source) + history_snapshot = self.session_store.load_transcript(session_entry.session_id) + + btw_prompt = ( + "[Ephemeral /btw side question. Answer using the conversation " + "context. No tools available. Be direct and concise.]\n\n" + + question + ) + + def run_sync(): + agent = AIAgent( + model=turn_route["model"], + **turn_route["runtime"], + max_iterations=8, + quiet_mode=True, + verbose_logging=False, + enabled_toolsets=[], + reasoning_config=reasoning_config, + providers_allowed=pr.get("only"), + providers_ignored=pr.get("ignore"), + providers_order=pr.get("order"), + provider_sort=pr.get("sort"), + provider_require_parameters=pr.get("require_parameters", False), + provider_data_collection=pr.get("data_collection"), + session_id=task_id, + platform=platform_key, + session_db=None, + fallback_model=self._fallback_model, + skip_memory=True, + skip_context_files=True, + persist_session=False, + ) + return agent.run_conversation( + user_message=btw_prompt, + conversation_history=history_snapshot, + task_id=task_id, + sync_honcho=False, + ) + + loop = asyncio.get_event_loop() + result = await loop.run_in_executor(None, run_sync) + + response = (result.get("final_response") or "") if result else "" + if not response and result and result.get("error"): + response = f"Error: {result['error']}" + if not response: + response = "(No response generated)" + + media_files, response = adapter.extract_media(response) + images, text_content = adapter.extract_images(response) + preview = question[:60] + ("..." 
if len(question) > 60 else "") + header = f'💬 /btw: "{preview}"\n\n' + + if text_content: + await adapter.send( + chat_id=source.chat_id, + content=header + text_content, + metadata=_thread_meta, + ) + elif not images and not media_files: + await adapter.send( + chat_id=source.chat_id, + content=header + "(No response generated)", + metadata=_thread_meta, + ) + + for image_url, alt_text in (images or []): + try: + await adapter.send_image(chat_id=source.chat_id, image_url=image_url, caption=alt_text) + except Exception: + pass + + for media_path in (media_files or []): + try: + await adapter.send_file(chat_id=source.chat_id, file_path=media_path) + except Exception: + pass + + except Exception as e: + logger.exception("/btw task %s failed", task_id) + try: + await adapter.send( + chat_id=source.chat_id, + content=f"❌ /btw failed: {e}", + metadata=_thread_meta, + ) + except Exception: + pass + async def _handle_reasoning_command(self, event: MessageEvent) -> str: """Handle /reasoning command — manage reasoning effort and display toggle. @@ -4656,8 +4783,8 @@ class GatewayRunner: async def _handle_update_command(self, event: MessageEvent) -> str: """Handle /update command — update Hermes Agent to the latest version. - Spawns ``hermes update`` in a separate systemd scope so it survives the - gateway restart that ``hermes update`` may trigger at the end. Marker + Spawns ``hermes update`` in a detached session (via ``setsid``) so it + survives the gateway restart that ``hermes update`` may trigger. Marker files are written so either the current gateway process or the next one can notify the user when the update finishes. """ @@ -4665,6 +4792,10 @@ class GatewayRunner: import shutil import subprocess from datetime import datetime + from hermes_cli.config import is_managed, format_managed_message + + if is_managed(): + return f"✗ {format_managed_message('update Hermes Agent')}" project_root = Path(__file__).parent.parent.resolve() git_dir = project_root / '.git' @@ -4693,28 +4824,28 @@ class GatewayRunner: pending_path.write_text(json.dumps(pending)) exit_code_path.unlink(missing_ok=True) - # Spawn `hermes update` in a separate cgroup so it survives gateway - # restart. systemd-run --user --scope creates a transient scope unit. + # Spawn `hermes update` detached so it survives gateway restart. + # Use setsid for portable session detach (works under system services + # where systemd-run --user fails due to missing D-Bus session). 
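# Sketch of the detached spawn described above: prefer the setsid binary and
# fall back to start_new_session=True (which calls os.setsid() in the child).
# Output and exit status land in files so whichever gateway process is alive
# afterwards can report the result.
import shlex
import shutil
import subprocess

def spawn_detached(cmd: list[str], out_path: str, code_path: str) -> None:
    quoted = " ".join(shlex.quote(p) for p in cmd)
    shell_cmd = (
        f"{quoted} > {shlex.quote(out_path)} 2>&1; "
        f"printf '%s' \"$?\" > {shlex.quote(code_path)}"
    )
    setsid_bin = shutil.which("setsid")
    argv = [setsid_bin, "bash", "-c", shell_cmd] if setsid_bin else ["bash", "-c", shell_cmd]
    subprocess.Popen(
        argv,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        start_new_session=True,  # detaches even when setsid is missing
    )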
hermes_cmd_str = " ".join(shlex.quote(part) for part in hermes_cmd) update_cmd = ( f"{hermes_cmd_str} update > {shlex.quote(str(output_path))} 2>&1; " f"status=$?; printf '%s' \"$status\" > {shlex.quote(str(exit_code_path))}" ) try: - systemd_run = shutil.which("systemd-run") - if systemd_run: + setsid_bin = shutil.which("setsid") + if setsid_bin: + # Preferred: setsid creates a new session, fully detached subprocess.Popen( - [systemd_run, "--user", "--scope", - "--unit=hermes-update", "--", - "bash", "-c", update_cmd], + [setsid_bin, "bash", "-c", update_cmd], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, start_new_session=True, ) else: - # Fallback: best-effort detach with start_new_session + # Fallback: start_new_session=True calls os.setsid() in child subprocess.Popen( - ["bash", "-c", f"nohup {update_cmd} &"], + ["bash", "-c", update_cmd], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, start_new_session=True, @@ -5705,7 +5836,9 @@ class GatewayRunner: # If so, update the session store entry so the NEXT message loads # the compressed transcript, not the stale pre-compression one. agent = agent_holder[0] + _session_was_split = False if agent and session_key and hasattr(agent, 'session_id') and agent.session_id != session_id: + _session_was_split = True logger.info( "Session split detected: %s → %s (compression)", session_id, agent.session_id, @@ -5717,6 +5850,13 @@ class GatewayRunner: effective_session_id = getattr(agent, 'session_id', session_id) if agent else session_id + # When compression created a new session, the messages list was + # shortened. Using the original history offset would produce an + # empty new_messages slice, causing the gateway to write only a + # user/assistant pair — losing the compressed summary and tail. + # Reset to 0 so the gateway writes ALL compressed messages. 
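# Worked example of the offset reset explained above. Normally the gateway
# persists only messages[history_offset:]; after compression swaps in a much
# shorter transcript under a new session id, that slice would be empty and the
# compressed summary would never be written, so the offset falls back to 0.
def effective_history_offset(session_was_split: bool, prior_history_len: int) -> int:
    return 0 if session_was_split else prior_history_len

assert effective_history_offset(True, 180) == 0     # write the whole compressed transcript
assert effective_history_offset(False, 180) == 180  # append only this turn's new messages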
+ _effective_history_offset = 0 if _session_was_split else len(agent_history) + # Auto-generate session title after first exchange (non-blocking) if final_response and self._session_db: try: @@ -5738,7 +5878,7 @@ class GatewayRunner: "messages": result_holder[0].get("messages", []) if result_holder[0] else [], "api_calls": result_holder[0].get("api_calls", 0) if result_holder[0] else 0, "tools": tools_holder[0] or [], - "history_offset": len(agent_history), + "history_offset": _effective_history_offset, "last_prompt_tokens": _last_prompt_toks, "input_tokens": _input_toks, "output_tokens": _output_toks, diff --git a/hermes_cli/auth.py b/hermes_cli/auth.py index 940a15564..250f842c7 100644 --- a/hermes_cli/auth.py +++ b/hermes_cli/auth.py @@ -160,7 +160,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = { id="alibaba", name="Alibaba Cloud (DashScope)", auth_type="api_key", - inference_base_url="https://coding-intl.dashscope.aliyuncs.com/v1", + inference_base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1", api_key_env_vars=("DASHSCOPE_API_KEY",), base_url_env_var="DASHSCOPE_BASE_URL", ), @@ -545,7 +545,11 @@ def _load_auth_store(auth_file: Optional[Path] = None) -> Dict[str, Any]: except Exception: return {"version": AUTH_STORE_VERSION, "providers": {}} - if isinstance(raw, dict) and isinstance(raw.get("providers"), dict): + if isinstance(raw, dict) and ( + isinstance(raw.get("providers"), dict) + or isinstance(raw.get("credential_pool"), dict) + ): + raw.setdefault("providers", {}) return raw # Migrate from PR's "systems" format if present @@ -613,6 +617,30 @@ def _save_provider_state(auth_store: Dict[str, Any], provider_id: str, state: Di auth_store["active_provider"] = provider_id +def read_credential_pool(provider_id: Optional[str] = None) -> Dict[str, Any]: + """Return the persisted credential pool, or one provider slice.""" + auth_store = _load_auth_store() + pool = auth_store.get("credential_pool") + if not isinstance(pool, dict): + pool = {} + if provider_id is None: + return dict(pool) + provider_entries = pool.get(provider_id) + return list(provider_entries) if isinstance(provider_entries, list) else [] + + +def write_credential_pool(provider_id: str, entries: List[Dict[str, Any]]) -> Path: + """Persist one provider's credential pool under auth.json.""" + with _auth_store_lock(): + auth_store = _load_auth_store() + pool = auth_store.get("credential_pool") + if not isinstance(pool, dict): + pool = {} + auth_store["credential_pool"] = pool + pool[provider_id] = list(entries) + return _save_auth_store(auth_store) + + def get_provider_auth_state(provider_id: str) -> Optional[Dict[str, Any]]: """Return persisted auth state for a provider, or None.""" auth_store = _load_auth_store() @@ -638,10 +666,25 @@ def clear_provider_auth(provider_id: Optional[str] = None) -> bool: return False providers = auth_store.get("providers", {}) - if target not in providers: - return False + if not isinstance(providers, dict): + providers = {} + auth_store["providers"] = providers - del providers[target] + pool = auth_store.get("credential_pool") + if not isinstance(pool, dict): + pool = {} + auth_store["credential_pool"] = pool + + cleared = False + if target in providers: + del providers[target] + cleared = True + if target in pool: + del pool[target] + cleared = True + + if not cleared: + return False if auth_store.get("active_provider") == target: auth_store["active_provider"] = None _save_auth_store(auth_store) @@ -898,15 +941,14 @@ def _save_codex_tokens(tokens: Dict[str, str], 
last_refresh: str = None) -> None _save_auth_store(auth_store) -def _refresh_codex_auth_tokens( - tokens: Dict[str, str], - timeout_seconds: float, -) -> Dict[str, str]: - """Refresh Codex access token using the refresh token. - - Saves the new tokens to Hermes auth store automatically. - """ - refresh_token = tokens.get("refresh_token") +def refresh_codex_oauth_pure( + access_token: str, + refresh_token: str, + *, + timeout_seconds: float = 20.0, +) -> Dict[str, Any]: + """Refresh Codex OAuth tokens without mutating Hermes auth state.""" + del access_token # Access token is only used by callers to decide whether to refresh. if not isinstance(refresh_token, str) or not refresh_token.strip(): raise AuthError( "Codex auth is missing refresh_token. Run `hermes login` to re-authenticate.", @@ -961,8 +1003,8 @@ def _refresh_codex_auth_tokens( relogin_required=True, ) from exc - access_token = refresh_payload.get("access_token") - if not isinstance(access_token, str) or not access_token.strip(): + refreshed_access = refresh_payload.get("access_token") + if not isinstance(refreshed_access, str) or not refreshed_access.strip(): raise AuthError( "Codex token refresh response was missing access_token.", provider="openai-codex", @@ -970,11 +1012,33 @@ def _refresh_codex_auth_tokens( relogin_required=True, ) - updated_tokens = dict(tokens) - updated_tokens["access_token"] = access_token.strip() + updated = { + "access_token": refreshed_access.strip(), + "refresh_token": refresh_token.strip(), + "last_refresh": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"), + } next_refresh = refresh_payload.get("refresh_token") if isinstance(next_refresh, str) and next_refresh.strip(): - updated_tokens["refresh_token"] = next_refresh.strip() + updated["refresh_token"] = next_refresh.strip() + return updated + + +def _refresh_codex_auth_tokens( + tokens: Dict[str, str], + timeout_seconds: float, +) -> Dict[str, str]: + """Refresh Codex access token using the refresh token. + + Saves the new tokens to Hermes auth store automatically. 
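# Sketch of the "pure refresh" split introduced here: the network exchange
# returns fresh tokens without touching auth.json, and a thin stateful wrapper
# decides whether to persist them. The token endpoint and payload shape below
# are generic OAuth placeholders, not the actual Codex API.
from datetime import datetime, timezone
import httpx

def refresh_oauth_pure(refresh_token: str, token_url: str, *, timeout: float = 20.0) -> dict:
    resp = httpx.post(
        token_url,
        data={"grant_type": "refresh_token", "refresh_token": refresh_token},
        timeout=timeout,
    )
    resp.raise_for_status()
    payload = resp.json()
    return {
        "access_token": payload["access_token"],
        # Providers may rotate the refresh token; fall back to the old one.
        "refresh_token": payload.get("refresh_token") or refresh_token,
        "last_refresh": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
    }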
+ """ + refreshed = refresh_codex_oauth_pure( + str(tokens.get("access_token", "") or ""), + str(tokens.get("refresh_token", "") or ""), + timeout_seconds=timeout_seconds, + ) + updated_tokens = dict(tokens) + updated_tokens["access_token"] = refreshed["access_token"] + updated_tokens["refresh_token"] = refreshed["refresh_token"] _save_codex_tokens(updated_tokens) return updated_tokens @@ -1313,6 +1377,122 @@ def _agent_key_is_usable(state: Dict[str, Any], min_ttl_seconds: int) -> bool: return not _is_expiring(state.get("agent_key_expires_at"), min_ttl_seconds) +def refresh_nous_oauth_pure( + access_token: str, + refresh_token: str, + client_id: str, + portal_base_url: str, + inference_base_url: str, + *, + token_type: str = "Bearer", + scope: str = DEFAULT_NOUS_SCOPE, + obtained_at: Optional[str] = None, + expires_at: Optional[str] = None, + agent_key: Optional[str] = None, + agent_key_expires_at: Optional[str] = None, + min_key_ttl_seconds: int = DEFAULT_AGENT_KEY_MIN_TTL_SECONDS, + timeout_seconds: float = 15.0, + insecure: Optional[bool] = None, + ca_bundle: Optional[str] = None, + force_refresh: bool = False, + force_mint: bool = False, +) -> Dict[str, Any]: + """Refresh Nous OAuth state without mutating auth.json.""" + state: Dict[str, Any] = { + "access_token": access_token, + "refresh_token": refresh_token, + "client_id": client_id or DEFAULT_NOUS_CLIENT_ID, + "portal_base_url": (portal_base_url or DEFAULT_NOUS_PORTAL_URL).rstrip("/"), + "inference_base_url": (inference_base_url or DEFAULT_NOUS_INFERENCE_URL).rstrip("/"), + "token_type": token_type or "Bearer", + "scope": scope or DEFAULT_NOUS_SCOPE, + "obtained_at": obtained_at, + "expires_at": expires_at, + "agent_key": agent_key, + "agent_key_expires_at": agent_key_expires_at, + "tls": { + "insecure": bool(insecure), + "ca_bundle": ca_bundle, + }, + } + verify = _resolve_verify(insecure=insecure, ca_bundle=ca_bundle, auth_state=state) + timeout = httpx.Timeout(timeout_seconds if timeout_seconds else 15.0) + + with httpx.Client(timeout=timeout, headers={"Accept": "application/json"}, verify=verify) as client: + if force_refresh or _is_expiring(state.get("expires_at"), ACCESS_TOKEN_REFRESH_SKEW_SECONDS): + refreshed = _refresh_access_token( + client=client, + portal_base_url=state["portal_base_url"], + client_id=state["client_id"], + refresh_token=state["refresh_token"], + ) + now = datetime.now(timezone.utc) + access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in")) + state["access_token"] = refreshed["access_token"] + state["refresh_token"] = refreshed.get("refresh_token") or state["refresh_token"] + state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer" + state["scope"] = refreshed.get("scope") or state.get("scope") + refreshed_url = _optional_base_url(refreshed.get("inference_base_url")) + if refreshed_url: + state["inference_base_url"] = refreshed_url + state["obtained_at"] = now.isoformat() + state["expires_in"] = access_ttl + state["expires_at"] = datetime.fromtimestamp( + now.timestamp() + access_ttl, tz=timezone.utc + ).isoformat() + + if force_mint or not _agent_key_is_usable(state, max(60, int(min_key_ttl_seconds))): + mint_payload = _mint_agent_key( + client=client, + portal_base_url=state["portal_base_url"], + access_token=state["access_token"], + min_ttl_seconds=min_key_ttl_seconds, + ) + now = datetime.now(timezone.utc) + state["agent_key"] = mint_payload.get("api_key") + state["agent_key_id"] = mint_payload.get("key_id") + state["agent_key_expires_at"] = 
mint_payload.get("expires_at") + state["agent_key_expires_in"] = mint_payload.get("expires_in") + state["agent_key_reused"] = bool(mint_payload.get("reused", False)) + state["agent_key_obtained_at"] = now.isoformat() + minted_url = _optional_base_url(mint_payload.get("inference_base_url")) + if minted_url: + state["inference_base_url"] = minted_url + + return state + + +def refresh_nous_oauth_from_state( + state: Dict[str, Any], + *, + min_key_ttl_seconds: int = DEFAULT_AGENT_KEY_MIN_TTL_SECONDS, + timeout_seconds: float = 15.0, + force_refresh: bool = False, + force_mint: bool = False, +) -> Dict[str, Any]: + """Refresh Nous OAuth from a state dict. Thin wrapper around refresh_nous_oauth_pure.""" + tls = state.get("tls") or {} + return refresh_nous_oauth_pure( + state.get("access_token", ""), + state.get("refresh_token", ""), + state.get("client_id", "hermes-cli"), + state.get("portal_base_url", DEFAULT_NOUS_PORTAL_URL), + state.get("inference_base_url", DEFAULT_NOUS_INFERENCE_URL), + token_type=state.get("token_type", "Bearer"), + scope=state.get("scope", DEFAULT_NOUS_SCOPE), + obtained_at=state.get("obtained_at"), + expires_at=state.get("expires_at"), + agent_key=state.get("agent_key"), + agent_key_expires_at=state.get("agent_key_expires_at"), + min_key_ttl_seconds=min_key_ttl_seconds, + timeout_seconds=timeout_seconds, + insecure=tls.get("insecure"), + ca_bundle=tls.get("ca_bundle"), + force_refresh=force_refresh, + force_mint=force_mint, + ) + + def resolve_nous_runtime_credentials( *, min_key_ttl_seconds: int = DEFAULT_AGENT_KEY_MIN_TTL_SECONDS, @@ -2180,34 +2360,36 @@ def _codex_device_code_login() -> Dict[str, Any]: } -def _login_nous(args, pconfig: ProviderConfig) -> None: - """Nous Portal device authorization flow.""" +def _nous_device_code_login( + *, + portal_base_url: Optional[str] = None, + inference_base_url: Optional[str] = None, + client_id: Optional[str] = None, + scope: Optional[str] = None, + open_browser: bool = True, + timeout_seconds: float = 15.0, + insecure: bool = False, + ca_bundle: Optional[str] = None, + min_key_ttl_seconds: int = 5 * 60, +) -> Dict[str, Any]: + """Run the Nous device-code flow and return full OAuth state without persisting.""" + pconfig = PROVIDER_REGISTRY["nous"] portal_base_url = ( - getattr(args, "portal_url", None) + portal_base_url or os.getenv("HERMES_PORTAL_BASE_URL") or os.getenv("NOUS_PORTAL_BASE_URL") or pconfig.portal_base_url ).rstrip("/") requested_inference_url = ( - getattr(args, "inference_url", None) + inference_base_url or os.getenv("NOUS_INFERENCE_BASE_URL") or pconfig.inference_base_url ).rstrip("/") - client_id = getattr(args, "client_id", None) or pconfig.client_id - scope = getattr(args, "scope", None) or pconfig.scope - open_browser = not getattr(args, "no_browser", False) - timeout_seconds = getattr(args, "timeout", None) or 15.0 + client_id = client_id or pconfig.client_id + scope = scope or pconfig.scope timeout = httpx.Timeout(timeout_seconds) - - insecure = bool(getattr(args, "insecure", False)) - ca_bundle = ( - getattr(args, "ca_bundle", None) - or os.getenv("HERMES_CA_BUNDLE") - or os.getenv("SSL_CERT_FILE") - ) verify: bool | str = False if insecure else (ca_bundle if ca_bundle else True) - # Skip browser open in SSH sessions if _is_remote_session(): open_browser = False @@ -2218,74 +2400,109 @@ def _login_nous(args, pconfig: ProviderConfig) -> None: elif ca_bundle: print(f"TLS verification: custom CA bundle ({ca_bundle})") - try: - with httpx.Client(timeout=timeout, headers={"Accept": "application/json"}, 
verify=verify) as client: - device_data = _request_device_code( - client=client, portal_base_url=portal_base_url, - client_id=client_id, scope=scope, - ) - - verification_url = str(device_data["verification_uri_complete"]) - user_code = str(device_data["user_code"]) - expires_in = int(device_data["expires_in"]) - interval = int(device_data["interval"]) - - print() - print("To continue:") - print(f" 1. Open: {verification_url}") - print(f" 2. If prompted, enter code: {user_code}") - - if open_browser: - opened = webbrowser.open(verification_url) - if opened: - print(" (Opened browser for verification)") - else: - print(" Could not open browser automatically — use the URL above.") - - effective_interval = max(1, min(interval, DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS)) - print(f"Waiting for approval (polling every {effective_interval}s)...") - - token_data = _poll_for_token( - client=client, portal_base_url=portal_base_url, - client_id=client_id, device_code=str(device_data["device_code"]), - expires_in=expires_in, poll_interval=interval, - ) - - # Process token response - now = datetime.now(timezone.utc) - token_expires_in = _coerce_ttl_seconds(token_data.get("expires_in", 0)) - expires_at = now.timestamp() + token_expires_in - inference_base_url = ( - _optional_base_url(token_data.get("inference_base_url")) - or requested_inference_url + with httpx.Client(timeout=timeout, headers={"Accept": "application/json"}, verify=verify) as client: + device_data = _request_device_code( + client=client, + portal_base_url=portal_base_url, + client_id=client_id, + scope=scope, ) - if inference_base_url != requested_inference_url: - print(f"Using portal-provided inference URL: {inference_base_url}") - auth_state = { - "portal_base_url": portal_base_url, - "inference_base_url": inference_base_url, - "client_id": client_id, - "scope": token_data.get("scope") or scope, - "token_type": token_data.get("token_type", "Bearer"), - "access_token": token_data["access_token"], - "refresh_token": token_data.get("refresh_token"), - "obtained_at": now.isoformat(), - "expires_at": datetime.fromtimestamp(expires_at, tz=timezone.utc).isoformat(), - "expires_in": token_expires_in, - "tls": { - "insecure": verify is False, - "ca_bundle": verify if isinstance(verify, str) else None, - }, - "agent_key": None, - "agent_key_id": None, - "agent_key_expires_at": None, - "agent_key_expires_in": None, - "agent_key_reused": None, - "agent_key_obtained_at": None, - } + verification_url = str(device_data["verification_uri_complete"]) + user_code = str(device_data["user_code"]) + expires_in = int(device_data["expires_in"]) + interval = int(device_data["interval"]) + + print() + print("To continue:") + print(f" 1. Open: {verification_url}") + print(f" 2. 
If prompted, enter code: {user_code}") + + if open_browser: + opened = webbrowser.open(verification_url) + if opened: + print(" (Opened browser for verification)") + else: + print(" Could not open browser automatically — use the URL above.") + + effective_interval = max(1, min(interval, DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS)) + print(f"Waiting for approval (polling every {effective_interval}s)...") + + token_data = _poll_for_token( + client=client, + portal_base_url=portal_base_url, + client_id=client_id, + device_code=str(device_data["device_code"]), + expires_in=expires_in, + poll_interval=interval, + ) + + now = datetime.now(timezone.utc) + token_expires_in = _coerce_ttl_seconds(token_data.get("expires_in", 0)) + expires_at = now.timestamp() + token_expires_in + resolved_inference_url = ( + _optional_base_url(token_data.get("inference_base_url")) + or requested_inference_url + ) + if resolved_inference_url != requested_inference_url: + print(f"Using portal-provided inference URL: {resolved_inference_url}") + + auth_state = { + "portal_base_url": portal_base_url, + "inference_base_url": resolved_inference_url, + "client_id": client_id, + "scope": token_data.get("scope") or scope, + "token_type": token_data.get("token_type", "Bearer"), + "access_token": token_data["access_token"], + "refresh_token": token_data.get("refresh_token"), + "obtained_at": now.isoformat(), + "expires_at": datetime.fromtimestamp(expires_at, tz=timezone.utc).isoformat(), + "expires_in": token_expires_in, + "tls": { + "insecure": verify is False, + "ca_bundle": verify if isinstance(verify, str) else None, + }, + "agent_key": None, + "agent_key_id": None, + "agent_key_expires_at": None, + "agent_key_expires_in": None, + "agent_key_reused": None, + "agent_key_obtained_at": None, + } + return refresh_nous_oauth_from_state( + auth_state, + min_key_ttl_seconds=min_key_ttl_seconds, + timeout_seconds=timeout_seconds, + force_refresh=False, + force_mint=True, + ) + + +def _login_nous(args, pconfig: ProviderConfig) -> None: + """Nous Portal device authorization flow.""" + timeout_seconds = getattr(args, "timeout", None) or 15.0 + insecure = bool(getattr(args, "insecure", False)) + ca_bundle = ( + getattr(args, "ca_bundle", None) + or os.getenv("HERMES_CA_BUNDLE") + or os.getenv("SSL_CERT_FILE") + ) + + try: + auth_state = _nous_device_code_login( + portal_base_url=getattr(args, "portal_url", None) or pconfig.portal_base_url, + inference_base_url=getattr(args, "inference_url", None) or pconfig.inference_base_url, + client_id=getattr(args, "client_id", None) or pconfig.client_id, + scope=getattr(args, "scope", None) or pconfig.scope, + open_browser=not getattr(args, "no_browser", False), + timeout_seconds=timeout_seconds, + insecure=insecure, + ca_bundle=ca_bundle, + min_key_ttl_seconds=5 * 60, + ) + inference_base_url = auth_state["inference_base_url"] + verify: bool | str = False if insecure else (ca_bundle if ca_bundle else True) - # Save auth state with _auth_store_lock(): auth_store = _load_auth_store() _save_provider_state(auth_store, "nous", auth_state) @@ -2297,18 +2514,14 @@ def _login_nous(args, pconfig: ProviderConfig) -> None: print(f" Auth state: {saved_to}") print(f" Config updated: {config_path} (model.provider=nous)") - # Mint an initial agent key and list available models try: - runtime_creds = resolve_nous_runtime_credentials( - min_key_ttl_seconds=5 * 60, - timeout_seconds=timeout_seconds, - insecure=insecure, ca_bundle=ca_bundle, - ) - runtime_key = runtime_creds.get("api_key") - runtime_base_url = 
runtime_creds.get("base_url") or inference_base_url + runtime_key = auth_state.get("agent_key") or auth_state.get("access_token") if not isinstance(runtime_key, str) or not runtime_key: - raise AuthError("No runtime API key available to fetch models", - provider="nous", code="invalid_token") + raise AuthError( + "No runtime API key available to fetch models", + provider="nous", + code="invalid_token", + ) # Use curated model list (same as OpenRouter defaults) instead # of the full /models dump which returns hundreds of models. diff --git a/hermes_cli/auth_commands.py b/hermes_cli/auth_commands.py new file mode 100644 index 000000000..096387746 --- /dev/null +++ b/hermes_cli/auth_commands.py @@ -0,0 +1,470 @@ +"""Credential-pool auth subcommands.""" + +from __future__ import annotations + +from getpass import getpass +import math +import time +from types import SimpleNamespace +import uuid + +from agent.credential_pool import ( + AUTH_TYPE_API_KEY, + AUTH_TYPE_OAUTH, + CUSTOM_POOL_PREFIX, + SOURCE_MANUAL, + STATUS_EXHAUSTED, + STRATEGY_FILL_FIRST, + STRATEGY_ROUND_ROBIN, + STRATEGY_RANDOM, + STRATEGY_LEAST_USED, + SUPPORTED_POOL_STRATEGIES, + PooledCredential, + _normalize_custom_pool_name, + get_pool_strategy, + label_from_token, + list_custom_pool_providers, + load_pool, + _exhausted_ttl, +) +import hermes_cli.auth as auth_mod +from hermes_cli.auth import PROVIDER_REGISTRY +from hermes_constants import OPENROUTER_BASE_URL + + +# Providers that support OAuth login in addition to API keys. +_OAUTH_CAPABLE_PROVIDERS = {"anthropic", "nous", "openai-codex"} + + +def _get_custom_provider_names() -> list: + """Return list of (display_name, pool_key) tuples for custom_providers in config.""" + try: + from hermes_cli.config import load_config + + config = load_config() + except Exception: + return [] + custom_providers = config.get("custom_providers") + if not isinstance(custom_providers, list): + return [] + result = [] + for entry in custom_providers: + if not isinstance(entry, dict): + continue + name = entry.get("name") + if not isinstance(name, str) or not name.strip(): + continue + pool_key = f"{CUSTOM_POOL_PREFIX}{_normalize_custom_pool_name(name)}" + result.append((name.strip(), pool_key)) + return result + + +def _resolve_custom_provider_input(raw: str) -> str | None: + """If raw input matches a custom_providers entry name (case-insensitive), return its pool key.""" + normalized = (raw or "").strip().lower().replace(" ", "-") + if not normalized: + return None + # Direct match on 'custom:name' format + if normalized.startswith(CUSTOM_POOL_PREFIX): + return normalized + for display_name, pool_key in _get_custom_provider_names(): + if _normalize_custom_pool_name(display_name) == normalized: + return pool_key + return None + + +def _normalize_provider(provider: str) -> str: + normalized = (provider or "").strip().lower() + if normalized in {"or", "open-router"}: + return "openrouter" + # Check if it matches a custom provider name + custom_key = _resolve_custom_provider_input(normalized) + if custom_key: + return custom_key + return normalized + + +def _provider_base_url(provider: str) -> str: + if provider == "openrouter": + return OPENROUTER_BASE_URL + if provider.startswith(CUSTOM_POOL_PREFIX): + from agent.credential_pool import _get_custom_provider_config + + cp_config = _get_custom_provider_config(provider) + if cp_config: + return str(cp_config.get("base_url") or "").strip() + return "" + pconfig = PROVIDER_REGISTRY.get(provider) + return pconfig.inference_base_url if pconfig else "" + 
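# Sketch of the pool-key scheme used by auth_commands above: every
# custom_providers entry maps to a stable "custom:<slug>" key, so pooled
# credentials survive changes in display-name casing or spacing. The slug rule
# mirrors the normalization applied to user input in _normalize_provider; the
# real _normalize_custom_pool_name may differ in edge cases.
CUSTOM_POOL_PREFIX = "custom:"

def pool_key_for(display_name: str) -> str:
    slug = display_name.strip().lower().replace(" ", "-")
    return f"{CUSTOM_POOL_PREFIX}{slug}"

assert pool_key_for("My Local vLLM") == "custom:my-local-vllm"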
+ +def _oauth_default_label(provider: str, count: int) -> str: + return f"{provider}-oauth-{count}" + + +def _api_key_default_label(count: int) -> str: + return f"api-key-{count}" + + +def _display_source(source: str) -> str: + return source.split(":", 1)[1] if source.startswith("manual:") else source + + +def _format_exhausted_status(entry) -> str: + if entry.last_status != STATUS_EXHAUSTED: + return "" + code = f" ({entry.last_error_code})" if entry.last_error_code else "" + if not entry.last_status_at: + return f" exhausted{code}" + remaining = max(0, int(math.ceil((entry.last_status_at + _exhausted_ttl(entry.last_error_code)) - time.time()))) + if remaining <= 0: + return f" exhausted{code} (ready to retry)" + minutes, seconds = divmod(remaining, 60) + hours, minutes = divmod(minutes, 60) + if hours: + wait = f"{hours}h {minutes}m" + elif minutes: + wait = f"{minutes}m {seconds}s" + else: + wait = f"{seconds}s" + return f" exhausted{code} ({wait} left)" + + +def auth_add_command(args) -> None: + provider = _normalize_provider(getattr(args, "provider", "")) + if provider not in PROVIDER_REGISTRY and provider != "openrouter" and not provider.startswith(CUSTOM_POOL_PREFIX): + raise SystemExit(f"Unknown provider: {provider}") + + requested_type = str(getattr(args, "auth_type", "") or "").strip().lower() + if requested_type in {AUTH_TYPE_API_KEY, "api-key"}: + requested_type = AUTH_TYPE_API_KEY + if not requested_type: + if provider.startswith(CUSTOM_POOL_PREFIX): + requested_type = AUTH_TYPE_API_KEY + else: + requested_type = AUTH_TYPE_OAUTH if provider in {"anthropic", "nous", "openai-codex"} else AUTH_TYPE_API_KEY + + pool = load_pool(provider) + + if requested_type == AUTH_TYPE_API_KEY: + token = (getattr(args, "api_key", None) or "").strip() + if not token: + token = getpass("Paste your API key: ").strip() + if not token: + raise SystemExit("No API key provided.") + default_label = _api_key_default_label(len(pool.entries()) + 1) + label = (getattr(args, "label", None) or "").strip() + if not label: + label = input(f"Label (optional, default: {default_label}): ").strip() or default_label + entry = PooledCredential( + provider=provider, + id=uuid.uuid4().hex[:6], + label=label, + auth_type=AUTH_TYPE_API_KEY, + priority=0, + source=SOURCE_MANUAL, + access_token=token, + base_url=_provider_base_url(provider), + ) + pool.add_entry(entry) + print(f'Added {provider} credential #{len(pool.entries())}: "{label}"') + return + + if provider == "anthropic": + from agent import anthropic_adapter as anthropic_mod + + creds = anthropic_mod.run_hermes_oauth_login_pure() + if not creds: + raise SystemExit("Anthropic OAuth login did not return credentials.") + label = (getattr(args, "label", None) or "").strip() or label_from_token( + creds["access_token"], + _oauth_default_label(provider, len(pool.entries()) + 1), + ) + entry = PooledCredential( + provider=provider, + id=uuid.uuid4().hex[:6], + label=label, + auth_type=AUTH_TYPE_OAUTH, + priority=0, + source=f"{SOURCE_MANUAL}:hermes_pkce", + access_token=creds["access_token"], + refresh_token=creds.get("refresh_token"), + expires_at_ms=creds.get("expires_at_ms"), + base_url=_provider_base_url(provider), + ) + pool.add_entry(entry) + print(f'Added {provider} OAuth credential #{len(pool.entries())}: "{entry.label}"') + return + + if provider == "nous": + creds = auth_mod._nous_device_code_login( + portal_base_url=getattr(args, "portal_url", None), + inference_base_url=getattr(args, "inference_url", None), + client_id=getattr(args, "client_id", None), + 
scope=getattr(args, "scope", None), + open_browser=not getattr(args, "no_browser", False), + timeout_seconds=getattr(args, "timeout", None) or 15.0, + insecure=bool(getattr(args, "insecure", False)), + ca_bundle=getattr(args, "ca_bundle", None), + min_key_ttl_seconds=max(60, int(getattr(args, "min_key_ttl_seconds", 5 * 60))), + ) + label = (getattr(args, "label", None) or "").strip() or label_from_token( + creds.get("access_token", ""), + _oauth_default_label(provider, len(pool.entries()) + 1), + ) + entry = PooledCredential.from_dict(provider, { + **creds, + "label": label, + "auth_type": AUTH_TYPE_OAUTH, + "source": f"{SOURCE_MANUAL}:device_code", + "base_url": creds.get("inference_base_url"), + }) + pool.add_entry(entry) + print(f'Added {provider} OAuth credential #{len(pool.entries())}: "{entry.label}"') + return + + if provider == "openai-codex": + creds = auth_mod._codex_device_code_login() + label = (getattr(args, "label", None) or "").strip() or label_from_token( + creds["tokens"]["access_token"], + _oauth_default_label(provider, len(pool.entries()) + 1), + ) + entry = PooledCredential( + provider=provider, + id=uuid.uuid4().hex[:6], + label=label, + auth_type=AUTH_TYPE_OAUTH, + priority=0, + source=f"{SOURCE_MANUAL}:device_code", + access_token=creds["tokens"]["access_token"], + refresh_token=creds["tokens"].get("refresh_token"), + base_url=creds.get("base_url"), + last_refresh=creds.get("last_refresh"), + ) + pool.add_entry(entry) + print(f'Added {provider} OAuth credential #{len(pool.entries())}: "{entry.label}"') + return + + raise SystemExit(f"`hermes auth add {provider}` is not implemented for auth type {requested_type} yet.") + + +def auth_list_command(args) -> None: + provider_filter = _normalize_provider(getattr(args, "provider", "") or "") + if provider_filter: + providers = [provider_filter] + else: + providers = sorted({ + *PROVIDER_REGISTRY.keys(), + "openrouter", + *list_custom_pool_providers(), + }) + for provider in providers: + pool = load_pool(provider) + entries = pool.entries() + if not entries: + continue + current = pool.peek() + print(f"{provider} ({len(entries)} credentials):") + for idx, entry in enumerate(entries, start=1): + marker = " " + if current is not None and entry.id == current.id: + marker = "← " + status = _format_exhausted_status(entry) + source = _display_source(entry.source) + print(f" #{idx} {entry.label:<20} {entry.auth_type:<7} {source}{status} {marker}".rstrip()) + print() + + +def auth_remove_command(args) -> None: + provider = _normalize_provider(getattr(args, "provider", "")) + index = int(getattr(args, "index")) + pool = load_pool(provider) + removed = pool.remove_index(index) + if removed is None: + raise SystemExit(f"No credential #{index} for provider {provider}.") + print(f"Removed {provider} credential #{index} ({removed.label})") + + +def auth_reset_command(args) -> None: + provider = _normalize_provider(getattr(args, "provider", "")) + pool = load_pool(provider) + count = pool.reset_statuses() + print(f"Reset status on {count} {provider} credentials") + + +def _interactive_auth() -> None: + """Interactive credential pool management when `hermes auth` is called bare.""" + # Show current pool status first + print("Credential Pool Status") + print("=" * 50) + + auth_list_command(SimpleNamespace(provider=None)) + print() + + # Main menu + choices = [ + "Add a credential", + "Remove a credential", + "Reset cooldowns for a provider", + "Set rotation strategy for a provider", + "Exit", + ] + print("What would you like to do?") + for 
i, choice in enumerate(choices, 1): + print(f" {i}. {choice}") + + try: + raw = input("\nChoice: ").strip() + except (EOFError, KeyboardInterrupt): + return + + if not raw or raw == str(len(choices)): + return + + if raw == "1": + _interactive_add() + elif raw == "2": + _interactive_remove() + elif raw == "3": + _interactive_reset() + elif raw == "4": + _interactive_strategy() + + +def _pick_provider(prompt: str = "Provider") -> str: + """Prompt for a provider name with auto-complete hints.""" + known = sorted(set(list(PROVIDER_REGISTRY.keys()) + ["openrouter"])) + custom_names = _get_custom_provider_names() + if custom_names: + custom_display = [name for name, _key in custom_names] + print(f"\nKnown providers: {', '.join(known)}") + print(f"Custom endpoints: {', '.join(custom_display)}") + else: + print(f"\nKnown providers: {', '.join(known)}") + try: + raw = input(f"{prompt}: ").strip() + except (EOFError, KeyboardInterrupt): + raise SystemExit() + return _normalize_provider(raw) + + +def _interactive_add() -> None: + provider = _pick_provider("Provider to add credential for") + if provider not in PROVIDER_REGISTRY and provider != "openrouter" and not provider.startswith(CUSTOM_POOL_PREFIX): + raise SystemExit(f"Unknown provider: {provider}") + + # For OAuth-capable providers, ask which type + if provider in _OAUTH_CAPABLE_PROVIDERS: + print(f"\n{provider} supports both API keys and OAuth login.") + print(" 1. API key (paste a key from the provider dashboard)") + print(" 2. OAuth login (authenticate via browser)") + try: + type_choice = input("Type [1/2]: ").strip() + except (EOFError, KeyboardInterrupt): + return + if type_choice == "2": + auth_type = "oauth" + else: + auth_type = "api_key" + else: + auth_type = "api_key" + + auth_add_command(SimpleNamespace( + provider=provider, auth_type=auth_type, label=None, api_key=None, + portal_url=None, inference_url=None, client_id=None, scope=None, + no_browser=False, timeout=None, insecure=False, ca_bundle=None, + )) + + +def _interactive_remove() -> None: + provider = _pick_provider("Provider to remove credential from") + pool = load_pool(provider) + if not pool.has_credentials(): + print(f"No credentials for {provider}.") + return + + # Show entries with indices + for i, e in enumerate(pool.entries(), 1): + exhausted = _format_exhausted_status(e) + print(f" #{i} {e.label:25s} {e.auth_type:10s} {e.source}{exhausted}") + + try: + raw = input("Remove # (or blank to cancel): ").strip() + except (EOFError, KeyboardInterrupt): + return + if not raw: + return + + try: + index = int(raw) + except ValueError: + print("Invalid number.") + return + + auth_remove_command(SimpleNamespace(provider=provider, index=index)) + + +def _interactive_reset() -> None: + provider = _pick_provider("Provider to reset cooldowns for") + + auth_reset_command(SimpleNamespace(provider=provider)) + + +def _interactive_strategy() -> None: + provider = _pick_provider("Provider to set strategy for") + current = get_pool_strategy(provider) + strategies = [STRATEGY_FILL_FIRST, STRATEGY_ROUND_ROBIN, STRATEGY_LEAST_USED, STRATEGY_RANDOM] + + print(f"\nCurrent strategy for {provider}: {current}") + print() + descriptions = { + STRATEGY_FILL_FIRST: "Use first key until exhausted, then next", + STRATEGY_ROUND_ROBIN: "Cycle through keys evenly", + STRATEGY_LEAST_USED: "Always pick the least-used key", + STRATEGY_RANDOM: "Random selection", + } + for i, s in enumerate(strategies, 1): + marker = " ←" if s == current else "" + print(f" {i}. 
{s:15s} — {descriptions.get(s, '')}{marker}")
+
+    try:
+        raw = input("\nStrategy [1-4]: ").strip()
+    except (EOFError, KeyboardInterrupt):
+        return
+    if not raw:
+        return
+
+    try:
+        idx = int(raw) - 1
+        if idx < 0:
+            # Guard against "0" or negative input wrapping around via
+            # Python's negative indexing and silently picking a strategy
+            # from the end of the list.
+            raise IndexError(raw)
+        strategy = strategies[idx]
+    except (ValueError, IndexError):
+        print("Invalid choice.")
+        return
+
+    from hermes_cli.config import load_config, save_config
+    cfg = load_config()
+    pool_strategies = cfg.get("credential_pool_strategies") or {}
+    if not isinstance(pool_strategies, dict):
+        pool_strategies = {}
+    pool_strategies[provider] = strategy
+    cfg["credential_pool_strategies"] = pool_strategies
+    save_config(cfg)
+    print(f"Set {provider} strategy to: {strategy}")
+
+
+def auth_command(args) -> None:
+    action = getattr(args, "auth_action", "")
+    if action == "add":
+        auth_add_command(args)
+        return
+    if action == "list":
+        auth_list_command(args)
+        return
+    if action == "remove":
+        auth_remove_command(args)
+        return
+    if action == "reset":
+        auth_reset_command(args)
+        return
+    # No subcommand — launch interactive mode
+    _interactive_auth()
diff --git a/hermes_cli/banner.py b/hermes_cli/banner.py
index 5ecc94acf..7435750bc 100644
--- a/hermes_cli/banner.py
+++ b/hermes_cli/banner.py
@@ -432,10 +432,11 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
     try:
         behind = get_update_result(timeout=0.5)
         if behind and behind > 0:
+            from hermes_cli.config import recommended_update_command
             commits_word = "commit" if behind == 1 else "commits"
             right_lines.append(
                 f"[bold yellow]⚠ {behind} {commits_word} behind[/]"
-                f"[dim yellow] — run [bold]hermes update[/bold] to update[/]"
+                f"[dim yellow] — run [bold]{recommended_update_command()}[/bold] to update[/]"
             )
     except Exception:
         pass  # Never break the banner over an update check
diff --git a/hermes_cli/claw.py b/hermes_cli/claw.py
index 014a2abeb..87735f931 100644
--- a/hermes_cli/claw.py
+++ b/hermes_cli/claw.py
@@ -4,14 +4,19 @@
 Usage:
     hermes claw migrate                             # Interactive migration from ~/.openclaw
     hermes claw migrate --dry-run                   # Preview what would be migrated
     hermes claw migrate --preset full --overwrite   # Full migration, overwrite conflicts
+    hermes claw cleanup                             # Archive leftover OpenClaw directories
+    hermes claw cleanup --dry-run                   # Preview what would be archived
 """
 
 import importlib.util
 import logging
+import shutil
 import sys
+from datetime import datetime
 from pathlib import Path
 
 from hermes_cli.config import get_hermes_home, get_config_path, load_config, save_config
+from hermes_constants import get_optional_skills_dir
 from hermes_cli.setup import (
     Colors,
     color,
@@ -19,6 +24,7 @@ from hermes_cli.setup import (
     print_info,
     print_success,
     print_error,
+    print_warning,
     prompt_yes_no,
 )
 
@@ -27,8 +33,7 @@ logger = logging.getLogger(__name__)
 
 PROJECT_ROOT = Path(__file__).parent.parent.resolve()
 
 _OPENCLAW_SCRIPT = (
-    PROJECT_ROOT
-    / "optional-skills"
+    get_optional_skills_dir(PROJECT_ROOT / "optional-skills")
     / "migration"
     / "openclaw-migration"
     / "scripts"
@@ -45,6 +50,18 @@ _OPENCLAW_SCRIPT_INSTALLED = (
     / "openclaw_to_hermes.py"
 )
 
+# Known OpenClaw directory names (current + legacy)
+_OPENCLAW_DIR_NAMES = (".openclaw", ".clawdbot", ".moldbot")
+
+# State files commonly found in OpenClaw workspace directories that cause
+# confusion after migration (the agent discovers them and writes to them)
+_WORKSPACE_STATE_GLOBS = (
+    "*/todo.json",
+    "*/sessions/*",
+    "*/memory/*.json",
+    "*/logs/*",
+)
+
 
 def _find_migration_script() -> Path | None:
     """Find the openclaw_to_hermes.py script in known locations."""
@@ -71,19 +88,88 @@ def
_load_migration_module(script_path: Path): return mod +def _find_openclaw_dirs() -> list[Path]: + """Find all OpenClaw directories on disk.""" + found = [] + for name in _OPENCLAW_DIR_NAMES: + candidate = Path.home() / name + if candidate.is_dir(): + found.append(candidate) + return found + + +def _scan_workspace_state(source_dir: Path) -> list[tuple[Path, str]]: + """Scan an OpenClaw directory for workspace state files that cause confusion. + + Returns a list of (path, description) tuples. + """ + findings: list[tuple[Path, str]] = [] + + # Direct state files in the root + for name in ("todo.json", "sessions", "logs"): + candidate = source_dir / name + if candidate.exists(): + kind = "directory" if candidate.is_dir() else "file" + findings.append((candidate, f"Root {kind}: {name}")) + + # State files inside workspace directories + for child in sorted(source_dir.iterdir()): + if not child.is_dir() or child.name.startswith("."): + continue + # Check for workspace-like subdirectories + for state_name in ("todo.json", "sessions", "logs", "memory"): + state_path = child / state_name + if state_path.exists(): + kind = "directory" if state_path.is_dir() else "file" + rel = state_path.relative_to(source_dir) + findings.append((state_path, f"Workspace {kind}: {rel}")) + + return findings + + +def _archive_directory(source_dir: Path, dry_run: bool = False) -> Path: + """Rename an OpenClaw directory to .pre-migration. + + Returns the archive path. + """ + timestamp = datetime.now().strftime("%Y%m%d") + archive_name = f"{source_dir.name}.pre-migration" + archive_path = source_dir.parent / archive_name + + # If archive already exists, add timestamp + if archive_path.exists(): + archive_name = f"{source_dir.name}.pre-migration-{timestamp}" + archive_path = source_dir.parent / archive_name + + # If still exists (multiple runs same day), add counter + counter = 2 + while archive_path.exists(): + archive_name = f"{source_dir.name}.pre-migration-{timestamp}-{counter}" + archive_path = source_dir.parent / archive_name + counter += 1 + + if not dry_run: + source_dir.rename(archive_path) + + return archive_path + + def claw_command(args): """Route hermes claw subcommands.""" action = getattr(args, "claw_action", None) if action == "migrate": _cmd_migrate(args) + elif action in ("cleanup", "clean"): + _cmd_cleanup(args) else: - print("Usage: hermes claw migrate [options]") + print("Usage: hermes claw [options]") print() print("Commands:") print(" migrate Migrate settings from OpenClaw to Hermes") + print(" cleanup Archive leftover OpenClaw directories after migration") print() - print("Run 'hermes claw migrate --help' for migration options.") + print("Run 'hermes claw --help' for options.") def _cmd_migrate(args): @@ -210,6 +296,168 @@ def _cmd_migrate(args): # Print results _print_migration_report(report, dry_run) + # After successful non-dry-run migration, offer to archive the source directory + if not dry_run and report.get("summary", {}).get("migrated", 0) > 0: + _offer_source_archival(source_dir, getattr(args, "yes", False)) + + +def _offer_source_archival(source_dir: Path, auto_yes: bool = False): + """After migration, offer to rename the source directory to prevent state fragmentation. + + OpenClaw workspace directories contain state files (todo.json, sessions, etc.) + that the agent may discover and write to, causing confusion. Renaming the + directory prevents this. 
+ """ + if not source_dir.is_dir(): + return + + # Scan for state files that could cause problems + state_files = _scan_workspace_state(source_dir) + + print() + print_header("Post-Migration Cleanup") + print_info("The OpenClaw directory still exists and contains workspace state files") + print_info("that can confuse the agent (todo lists, sessions, logs).") + if state_files: + print() + print(color(" Found state files:", Colors.YELLOW)) + # Show up to 10 most relevant findings + for path, desc in state_files[:10]: + print(f" {desc}") + if len(state_files) > 10: + print(f" ... and {len(state_files) - 10} more") + print() + print_info(f"Recommend: rename {source_dir.name}/ to {source_dir.name}.pre-migration/") + print_info("This prevents the agent from discovering old workspace directories.") + print_info("You can always rename it back if needed.") + print() + + if auto_yes or prompt_yes_no(f"Archive {source_dir} now?", default=True): + try: + archive_path = _archive_directory(source_dir) + print_success(f"Archived: {source_dir} → {archive_path}") + print_info("The original directory has been renamed, not deleted.") + print_info(f"To undo: mv {archive_path} {source_dir}") + except OSError as e: + print_error(f"Could not archive: {e}") + print_info(f"You can do it manually: mv {source_dir} {source_dir}.pre-migration") + else: + print_info("Skipped. You can archive later with: hermes claw cleanup") + + +def _cmd_cleanup(args): + """Archive leftover OpenClaw directories after migration. + + Scans for OpenClaw directories that still exist after migration and offers + to rename them to .pre-migration to prevent state fragmentation. + """ + dry_run = getattr(args, "dry_run", False) + auto_yes = getattr(args, "yes", False) + explicit_source = getattr(args, "source", None) + + print() + print( + color( + "┌─────────────────────────────────────────────────────────┐", + Colors.MAGENTA, + ) + ) + print( + color( + "│ ⚕ Hermes — OpenClaw Cleanup │", + Colors.MAGENTA, + ) + ) + print( + color( + "└─────────────────────────────────────────────────────────┘", + Colors.MAGENTA, + ) + ) + + # Find OpenClaw directories + if explicit_source: + dirs_to_check = [Path(explicit_source)] + else: + dirs_to_check = _find_openclaw_dirs() + + if not dirs_to_check: + print() + print_success("No OpenClaw directories found. Nothing to clean up.") + return + + total_archived = 0 + + for source_dir in dirs_to_check: + print() + print_header(f"Found: {source_dir}") + + # Scan for state files + state_files = _scan_workspace_state(source_dir) + + # Show directory stats + try: + workspace_dirs = [ + d for d in source_dir.iterdir() + if d.is_dir() and not d.name.startswith(".") + and any((d / name).exists() for name in ("todo.json", "SOUL.md", "MEMORY.md", "USER.md")) + ] + except OSError: + workspace_dirs = [] + + if workspace_dirs: + print_info(f"Workspace directories: {len(workspace_dirs)}") + for ws in workspace_dirs[:5]: + items = [] + if (ws / "todo.json").exists(): + items.append("todo.json") + if (ws / "sessions").is_dir(): + items.append("sessions/") + if (ws / "SOUL.md").exists(): + items.append("SOUL.md") + if (ws / "MEMORY.md").exists(): + items.append("MEMORY.md") + detail = ", ".join(items) if items else "empty" + print(f" {ws.name}/ ({detail})") + if len(workspace_dirs) > 5: + print(f" ... 
and {len(workspace_dirs) - 5} more") + + if state_files: + print() + print(color(f" {len(state_files)} state file(s) that could cause confusion:", Colors.YELLOW)) + for path, desc in state_files[:8]: + print(f" {desc}") + if len(state_files) > 8: + print(f" ... and {len(state_files) - 8} more") + + print() + + if dry_run: + archive_path = _archive_directory(source_dir, dry_run=True) + print_info(f"Would archive: {source_dir} → {archive_path}") + else: + if auto_yes or prompt_yes_no(f"Archive {source_dir}?", default=True): + try: + archive_path = _archive_directory(source_dir) + print_success(f"Archived: {source_dir} → {archive_path}") + total_archived += 1 + except OSError as e: + print_error(f"Could not archive: {e}") + print_info(f"Try manually: mv {source_dir} {source_dir}.pre-migration") + else: + print_info("Skipped.") + + # Summary + print() + if dry_run: + print_info(f"Dry run complete. {len(dirs_to_check)} directory(ies) would be archived.") + print_info("Run without --dry-run to archive them.") + elif total_archived: + print_success(f"Cleaned up {total_archived} OpenClaw directory(ies).") + print_info("Directories were renamed, not deleted. You can undo by renaming them back.") + else: + print_info("No directories were archived.") + def _print_migration_report(report: dict, dry_run: bool): """Print a formatted migration report.""" diff --git a/hermes_cli/colors.py b/hermes_cli/colors.py index d30f99c62..8c85b4c0b 100644 --- a/hermes_cli/colors.py +++ b/hermes_cli/colors.py @@ -1,8 +1,24 @@ """Shared ANSI color utilities for Hermes CLI modules.""" +import os import sys +def should_use_color() -> bool: + """Return True when colored output is appropriate. + + Respects the NO_COLOR environment variable (https://no-color.org/) + and TERM=dumb, in addition to the existing TTY check. + """ + if os.environ.get("NO_COLOR") is not None: + return False + if os.environ.get("TERM") == "dumb": + return False + if not sys.stdout.isatty(): + return False + return True + + class Colors: RESET = "\033[0m" BOLD = "\033[1m" @@ -16,7 +32,7 @@ class Colors: def color(text: str, *codes) -> str: - """Apply color codes to text (only when output is a TTY).""" - if not sys.stdout.isatty(): + """Apply color codes to text (only when color output is appropriate).""" + if not should_use_color(): return text return "".join(codes) + text + Colors.RESET diff --git a/hermes_cli/commands.py b/hermes_cli/commands.py index d9de67175..c67d4e9db 100644 --- a/hermes_cli/commands.py +++ b/hermes_cli/commands.py @@ -67,6 +67,8 @@ COMMAND_REGISTRY: list[CommandDef] = [ gateway_only=True), CommandDef("background", "Run a prompt in the background", "Session", aliases=("bg",), args_hint=""), + CommandDef("btw", "Ephemeral side question using session context (no tools, not persisted)", "Session", + args_hint=""), CommandDef("queue", "Queue a prompt for the next turn (doesn't interrupt)", "Session", aliases=("q",), args_hint=""), CommandDef("status", "Show session info", "Session", @@ -366,6 +368,42 @@ def telegram_bot_commands() -> list[tuple[str, str]]: return result +_TG_NAME_LIMIT = 32 + + +def _clamp_telegram_names( + entries: list[tuple[str, str]], + reserved: set[str], +) -> list[tuple[str, str]]: + """Enforce Telegram's 32-char command name limit with collision avoidance. + + Names exceeding 32 chars are truncated. If truncation creates a duplicate + (against *reserved* names or earlier entries in the same batch), the name is + shortened to 31 chars and a digit ``0``-``9`` is appended to differentiate. 
+ If all 10 digit slots are taken the entry is silently dropped. + """ + used: set[str] = set(reserved) + result: list[tuple[str, str]] = [] + for name, desc in entries: + if len(name) > _TG_NAME_LIMIT: + candidate = name[:_TG_NAME_LIMIT] + if candidate in used: + prefix = name[:_TG_NAME_LIMIT - 1] + for digit in range(10): + candidate = f"{prefix}{digit}" + if candidate not in used: + break + else: + # All 10 digit slots exhausted — skip entry + continue + name = candidate + if name in used: + continue + used.add(name) + result.append((name, desc)) + return result + + def telegram_menu_commands(max_commands: int = 100) -> tuple[list[tuple[str, str]], int]: """Return Telegram menu commands capped to the Bot API limit. @@ -381,9 +419,13 @@ def telegram_menu_commands(max_commands: int = 100) -> tuple[list[tuple[str, str (menu_commands, hidden_count) where hidden_count is the number of skill commands omitted due to the cap. """ - all_commands = list(telegram_bot_commands()) + core_commands = list(telegram_bot_commands()) + # Reserve core names so plugin/skill truncation can't collide with them + reserved_names = {n for n, _ in core_commands} + all_commands = list(core_commands) # Plugin slash commands get priority over skills + plugin_entries: list[tuple[str, str]] = [] try: from hermes_cli.plugins import get_plugin_manager pm = get_plugin_manager() @@ -393,10 +435,15 @@ def telegram_menu_commands(max_commands: int = 100) -> tuple[list[tuple[str, str desc = "Plugin command" if len(desc) > 40: desc = desc[:37] + "..." - all_commands.append((tg_name, desc)) + plugin_entries.append((tg_name, desc)) except Exception: pass + # Clamp plugin names to 32 chars with collision avoidance + plugin_entries = _clamp_telegram_names(plugin_entries, reserved_names) + reserved_names.update(n for n, _ in plugin_entries) + all_commands.extend(plugin_entries) + # Remaining slots go to built-in skill commands (not hub-installed). skill_entries: list[tuple[str, str]] = [] try: @@ -422,6 +469,9 @@ def telegram_menu_commands(max_commands: int = 100) -> tuple[list[tuple[str, str except Exception: pass + # Clamp skill names to 32 chars with collision avoidance + skill_entries = _clamp_telegram_names(skill_entries, reserved_names) + # Skills fill remaining slots — they're the only tier that gets trimmed remaining_slots = max(0, max_commands - len(all_commands)) hidden_count = max(0, len(skill_entries) - remaining_slots) diff --git a/hermes_cli/config.py b/hermes_cli/config.py index 56d102692..51b8b9af7 100644 --- a/hermes_cli/config.py +++ b/hermes_cli/config.py @@ -52,26 +52,86 @@ from hermes_cli.default_soul import DEFAULT_SOUL_MD # Managed mode (NixOS declarative config) # ============================================================================= +_MANAGED_TRUE_VALUES = ("true", "1", "yes") +_MANAGED_SYSTEM_NAMES = { + "brew": "Homebrew", + "homebrew": "Homebrew", + "nix": "NixOS", + "nixos": "NixOS", +} + + +def get_managed_system() -> Optional[str]: + """Return the package manager owning this install, if any.""" + raw = os.getenv("HERMES_MANAGED", "").strip() + if raw: + normalized = raw.lower() + if normalized in _MANAGED_TRUE_VALUES: + return "NixOS" + return _MANAGED_SYSTEM_NAMES.get(normalized, raw) + + managed_marker = get_hermes_home() / ".managed" + if managed_marker.exists(): + return "NixOS" + return None + + def is_managed() -> bool: - """Check if hermes is running in Nix-managed mode. + """Check if Hermes is running in package-manager-managed mode. 
Two signals: the HERMES_MANAGED env var (set by the systemd service), or a .managed marker file in HERMES_HOME (set by the NixOS activation script, so interactive shells also see it). """ - if os.getenv("HERMES_MANAGED", "").lower() in ("true", "1", "yes"): - return True - managed_marker = get_hermes_home() / ".managed" - return managed_marker.exists() + return get_managed_system() is not None + + +def get_managed_update_command() -> Optional[str]: + """Return the preferred upgrade command for a managed install.""" + managed_system = get_managed_system() + if managed_system == "Homebrew": + return "brew upgrade hermes-agent" + if managed_system == "NixOS": + return "sudo nixos-rebuild switch" + return None + + +def recommended_update_command() -> str: + """Return the best update command for the current installation.""" + return get_managed_update_command() or "hermes update" + + +def format_managed_message(action: str = "modify this Hermes installation") -> str: + """Build a user-facing error for managed installs.""" + managed_system = get_managed_system() or "a package manager" + raw = os.getenv("HERMES_MANAGED", "").strip().lower() + + if managed_system == "NixOS": + env_hint = "true" if raw in _MANAGED_TRUE_VALUES else raw or "true" + return ( + f"Cannot {action}: this Hermes installation is managed by NixOS " + f"(HERMES_MANAGED={env_hint}).\n" + "Edit services.hermes-agent.settings in your configuration.nix and run:\n" + " sudo nixos-rebuild switch" + ) + + if managed_system == "Homebrew": + env_hint = raw or "homebrew" + return ( + f"Cannot {action}: this Hermes installation is managed by Homebrew " + f"(HERMES_MANAGED={env_hint}).\n" + "Use:\n" + " brew upgrade hermes-agent" + ) + + return ( + f"Cannot {action}: this Hermes installation is managed by {managed_system}.\n" + "Use your package manager to upgrade or reinstall Hermes." + ) def managed_error(action: str = "modify configuration"): """Print user-friendly error for managed mode.""" - print( - f"Cannot {action}: configuration is managed by NixOS (HERMES_MANAGED=true).\n" - "Edit services.hermes-agent.settings in your configuration.nix and run:\n" - " sudo nixos-rebuild switch", - file=sys.stderr, - ) + print(format_managed_message(action), file=sys.stderr) # ============================================================================= @@ -138,6 +198,7 @@ def ensure_hermes_home(): DEFAULT_CONFIG = { "model": "anthropic/claude-opus-4.6", "fallback_providers": [], + "credential_pool_strategies": {}, "toolsets": ["hermes-cli"], "agent": { "max_turns": 90, @@ -185,6 +246,7 @@ DEFAULT_CONFIG = { "inactivity_timeout": 120, "command_timeout": 30, # Timeout for browser commands in seconds (screenshot, navigate, etc.) "record_sessions": False, # Auto-record browser sessions as WebM videos + "allow_private_urls": False, # Allow navigating to private/internal IPs (localhost, 192.168.x.x, etc.) }, # Filesystem checkpoints — automatic snapshots before destructive file ops. 
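A quick usage sketch of the managed-mode helpers above. The values are illustrative; `HERMES_MANAGED` is read fresh on each call, so the sequence works within one process. This assumes a normal checkout where `hermes_cli.config` is importable.

import os
from hermes_cli.config import get_managed_system, recommended_update_command

# Package-manager names map through _MANAGED_SYSTEM_NAMES:
os.environ["HERMES_MANAGED"] = "brew"
assert get_managed_system() == "Homebrew"
assert recommended_update_command() == "brew upgrade hermes-agent"

# Legacy boolean values still mean NixOS:
os.environ["HERMES_MANAGED"] = "true"
assert get_managed_system() == "NixOS"
assert recommended_update_command() == "sudo nixos-rebuild switch"

# With the env var unset (and assuming no .managed marker file exists in
# HERMES_HOME), the install is unmanaged and the generic self-updater wins:
# recommended_update_command() -> "hermes update"
del os.environ["HERMES_MANAGED"]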
@@ -392,6 +454,7 @@ DEFAULT_CONFIG = { "require_mention": True, # Require @mention to respond in server channels "free_response_channels": "", # Comma-separated channel IDs where bot responds without mention "auto_thread": True, # Auto-create threads on @mention in channels (like Slack) + "reactions": True, # Add 👀/✅/❌ reactions to messages during processing }, # WhatsApp platform settings (gateway mode) @@ -441,7 +504,7 @@ DEFAULT_CONFIG = { }, # Config schema version - bump this when adding new required fields - "_config_version": 10, + "_config_version": 11, } # ============================================================================= diff --git a/hermes_cli/cron.py b/hermes_cli/cron.py index 97a225794..f6da8a2d2 100644 --- a/hermes_cli/cron.py +++ b/hermes_cli/cron.py @@ -56,7 +56,7 @@ def cron_list(show_all: bool = False): print() for job in jobs: - job_id = job.get("id", "?")[:8] + job_id = job.get("id", "?") name = job.get("name", "(unnamed)") schedule = job.get("schedule_display", job.get("schedule", {}).get("value", "?")) state = job.get("state", "scheduled" if job.get("enabled", True) else "paused") diff --git a/hermes_cli/main.py b/hermes_cli/main.py index f6d7d7c71..9b4b3ccac 100644 --- a/hermes_cli/main.py +++ b/hermes_cli/main.py @@ -173,9 +173,25 @@ def _relative_time(ts) -> str: def _has_any_provider_configured() -> bool: """Check if at least one inference provider is usable.""" - from hermes_cli.config import get_env_path, get_hermes_home + from hermes_cli.config import get_env_path, get_hermes_home, load_config from hermes_cli.auth import get_auth_status + # Determine whether Hermes itself has been explicitly configured (model + # in config that isn't the hardcoded default). Used below to gate external + # tool credentials (Claude Code, Codex CLI) that shouldn't silently skip + # the setup wizard on a fresh install. + from hermes_cli.config import DEFAULT_CONFIG + _DEFAULT_MODEL = DEFAULT_CONFIG.get("model", "") + cfg = load_config() + model_cfg = cfg.get("model") + if isinstance(model_cfg, dict): + _model_name = (model_cfg.get("default") or "").strip() + elif isinstance(model_cfg, str): + _model_name = model_cfg.strip() + else: + _model_name = "" + _has_hermes_config = _model_name and _model_name != _DEFAULT_MODEL + # Check env vars (may be set by .env or shell). # OPENAI_BASE_URL alone counts — local models (vLLM, llama.cpp, etc.) # often don't require an API key. @@ -231,15 +247,16 @@ def _has_any_provider_configured() -> bool: # Check for Claude Code OAuth credentials (~/.claude/.credentials.json) - # These are used by resolve_anthropic_token() at runtime but were missing - # from this startup gate check. - try: - from agent.anthropic_adapter import read_claude_code_credentials, is_claude_code_token_valid - creds = read_claude_code_credentials() - if creds and (is_claude_code_token_valid(creds) or creds.get("refreshToken")): - return True - except Exception: - pass + # Only count these if Hermes has been explicitly configured — Claude Code + # being installed doesn't mean the user wants Hermes to use their tokens. 
+ if _has_hermes_config: + try: + from agent.anthropic_adapter import read_claude_code_credentials, is_claude_code_token_valid + creds = read_claude_code_credentials() + if creds and (is_claude_code_token_valid(creds) or creds.get("refreshToken")): + return True + except Exception: + pass return False @@ -829,6 +846,17 @@ def cmd_setup(args): def cmd_model(args): """Select default model — starts with provider selection, then model picker.""" _require_tty("model") + select_provider_and_model() + + +def select_provider_and_model(): + """Core provider selection + model picking logic. + + Shared by ``cmd_model`` (``hermes model``) and the setup wizard + (``setup_model_provider`` in setup.py). Handles the full flow: + provider picker, credential prompting, model selection, and config + persistence. + """ from hermes_cli.auth import ( resolve_provider, AuthError, format_auth_error, ) @@ -858,7 +886,10 @@ def cmd_model(args): except AuthError as exc: warning = format_auth_error(exc) print(f"Warning: {warning} Falling back to auto provider detection.") - active = resolve_provider("auto") + try: + active = resolve_provider("auto") + except AuthError: + active = "openrouter" # no provider yet; show full picker # Detect custom endpoint if active == "openrouter" and get_env_value("OPENAI_BASE_URL"): @@ -1050,10 +1081,6 @@ def _model_flow_openrouter(config, current_model=""): selected = _prompt_model_selection(openrouter_models, current_model=current_model) if selected: - # Clear any custom endpoint and set provider to openrouter - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") _save_model_choice(selected) # Update config provider and deactivate any OAuth provider @@ -1143,10 +1170,6 @@ def _model_flow_nous(config, current_model=""): # Reactivate Nous as the provider and update config inference_url = creds.get("base_url", "") _update_config_for_provider("nous", inference_url) - # Clear any custom endpoint that might conflict - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") print(f"Default model set to: {selected} (via Nous Portal)") else: print("No change.") @@ -1191,10 +1214,6 @@ def _model_flow_openai_codex(config, current_model=""): if selected: _save_model_choice(selected) _update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL) - # Clear custom endpoint env vars that would otherwise override Codex. - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") print(f"Default model set to: {selected} (via OpenAI Codex)") else: print("No change.") @@ -1223,22 +1242,10 @@ def _model_flow_custom(config): try: base_url = input(f"API base URL [{current_url or 'e.g. https://api.example.com/v1'}]: ").strip() api_key = input(f"API key [{current_key[:8] + '...' if current_key else 'optional'}]: ").strip() - model_name = input("Model name (e.g. 
gpt-4, llama-3-70b): ").strip() - context_length_str = input("Context length in tokens [leave blank for auto-detect]: ").strip() except (KeyboardInterrupt, EOFError): print("\nCancelled.") return - context_length = None - if context_length_str: - try: - context_length = int(context_length_str.replace(",", "").replace("k", "000").replace("K", "000")) - if context_length <= 0: - context_length = None - except ValueError: - print(f"Invalid context length: {context_length_str} — will auto-detect.") - context_length = None - if not base_url and not current_url: print("No URL provided. Cancelled.") return @@ -1275,10 +1282,43 @@ def _model_flow_custom(config): if probe.get("suggested_base_url"): print(f" If this server expects /v1, try base URL: {probe['suggested_base_url']}") - if base_url: - save_env_value("OPENAI_BASE_URL", effective_url) - if api_key: - save_env_value("OPENAI_API_KEY", api_key) + # Select model — use probe results when available, fall back to manual input + model_name = "" + detected_models = probe.get("models") or [] + try: + if len(detected_models) == 1: + print(f" Detected model: {detected_models[0]}") + confirm = input(" Use this model? [Y/n]: ").strip().lower() + if confirm in ("", "y", "yes"): + model_name = detected_models[0] + else: + model_name = input("Model name (e.g. gpt-4, llama-3-70b): ").strip() + elif len(detected_models) > 1: + print(" Available models:") + for i, m in enumerate(detected_models, 1): + print(f" {i}. {m}") + pick = input(f" Select model [1-{len(detected_models)}] or type name: ").strip() + if pick.isdigit() and 1 <= int(pick) <= len(detected_models): + model_name = detected_models[int(pick) - 1] + elif pick: + model_name = pick + else: + model_name = input("Model name (e.g. gpt-4, llama-3-70b): ").strip() + + context_length_str = input("Context length in tokens [leave blank for auto-detect]: ").strip() + except (KeyboardInterrupt, EOFError): + print("\nCancelled.") + return + + context_length = None + if context_length_str: + try: + context_length = int(context_length_str.replace(",", "").replace("k", "000").replace("K", "000")) + if context_length <= 0: + context_length = None + except ValueError: + print(f"Invalid context length: {context_length_str} — will auto-detect.") + context_length = None if model_name: _save_model_choice(model_name) @@ -1291,14 +1331,33 @@ def _model_flow_custom(config): cfg["model"] = model model["provider"] = "custom" model["base_url"] = effective_url + if effective_key: + model["api_key"] = effective_key model.pop("api_mode", None) # let runtime auto-detect from URL save_config(cfg) deactivate_provider() + # Sync the caller's config dict so the setup wizard's final + # save_config(config) preserves our model settings. Without + # this, the wizard overwrites model.provider/base_url with + # the stale values from its own config dict (#4172). + config["model"] = dict(model) + print(f"Default model set to: {model_name} (via {effective_url})") else: if base_url or api_key: deactivate_provider() + # Even without a model name, persist the custom endpoint on the + # caller's config dict so the setup wizard doesn't lose it. + _caller_model = config.get("model") + if not isinstance(_caller_model, dict): + _caller_model = {"default": _caller_model} if _caller_model else {} + _caller_model["provider"] = "custom" + _caller_model["base_url"] = effective_url + if effective_key: + _caller_model["api_key"] = effective_key + _caller_model.pop("api_mode", None) + config["model"] = _caller_model print("Endpoint saved. 
Use `/model` in chat or `hermes model` to set a model.") # Auto-save to custom_providers so it appears in the menu next time @@ -1439,9 +1498,6 @@ def _model_flow_named_custom(config, provider_info): # If a model is saved, just activate immediately — no probing needed if saved_model: - save_env_value("OPENAI_BASE_URL", base_url) - if api_key: - save_env_value("OPENAI_API_KEY", api_key) _save_model_choice(saved_model) cfg = load_config() @@ -1451,6 +1507,8 @@ def _model_flow_named_custom(config, provider_info): cfg["model"] = model model["provider"] = "custom" model["base_url"] = base_url + if api_key: + model["api_key"] = api_key save_config(cfg) deactivate_provider() @@ -1513,9 +1571,6 @@ def _model_flow_named_custom(config, provider_info): return # Activate and save the model to the custom_providers entry - save_env_value("OPENAI_BASE_URL", base_url) - if api_key: - save_env_value("OPENAI_API_KEY", api_key) _save_model_choice(model_name) cfg = load_config() @@ -1525,6 +1580,8 @@ def _model_flow_named_custom(config, provider_info): cfg["model"] = model model["provider"] = "custom" model["base_url"] = base_url + if api_key: + model["api_key"] = api_key save_config(cfg) deactivate_provider() @@ -1577,11 +1634,15 @@ _PROVIDER_MODELS = { "kimi-k2-0905-preview", ], "minimax": [ + "MiniMax-M2.7", + "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1", ], "minimax-cn": [ + "MiniMax-M2.7", + "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1", @@ -1829,11 +1890,6 @@ def _model_flow_copilot(config, current_model=""): catalog=catalog, api_key=api_key, ) or selected - # Clear stale custom-endpoint overrides so the Copilot provider wins cleanly. - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - initial_cfg = load_config() current_effort = _current_reasoning_effort(initial_cfg) reasoning_efforts = github_model_reasoning_efforts( @@ -2058,11 +2114,6 @@ def _model_flow_kimi(config, current_model=""): selected = None if selected: - # Clear custom endpoint if set (avoid confusion) - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _save_model_choice(selected) # Update config with provider and base URL @@ -2165,11 +2216,6 @@ def _model_flow_api_key_provider(config, provider_id, current_model=""): selected = None if selected: - # Clear custom endpoint if set (avoid confusion) - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _save_model_choice(selected) # Update config with provider and base URL @@ -2381,11 +2427,6 @@ def _model_flow_anthropic(config, current_model=""): selected = None if selected: - # Clear custom endpoint if set - if get_env_value("OPENAI_BASE_URL"): - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _save_model_choice(selected) # Update config with provider — clear base_url since @@ -2419,6 +2460,12 @@ def cmd_logout(args): logout_command(args) +def cmd_auth(args): + """Manage pooled credentials.""" + from hermes_cli.auth_commands import auth_command + auth_command(args) + + def cmd_status(args): """Show status of all components.""" from hermes_cli.status import show_status @@ -2467,10 +2514,14 @@ def cmd_version(args): # Show update status (synchronous — acceptable since user asked for version info) try: from hermes_cli.banner import check_for_updates + from hermes_cli.config import 
recommended_update_command behind = check_for_updates() if behind and behind > 0: commits_word = "commit" if behind == 1 else "commits" - print(f"Update available: {behind} {commits_word} behind — run 'hermes update'") + print( + f"Update available: {behind} {commits_word} behind — " + f"run '{recommended_update_command()}'" + ) elif behind == 0: print("Up to date") except Exception: @@ -2821,6 +2872,11 @@ def _invalidate_update_cache(): def cmd_update(args): """Update Hermes Agent to the latest version.""" import shutil + from hermes_cli.config import is_managed, managed_error + + if is_managed(): + managed_error("update Hermes Agent") + return print("⚕ Updating Hermes Agent...") print() @@ -3156,6 +3212,7 @@ def cmd_update(args): _gw_service_name = get_service_name() existing_pid = get_running_pid() has_systemd_service = False + has_system_service = False has_launchd_service = False try: @@ -3168,6 +3225,19 @@ def cmd_update(args): except (FileNotFoundError, subprocess.TimeoutExpired): pass + # Also check for a system-level service (hermes gateway install --system). + # This covers gateways running under system systemd where --user + # fails due to missing D-Bus session. + if not has_systemd_service and is_linux(): + try: + check = subprocess.run( + ["systemctl", "is-active", _gw_service_name], + capture_output=True, text=True, timeout=5, + ) + has_system_service = check.stdout.strip() == "active" + except (FileNotFoundError, subprocess.TimeoutExpired): + pass + # Check for macOS launchd service if is_macos(): try: @@ -3182,7 +3252,7 @@ def cmd_update(args): except (FileNotFoundError, subprocess.TimeoutExpired): pass - if existing_pid or has_systemd_service or has_launchd_service: + if existing_pid or has_systemd_service or has_system_service or has_launchd_service: print() # When a service manager is handling the gateway, let it @@ -3223,6 +3293,21 @@ def cmd_update(args): print(" hermes gateway restart") else: print(" Try manually: hermes gateway restart") + elif has_system_service: + # System-level service (hermes gateway install --system). + # No D-Bus session needed — systemctl without --user talks + # directly to the system manager over /run/systemd/private. + print("→ Restarting system gateway service...") + restart = subprocess.run( + ["systemctl", "restart", _gw_service_name], + capture_output=True, text=True, timeout=15, + ) + if restart.returncode == 0: + print("✓ Gateway restarted (system service).") + else: + print(f"⚠ Gateway restart failed: {restart.stderr.strip()}") + print(" System services may require root. Try:") + print(f" sudo systemctl restart {_gw_service_name}") elif has_launchd_service: # Refresh the plist first (picks up --replace and other # changes from the update we just pulled). @@ -3286,7 +3371,7 @@ def _coalesce_session_name_args(argv: list) -> list: or a known top-level subcommand. """ _SUBCOMMANDS = { - "chat", "model", "gateway", "setup", "whatsapp", "login", "logout", + "chat", "model", "gateway", "setup", "whatsapp", "login", "logout", "auth", "status", "cron", "doctor", "config", "pairing", "skills", "tools", "mcp", "sessions", "insights", "version", "update", "uninstall", "profile", @@ -3575,6 +3660,10 @@ Examples: hermes --resume Resume a specific session by ID hermes setup Run setup wizard hermes logout Clear stored authentication + hermes auth add Add a pooled credential + hermes auth list List pooled credentials + hermes auth remove
Remove pooled credential by index + hermes auth reset Clear exhaustion status for a provider hermes model Select default model hermes config View configuration hermes config edit Edit config in $EDITOR @@ -3893,6 +3982,33 @@ For more help on a command: ) logout_parser.set_defaults(func=cmd_logout) + auth_parser = subparsers.add_parser( + "auth", + help="Manage pooled provider credentials", + ) + auth_subparsers = auth_parser.add_subparsers(dest="auth_action") + auth_add = auth_subparsers.add_parser("add", help="Add a pooled credential") + auth_add.add_argument("provider", help="Provider id (for example: anthropic, openai-codex, openrouter)") + auth_add.add_argument("--type", dest="auth_type", choices=["oauth", "api-key", "api_key"], help="Credential type to add") + auth_add.add_argument("--label", help="Optional display label") + auth_add.add_argument("--api-key", help="API key value (otherwise prompted securely)") + auth_add.add_argument("--portal-url", help="Nous portal base URL") + auth_add.add_argument("--inference-url", help="Nous inference base URL") + auth_add.add_argument("--client-id", help="OAuth client id") + auth_add.add_argument("--scope", help="OAuth scope override") + auth_add.add_argument("--no-browser", action="store_true", help="Do not auto-open a browser for OAuth login") + auth_add.add_argument("--timeout", type=float, help="OAuth/network timeout in seconds") + auth_add.add_argument("--insecure", action="store_true", help="Disable TLS verification for OAuth login") + auth_add.add_argument("--ca-bundle", help="Custom CA bundle for OAuth login") + auth_list = auth_subparsers.add_parser("list", help="List pooled credentials") + auth_list.add_argument("provider", nargs="?", help="Optional provider filter") + auth_remove = auth_subparsers.add_parser("remove", help="Remove a pooled credential by index") + auth_remove.add_argument("provider", help="Provider id") + auth_remove.add_argument("index", type=int, help="1-based credential index") + auth_reset = auth_subparsers.add_parser("reset", help="Clear exhaustion status for all credentials for a provider") + auth_reset.add_argument("provider", help="Provider id") + auth_parser.set_defaults(func=cmd_auth) + # ========================================================================= # status command # ========================================================================= @@ -4703,6 +4819,28 @@ For more help on a command: help="Skip confirmation prompts" ) + # claw cleanup + claw_cleanup = claw_subparsers.add_parser( + "cleanup", + aliases=["clean"], + help="Archive leftover OpenClaw directories after migration", + description="Scan for and archive leftover OpenClaw directories to prevent state fragmentation" + ) + claw_cleanup.add_argument( + "--source", + help="Path to a specific OpenClaw directory to clean up" + ) + claw_cleanup.add_argument( + "--dry-run", + action="store_true", + help="Preview what would be archived without making changes" + ) + claw_cleanup.add_argument( + "--yes", "-y", + action="store_true", + help="Skip confirmation prompts" + ) + def cmd_claw(args): from hermes_cli.claw import claw_command claw_command(args) diff --git a/hermes_cli/models.py b/hermes_cli/models.py index ef2b3deb4..c8bd106b6 100644 --- a/hermes_cli/models.py +++ b/hermes_cli/models.py @@ -27,6 +27,7 @@ GITHUB_MODELS_CATALOG_URL = COPILOT_MODELS_URL # (model_id, display description shown in menus) OPENROUTER_MODELS: list[tuple[str, str]] = [ ("anthropic/claude-opus-4.6", "recommended"), + ("anthropic/claude-sonnet-4.6", ""), 
("anthropic/claude-sonnet-4.5", ""), ("anthropic/claude-haiku-4.5", ""), ("openai/gpt-5.4", ""), @@ -56,6 +57,7 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [ _PROVIDER_MODELS: dict[str, list[str]] = { "nous": [ "anthropic/claude-opus-4.6", + "anthropic/claude-sonnet-4.6", "anthropic/claude-sonnet-4.5", "anthropic/claude-haiku-4.5", "openai/gpt-5.4", @@ -189,7 +191,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = { "opencode-go": [ "glm-5", "kimi-k2.5", - "minimax-m2.5", + "minimax-m2.7", ], "ai-gateway": [ "anthropic/claude-opus-4.6", @@ -347,7 +349,7 @@ def list_available_providers() -> list[dict[str, str]]: try: from hermes_cli.auth import get_auth_status, has_usable_secret if pid == "custom": - custom_base_url = _get_custom_base_url() or os.getenv("OPENAI_BASE_URL", "") + custom_base_url = _get_custom_base_url() or "" has_creds = bool(custom_base_url.strip()) elif pid == "openrouter": has_creds = has_usable_secret(os.getenv("OPENROUTER_API_KEY", "")) diff --git a/hermes_cli/plugins_cmd.py b/hermes_cli/plugins_cmd.py index e53f5c94b..c3717bfa3 100644 --- a/hermes_cli/plugins_cmd.py +++ b/hermes_cli/plugins_cmd.py @@ -265,10 +265,11 @@ def cmd_install(identifier: str, force: bool = False) -> None: ) sys.exit(1) if mv_int > _SUPPORTED_MANIFEST_VERSION: + from hermes_cli.config import recommended_update_command console.print( f"[red]Error:[/red] Plugin '{plugin_name}' requires manifest_version " f"{mv}, but this installer only supports up to {_SUPPORTED_MANIFEST_VERSION}.\n" - f"Run [bold]hermes update[/bold] to get a newer installer." + f"Run [bold]{recommended_update_command()}[/bold] to get a newer installer." ) sys.exit(1) diff --git a/hermes_cli/profiles.py b/hermes_cli/profiles.py index 7ef39d105..30da7eb1a 100644 --- a/hermes_cli/profiles.py +++ b/hermes_cli/profiles.py @@ -241,7 +241,7 @@ def _read_config_model(profile_dir: Path) -> tuple: if isinstance(model_cfg, str): return model_cfg, None if isinstance(model_cfg, dict): - return model_cfg.get("model"), model_cfg.get("provider") + return model_cfg.get("default") or model_cfg.get("model"), model_cfg.get("provider") return None, None except Exception: return None, None diff --git a/hermes_cli/runtime_provider.py b/hermes_cli/runtime_provider.py index 0c82805d5..bb5f4758a 100644 --- a/hermes_cli/runtime_provider.py +++ b/hermes_cli/runtime_provider.py @@ -6,8 +6,10 @@ import os from typing import Any, Dict, Optional from hermes_cli import auth as auth_mod +from agent.credential_pool import CredentialPool, PooledCredential, get_custom_provider_pool_key, load_pool from hermes_cli.auth import ( AuthError, + DEFAULT_CODEX_BASE_URL, PROVIDER_REGISTRY, format_auth_error, resolve_provider, @@ -109,6 +111,50 @@ def _parse_api_mode(raw: Any) -> Optional[str]: return None +def _resolve_runtime_from_pool_entry( + *, + provider: str, + entry: PooledCredential, + requested_provider: str, + model_cfg: Optional[Dict[str, Any]] = None, + pool: Optional[CredentialPool] = None, +) -> Dict[str, Any]: + model_cfg = model_cfg or _get_model_config() + base_url = (getattr(entry, "runtime_base_url", None) or getattr(entry, "base_url", None) or "").rstrip("/") + api_key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "") + api_mode = "chat_completions" + if provider == "openai-codex": + api_mode = "codex_responses" + base_url = base_url or DEFAULT_CODEX_BASE_URL + elif provider == "anthropic": + api_mode = "anthropic_messages" + cfg_provider = str(model_cfg.get("provider") or "").strip().lower() + cfg_base_url = "" + if 
cfg_provider == "anthropic": + cfg_base_url = str(model_cfg.get("base_url") or "").strip().rstrip("/") + base_url = cfg_base_url or base_url or "https://api.anthropic.com" + elif provider == "nous": + api_mode = "chat_completions" + elif provider == "copilot": + api_mode = _copilot_runtime_api_mode(model_cfg, getattr(entry, "runtime_api_key", "")) + else: + configured_mode = _parse_api_mode(model_cfg.get("api_mode")) + if configured_mode: + api_mode = configured_mode + elif base_url.rstrip("/").endswith("/anthropic"): + api_mode = "anthropic_messages" + + return { + "provider": provider, + "api_mode": api_mode, + "base_url": base_url, + "api_key": api_key, + "source": getattr(entry, "source", "pool"), + "credential_pool": pool, + "requested_provider": requested_provider, + } + + def resolve_requested_provider(requested: Optional[str] = None) -> str: """Resolve provider request from explicit arg, config, then env.""" if requested and requested.strip(): @@ -128,6 +174,37 @@ def resolve_requested_provider(requested: Optional[str] = None) -> str: return "auto" +def _try_resolve_from_custom_pool( + base_url: str, + provider_label: str, + api_mode_override: Optional[str] = None, +) -> Optional[Dict[str, Any]]: + """Check if a credential pool exists for a custom endpoint and return a runtime dict if so.""" + pool_key = get_custom_provider_pool_key(base_url) + if not pool_key: + return None + try: + pool = load_pool(pool_key) + if not pool.has_credentials(): + return None + entry = pool.select() + if entry is None: + return None + pool_api_key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "") + if not pool_api_key: + return None + return { + "provider": provider_label, + "api_mode": api_mode_override or _detect_api_mode_for_url(base_url) or "chat_completions", + "base_url": base_url, + "api_key": pool_api_key, + "source": f"pool:{pool_key}", + "credential_pool": pool, + } + except Exception: + return None + + def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, Any]]: requested_norm = _normalize_custom_provider_name(requested_provider or "") if not requested_norm or requested_norm == "custom": @@ -192,6 +269,11 @@ def _resolve_named_custom_runtime( if not base_url: return None + # Check if a credential pool exists for this custom endpoint + pool_result = _try_resolve_from_custom_pool(base_url, "custom", custom_provider.get("api_mode")) + if pool_result: + return pool_result + api_key_candidates = [ (explicit_api_key or "").strip(), str(custom_provider.get("api_key", "") or "").strip(), @@ -229,28 +311,22 @@ def _resolve_openrouter_runtime( requested_norm = (requested_provider or "").strip().lower() cfg_provider = cfg_provider.strip().lower() - env_openai_base_url = os.getenv("OPENAI_BASE_URL", "").strip() env_openrouter_base_url = os.getenv("OPENROUTER_BASE_URL", "").strip() + # Use config base_url when available and the provider context matches. + # OPENAI_BASE_URL env var is no longer consulted — config.yaml is + # the single source of truth for endpoint URLs. use_config_base_url = False if cfg_base_url.strip() and not explicit_base_url: if requested_norm == "auto": - if (not cfg_provider or cfg_provider == "auto") and not env_openai_base_url: + if not cfg_provider or cfg_provider == "auto": use_config_base_url = True elif requested_norm == "custom" and cfg_provider == "custom": - # provider: custom — use base_url from config (Fixes #1760). 
use_config_base_url = True - # When the user explicitly requested the openrouter provider, skip - # OPENAI_BASE_URL — it typically points to a custom / non-OpenRouter - # endpoint and would prevent switching back to OpenRouter (#874). - skip_openai_base = requested_norm == "openrouter" - - # For custom, prefer config base_url over env so config.yaml is honored (#1760). base_url = ( (explicit_base_url or "").strip() or (cfg_base_url.strip() if use_config_base_url else "") - or ("" if skip_openai_base else env_openai_base_url) or env_openrouter_base_url or OPENROUTER_BASE_URL ).rstrip("/") @@ -287,6 +363,15 @@ def _resolve_openrouter_runtime( # Also provide a placeholder API key for local servers that don't require # authentication — the OpenAI SDK requires a non-empty api_key string. effective_provider = "custom" if requested_norm == "custom" else "openrouter" + + # For custom endpoints, check if a credential pool exists + if effective_provider == "custom" and base_url: + pool_result = _try_resolve_from_custom_pool( + base_url, effective_provider, _parse_api_mode(model_cfg.get("api_mode")), + ) + if pool_result: + return pool_result + if effective_provider == "custom" and not api_key and not _is_openrouter_url: api_key = "no-key-required" @@ -301,6 +386,134 @@ def _resolve_openrouter_runtime( } +def _resolve_explicit_runtime( + *, + provider: str, + requested_provider: str, + model_cfg: Dict[str, Any], + explicit_api_key: Optional[str] = None, + explicit_base_url: Optional[str] = None, +) -> Optional[Dict[str, Any]]: + explicit_api_key = str(explicit_api_key or "").strip() + explicit_base_url = str(explicit_base_url or "").strip().rstrip("/") + if not explicit_api_key and not explicit_base_url: + return None + + if provider == "anthropic": + cfg_provider = str(model_cfg.get("provider") or "").strip().lower() + cfg_base_url = "" + if cfg_provider == "anthropic": + cfg_base_url = str(model_cfg.get("base_url") or "").strip().rstrip("/") + base_url = explicit_base_url or cfg_base_url or "https://api.anthropic.com" + api_key = explicit_api_key + if not api_key: + from agent.anthropic_adapter import resolve_anthropic_token + + api_key = resolve_anthropic_token() + if not api_key: + raise AuthError( + "No Anthropic credentials found. Set ANTHROPIC_TOKEN or ANTHROPIC_API_KEY, " + "run 'claude setup-token', or authenticate with 'claude /login'." 
+ ) + return { + "provider": "anthropic", + "api_mode": "anthropic_messages", + "base_url": base_url, + "api_key": api_key, + "source": "explicit", + "requested_provider": requested_provider, + } + + if provider == "openai-codex": + base_url = explicit_base_url or DEFAULT_CODEX_BASE_URL + api_key = explicit_api_key + last_refresh = None + if not api_key: + creds = resolve_codex_runtime_credentials() + api_key = creds.get("api_key", "") + last_refresh = creds.get("last_refresh") + if not explicit_base_url: + base_url = creds.get("base_url", "").rstrip("/") or base_url + return { + "provider": "openai-codex", + "api_mode": "codex_responses", + "base_url": base_url, + "api_key": api_key, + "source": "explicit", + "last_refresh": last_refresh, + "requested_provider": requested_provider, + } + + if provider == "nous": + state = auth_mod.get_provider_auth_state("nous") or {} + base_url = ( + explicit_base_url + or str(state.get("inference_base_url") or auth_mod.DEFAULT_NOUS_INFERENCE_URL).strip().rstrip("/") + ) + api_key = explicit_api_key or str(state.get("agent_key") or state.get("access_token") or "").strip() + expires_at = state.get("agent_key_expires_at") or state.get("expires_at") + if not api_key: + creds = resolve_nous_runtime_credentials( + min_key_ttl_seconds=max(60, int(os.getenv("HERMES_NOUS_MIN_KEY_TTL_SECONDS", "1800"))), + timeout_seconds=float(os.getenv("HERMES_NOUS_TIMEOUT_SECONDS", "15")), + ) + api_key = creds.get("api_key", "") + expires_at = creds.get("expires_at") + if not explicit_base_url: + base_url = creds.get("base_url", "").rstrip("/") or base_url + return { + "provider": "nous", + "api_mode": "chat_completions", + "base_url": base_url, + "api_key": api_key, + "source": "explicit", + "expires_at": expires_at, + "requested_provider": requested_provider, + } + + pconfig = PROVIDER_REGISTRY.get(provider) + if pconfig and pconfig.auth_type == "api_key": + env_url = "" + if pconfig.base_url_env_var: + env_url = os.getenv(pconfig.base_url_env_var, "").strip().rstrip("/") + + base_url = explicit_base_url + if not base_url: + if provider == "kimi-coding": + creds = resolve_api_key_provider_credentials(provider) + base_url = creds.get("base_url", "").rstrip("/") + else: + base_url = env_url or pconfig.inference_base_url + + api_key = explicit_api_key + if not api_key: + creds = resolve_api_key_provider_credentials(provider) + api_key = creds.get("api_key", "") + if not base_url: + base_url = creds.get("base_url", "").rstrip("/") + + api_mode = "chat_completions" + if provider == "copilot": + api_mode = _copilot_runtime_api_mode(model_cfg, api_key) + else: + configured_mode = _parse_api_mode(model_cfg.get("api_mode")) + if configured_mode: + api_mode = configured_mode + elif base_url.rstrip("/").endswith("/anthropic"): + api_mode = "anthropic_messages" + + return { + "provider": provider, + "api_mode": api_mode, + "base_url": base_url.rstrip("/"), + "api_key": api_key, + "source": "explicit", + "requested_provider": requested_provider, + } + + return None + + def resolve_runtime_provider( *, requested: Optional[str] = None, @@ -324,6 +537,57 @@ def resolve_runtime_provider( explicit_api_key=explicit_api_key, explicit_base_url=explicit_base_url, ) + model_cfg = _get_model_config() + explicit_runtime = _resolve_explicit_runtime( + provider=provider, + requested_provider=requested_provider, + model_cfg=model_cfg, + explicit_api_key=explicit_api_key, + explicit_base_url=explicit_base_url, + ) + if explicit_runtime: + return explicit_runtime + + should_use_pool = provider != 
"openrouter" + if provider == "openrouter": + cfg_provider = str(model_cfg.get("provider") or "").strip().lower() + cfg_base_url = str(model_cfg.get("base_url") or "").strip() + env_openai_base_url = os.getenv("OPENAI_BASE_URL", "").strip() + env_openrouter_base_url = os.getenv("OPENROUTER_BASE_URL", "").strip() + has_custom_endpoint = bool( + explicit_base_url + or env_openai_base_url + or env_openrouter_base_url + ) + if cfg_base_url and cfg_provider in {"auto", "custom"}: + has_custom_endpoint = True + has_runtime_override = bool(explicit_api_key or explicit_base_url) + should_use_pool = ( + requested_provider in {"openrouter", "auto"} + and not has_custom_endpoint + and not has_runtime_override + ) + + try: + pool = load_pool(provider) if should_use_pool else None + except Exception: + pool = None + if pool and pool.has_credentials(): + entry = pool.select() + pool_api_key = "" + if entry is not None: + pool_api_key = ( + getattr(entry, "runtime_api_key", None) + or getattr(entry, "access_token", "") + ) + if entry is not None and pool_api_key: + return _resolve_runtime_from_pool_entry( + provider=provider, + entry=entry, + requested_provider=requested_provider, + model_cfg=model_cfg, + pool=pool, + ) if provider == "nous": creds = resolve_nous_runtime_credentials( @@ -377,7 +641,6 @@ def resolve_runtime_provider( # Allow base URL override from config.yaml model.base_url, but only # when the configured provider is anthropic — otherwise a non-Anthropic # base_url (e.g. Codex endpoint) would leak into Anthropic requests. - model_cfg = _get_model_config() cfg_provider = str(model_cfg.get("provider") or "").strip().lower() cfg_base_url = "" if cfg_provider == "anthropic": @@ -396,7 +659,6 @@ def resolve_runtime_provider( pconfig = PROVIDER_REGISTRY.get(provider) if pconfig and pconfig.auth_type == "api_key": creds = resolve_api_key_provider_credentials(provider) - model_cfg = _get_model_config() base_url = creds.get("base_url", "").rstrip("/") api_mode = "chat_completions" if provider == "copilot": diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py index 304f34f56..bd64c75f8 100644 --- a/hermes_cli/setup.py +++ b/hermes_cli/setup.py @@ -18,6 +18,8 @@ import sys from pathlib import Path from typing import Optional, Dict, Any +from hermes_constants import get_optional_skills_dir + logger = logging.getLogger(__name__) PROJECT_ROOT = Path(__file__).parent.parent.resolve() @@ -52,6 +54,32 @@ def _set_default_model(config: Dict[str, Any], model_name: str) -> None: config["model"] = model_cfg +def _get_credential_pool_strategies(config: Dict[str, Any]) -> Dict[str, str]: + strategies = config.get("credential_pool_strategies") + return dict(strategies) if isinstance(strategies, dict) else {} + + +def _set_credential_pool_strategy(config: Dict[str, Any], provider: str, strategy: str) -> None: + if not provider: + return + strategies = _get_credential_pool_strategies(config) + strategies[provider] = strategy + config["credential_pool_strategies"] = strategies + + +def _supports_same_provider_pool_setup(provider: str) -> bool: + if not provider or provider == "custom": + return False + if provider == "openrouter": + return True + from hermes_cli.auth import PROVIDER_REGISTRY + + pconfig = PROVIDER_REGISTRY.get(provider) + if not pconfig: + return False + return pconfig.auth_type in {"api_key", "oauth_device_code"} + + # Default model lists per provider — used as fallback when the live # /models endpoint can't be reached. 
_DEFAULT_PROVIDER_MODELS = { @@ -803,772 +831,128 @@ def _prompt_container_resources(config: dict): # ============================================================================= + def setup_model_provider(config: dict): - """Configure the inference provider and default model.""" - from hermes_cli.auth import ( - get_active_provider, - PROVIDER_REGISTRY, - fetch_nous_models, - resolve_nous_runtime_credentials, - _update_config_for_provider, - _login_openai_codex, - resolve_codex_runtime_credentials, - DEFAULT_CODEX_BASE_URL, - detect_external_credentials, - get_auth_status, - resolve_api_key_provider_credentials, - ) + """Configure the inference provider and default model. + + Delegates to ``cmd_model()`` (the same flow used by ``hermes model``) + for provider selection, credential prompting, and model picking. + This ensures a single code path for all provider setup — any new + provider added to ``hermes model`` is automatically available here. + """ + from hermes_cli.config import load_config, save_config print_header("Inference Provider") print_info("Choose how to connect to your main chat model.") print() - existing_or = get_env_value("OPENROUTER_API_KEY") - active_oauth = get_active_provider() - existing_custom = get_env_value("OPENAI_BASE_URL") - copilot_status = get_auth_status("copilot") - copilot_acp_status = get_auth_status("copilot-acp") - - model_cfg = config.get("model") if isinstance(config.get("model"), dict) else {} - current_config_provider = str(model_cfg.get("provider") or "").strip().lower() or None - if current_config_provider == "auto": - current_config_provider = None - current_config_base_url = str(model_cfg.get("base_url") or "").strip() - - # Detect credentials from other CLI tools - detected_creds = detect_external_credentials() - if detected_creds: - print_info("Detected existing credentials:") - for cred in detected_creds: - if cred["provider"] == "openai-codex": - print_success(f' * {cred["label"]} -- select "OpenAI Codex" to use it') - else: - print_info(f" * {cred['label']}") + # Delegate to the shared hermes model flow — handles provider picker, + # credential prompting, model selection, and config persistence. + from hermes_cli.main import select_provider_and_model + try: + select_provider_and_model() + except (SystemExit, KeyboardInterrupt): print() + print_info("Provider setup skipped.") + except Exception as exc: + logger.debug("select_provider_and_model error during setup: %s", exc) + print_warning(f"Provider setup encountered an error: {exc}") + print_info("You can try again later with: hermes model") - # Detect if any provider is already configured - has_any_provider = bool( - current_config_provider - or active_oauth - or existing_custom - or existing_or - or copilot_status.get("logged_in") - or copilot_acp_status.get("logged_in") - ) + # Re-sync the wizard's config dict from what cmd_model saved to disk. + # This is critical: cmd_model writes to disk via its own load/save cycle, + # and the wizard's final save_config(config) must not overwrite those + # changes with stale values (#4172). 
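To see why the re-sync below matters, a toy illustration of the overwrite hazard described in the comment above. Provider and model names are hypothetical, and plain dicts stand in for the real config files.

```python
config = {"model": {"provider": "openrouter", "default": "old-model"}}  # wizard's stale copy
disk = {"model": {"provider": "zai", "default": "glm-4.7"}}             # what cmd_model saved

# Without pulling fresh values first, the wizard's final save_config(config)
# would write the stale openrouter/old-model pair back over cmd_model's save.
config["model"] = disk.get("model", config.get("model"))
assert config["model"]["provider"] == "zai"
```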
+ _refreshed = load_config() + config["model"] = _refreshed.get("model", config.get("model")) + if _refreshed.get("custom_providers"): + config["custom_providers"] = _refreshed["custom_providers"] - # Build "keep current" label - if current_config_provider == "custom": - custom_label = current_config_base_url or existing_custom - keep_label = ( - f"Keep current (Custom: {custom_label})" - if custom_label - else "Keep current (Custom)" - ) - elif current_config_provider == "openrouter": - keep_label = "Keep current (OpenRouter)" - elif current_config_provider and current_config_provider in PROVIDER_REGISTRY: - keep_label = f"Keep current ({PROVIDER_REGISTRY[current_config_provider].name})" - elif active_oauth and active_oauth in PROVIDER_REGISTRY: - keep_label = f"Keep current ({PROVIDER_REGISTRY[active_oauth].name})" - elif existing_custom: - keep_label = f"Keep current (Custom: {existing_custom})" - elif existing_or: - keep_label = "Keep current (OpenRouter)" - else: - keep_label = None # No provider configured — don't show "Keep current" + # Derive the selected provider for downstream steps (vision setup). + selected_provider = None + _m = config.get("model") + if isinstance(_m, dict): + selected_provider = _m.get("provider") - provider_choices = [ - "OpenRouter API key (100+ models, pay-per-use)", - "Login with Nous Portal (Nous Research subscription — OAuth)", - "Login with OpenAI Codex", - "Custom OpenAI-compatible endpoint (self-hosted / VLLM / etc.)", - "Z.AI / GLM (Zhipu AI models)", - "Kimi / Moonshot (Kimi coding models)", - "MiniMax (global endpoint)", - "MiniMax China (mainland China endpoint)", - "Kilo Code (Kilo Gateway API)", - "Anthropic (Claude models — API key or Claude Code subscription)", - "AI Gateway (Vercel — 200+ models, pay-per-use)", - "Alibaba Cloud / DashScope (Qwen models via Anthropic-compatible API)", - "OpenCode Zen (35+ curated models, pay-as-you-go)", - "OpenCode Go (open models, $10/month subscription)", - "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)", - "GitHub Copilot ACP (spawns `copilot --acp --stdio`)", - "Hugging Face Inference Providers (20+ open models)", - ] - if keep_label: - provider_choices.append(keep_label) - # Default to "Keep current" if a provider exists, otherwise OpenRouter (most common) - default_provider = len(provider_choices) - 1 if has_any_provider else 0 - - if not has_any_provider: - print_warning("An inference provider is required for Hermes to work.") - print() - - provider_idx = prompt_choice( - "Select your inference provider:", provider_choices, default_provider - ) - - # Track which provider was selected for model step - selected_provider = ( - None # "nous", "openai-codex", "openrouter", "custom", or None (keep) - ) - selected_base_url = None # deferred until after model selection - nous_models = [] # populated if Nous login succeeds - - if provider_idx == 0: # OpenRouter - selected_provider = "openrouter" - print() - print_header("OpenRouter API Key") - print_info("OpenRouter provides access to 100+ models from multiple providers.") - print_info("Get your API key at: https://openrouter.ai/keys") - - if existing_or: - print_info(f"Current: {existing_or[:8]}... 
(configured)") - if prompt_yes_no("Update OpenRouter API key?", False): - api_key = prompt(" OpenRouter API key", password=True) - if api_key: - save_env_value("OPENROUTER_API_KEY", api_key) - print_success("OpenRouter API key updated") - else: - api_key = prompt(" OpenRouter API key", password=True) - if api_key: - save_env_value("OPENROUTER_API_KEY", api_key) - print_success("OpenRouter API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear any custom endpoint if switching to OpenRouter - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - - # Update config.yaml and deactivate any OAuth provider so the - # resolver doesn't keep returning the old provider (e.g. Codex). + # ── Same-provider fallback & rotation setup ── + if _supports_same_provider_pool_setup(selected_provider): try: - from hermes_cli.auth import deactivate_provider + from types import SimpleNamespace + from agent.credential_pool import load_pool + from hermes_cli.auth_commands import auth_add_command - deactivate_provider() - except Exception: - pass - import yaml - - config_path = ( - Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) / "config.yaml" - ) - try: - disk_cfg = {} - if config_path.exists(): - disk_cfg = yaml.safe_load(config_path.read_text()) or {} - model_section = disk_cfg.get("model", {}) - if isinstance(model_section, str): - model_section = {"default": model_section} - model_section["provider"] = "openrouter" - model_section.pop("base_url", None) # OpenRouter uses default URL - disk_cfg["model"] = model_section - config_path.write_text(yaml.safe_dump(disk_cfg, sort_keys=False)) - _set_model_provider(config, "openrouter") - except Exception as e: - logger.debug("Could not save provider to config.yaml: %s", e) - - elif provider_idx == 1: # Nous Portal (OAuth) - selected_provider = "nous" - print() - print_header("Nous Portal Login") - print_info("This will open your browser to authenticate with Nous Portal.") - print_info("You'll need a Nous Research account with an active subscription.") - print() - - try: - from hermes_cli.auth import _login_nous - import argparse - - mock_args = argparse.Namespace( - portal_url=None, - inference_url=None, - client_id=None, - scope=None, - no_browser=False, - timeout=15.0, - ca_bundle=None, - insecure=False, - ) - pconfig = PROVIDER_REGISTRY["nous"] - _login_nous(mock_args, pconfig) - _sync_model_from_disk(config) - - # Fetch models for the selection step - try: - creds = resolve_nous_runtime_credentials( - min_key_ttl_seconds=5 * 60, - timeout_seconds=15.0, - ) - # Use curated model list instead of full /models dump - from hermes_cli.models import _PROVIDER_MODELS - nous_models = _PROVIDER_MODELS.get("nous", []) - except Exception as e: - logger.debug("Could not fetch Nous models after login: %s", e) - - except SystemExit: - print_warning("Nous Portal login was cancelled or failed.") - print_info("You can try again later with: hermes model") - selected_provider = None - except Exception as e: - print_error(f"Login failed: {e}") - print_info("You can try again later with: hermes model") - selected_provider = None - - elif provider_idx == 2: # OpenAI Codex - selected_provider = "openai-codex" - print() - print_header("OpenAI Codex Login") - print() - - try: - import argparse - - mock_args = argparse.Namespace() - _login_openai_codex(mock_args, PROVIDER_REGISTRY["openai-codex"]) - # Clear custom endpoint vars that would override provider routing. 
- if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL) - _set_model_provider(config, "openai-codex", DEFAULT_CODEX_BASE_URL) - except SystemExit: - print_warning("OpenAI Codex login was cancelled or failed.") - print_info("You can try again later with: hermes model") - selected_provider = None - except Exception as e: - print_error(f"Login failed: {e}") - print_info("You can try again later with: hermes model") - selected_provider = None - - elif provider_idx == 3: # Custom endpoint - selected_provider = "custom" - print() - print_header("Custom OpenAI-Compatible Endpoint") - print_info("Works with any API that follows OpenAI's chat completions spec") - print() - - # Reuse the shared custom endpoint flow from `hermes model`. - # This handles: URL/key/model/context-length prompts, endpoint probing, - # env saving, config.yaml updates, and custom_providers persistence. - from hermes_cli.main import _model_flow_custom - _model_flow_custom(config) - # _model_flow_custom handles model selection, config, env vars, - # and custom_providers. Keep selected_provider = "custom" so - # the model selection step below is skipped (line 1631 check) - # but vision and TTS setup still run. - - elif provider_idx == 4: # Z.AI / GLM - selected_provider = "zai" - print() - print_header("Z.AI / GLM API Key") - pconfig = PROVIDER_REGISTRY["zai"] - print_info(f"Provider: {pconfig.name}") - print_info("Get your API key at: https://open.bigmodel.cn/") - print() - - existing_key = get_env_value("GLM_API_KEY") or get_env_value("ZAI_API_KEY") - api_key = existing_key # will be overwritten if user enters a new one - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - new_key = prompt(" GLM API key", password=True) - if new_key: - api_key = new_key - save_env_value("GLM_API_KEY", api_key) - print_success("GLM API key updated") - else: - api_key = prompt(" GLM API key", password=True) - if api_key: - save_env_value("GLM_API_KEY", api_key) - print_success("GLM API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Detect the correct z.ai endpoint for this key. - # Z.AI has separate billing for general vs coding plans and - # global vs China endpoints — we probe to find the right one. - zai_base_url = pconfig.inference_base_url - if api_key: + pool = load_pool(selected_provider) + entries = pool.entries() + entry_count = len(entries) + manual_count = sum(1 for entry in entries if str(getattr(entry, "source", "")).startswith("manual")) + auto_count = entry_count - manual_count print() - print_info("Detecting your z.ai endpoint...") - from hermes_cli.auth import detect_zai_endpoint - - detected = detect_zai_endpoint(api_key) - if detected: - zai_base_url = detected["base_url"] - print_success(f"Detected: {detected['label']} endpoint") - print_info(f" URL: {detected['base_url']}") - if detected["id"].startswith("coding"): - print_info( - f" Note: Coding Plan endpoint detected (default model: {detected['model']}). " - f"GLM-5 may still be available depending on your plan tier." 
- ) - save_env_value("GLM_BASE_URL", zai_base_url) - else: - print_warning("Could not verify any z.ai endpoint with this key.") - print_info(f" Using default: {zai_base_url}") + print_header("Same-Provider Fallback & Rotation") + print_info( + "Hermes can keep multiple credentials for one provider and rotate between" + ) + print_info( + "them when a credential is exhausted or rate-limited. This preserves" + ) + print_info( + "your primary provider while reducing interruptions from quota issues." + ) + print() + if auto_count > 0: print_info( - " If you get billing errors, check your plan at https://open.bigmodel.cn/" + f"Current pooled credentials for {selected_provider}: {entry_count} " + f"({manual_count} manual, {auto_count} auto-detected from env/shared auth)" ) - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "zai", zai_base_url) - selected_base_url = zai_base_url - - elif provider_idx == 5: # Kimi / Moonshot - selected_provider = "kimi-coding" - print() - print_header("Kimi / Moonshot API Key") - pconfig = PROVIDER_REGISTRY["kimi-coding"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://platform.moonshot.cn/") - print() - - existing_key = get_env_value("KIMI_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" Kimi API key", password=True) - if api_key: - save_env_value("KIMI_API_KEY", api_key) - print_success("Kimi API key updated") - else: - api_key = prompt(" Kimi API key", password=True) - if api_key: - save_env_value("KIMI_API_KEY", api_key) - print_success("Kimi API key saved") else: - print_warning("Skipped - agent won't work without an API key") + print_info(f"Current pooled credentials for {selected_provider}: {entry_count}") - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "kimi-coding", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url + while prompt_yes_no("Add another credential for same-provider fallback?", False): + auth_add_command( + SimpleNamespace( + provider=selected_provider, + auth_type="", + label=None, + api_key=None, + portal_url=None, + inference_url=None, + client_id=None, + scope=None, + no_browser=False, + timeout=15.0, + insecure=False, + ca_bundle=None, + min_key_ttl_seconds=5 * 60, + ) + ) + pool = load_pool(selected_provider) + entry_count = len(pool.entries()) + print_info(f"Provider pool now has {entry_count} credential(s).") - elif provider_idx == 6: # MiniMax - selected_provider = "minimax" - print() - print_header("MiniMax API Key") - pconfig = PROVIDER_REGISTRY["minimax"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://platform.minimaxi.com/") - print() - - existing_key = get_env_value("MINIMAX_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... 
(configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" MiniMax API key", password=True) - if api_key: - save_env_value("MINIMAX_API_KEY", api_key) - print_success("MiniMax API key updated") - else: - api_key = prompt(" MiniMax API key", password=True) - if api_key: - save_env_value("MINIMAX_API_KEY", api_key) - print_success("MiniMax API key saved") + if entry_count > 1: + strategy_labels = [ + "Fill-first / sticky — keep using the first healthy credential until it is exhausted", + "Round robin — rotate to the next healthy credential after each selection", + "Random — pick a random healthy credential each time", + ] + current_strategy = _get_credential_pool_strategies(config).get(selected_provider, "fill_first") + default_strategy_idx = { + "fill_first": 0, + "round_robin": 1, + "random": 2, + }.get(current_strategy, 0) + strategy_idx = prompt_choice( + "Select same-provider rotation strategy:", + strategy_labels, + default_strategy_idx, + ) + strategy_value = ["fill_first", "round_robin", "random"][strategy_idx] + _set_credential_pool_strategy(config, selected_provider, strategy_value) + print_success(f"Saved {selected_provider} rotation strategy: {strategy_value}") else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "minimax", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 7: # MiniMax China - selected_provider = "minimax-cn" - print() - print_header("MiniMax China API Key") - pconfig = PROVIDER_REGISTRY["minimax-cn"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://platform.minimaxi.com/") - print() - - existing_key = get_env_value("MINIMAX_CN_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" MiniMax CN API key", password=True) - if api_key: - save_env_value("MINIMAX_CN_API_KEY", api_key) - print_success("MiniMax CN API key updated") - else: - api_key = prompt(" MiniMax CN API key", password=True) - if api_key: - save_env_value("MINIMAX_CN_API_KEY", api_key) - print_success("MiniMax CN API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "minimax-cn", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 8: # Kilo Code - selected_provider = "kilocode" - print() - print_header("Kilo Code API Key") - pconfig = PROVIDER_REGISTRY["kilocode"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://kilo.ai") - print() - - existing_key = get_env_value("KILOCODE_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... 
(configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" Kilo Code API key", password=True) - if api_key: - save_env_value("KILOCODE_API_KEY", api_key) - print_success("Kilo Code API key updated") - else: - api_key = prompt(" Kilo Code API key", password=True) - if api_key: - save_env_value("KILOCODE_API_KEY", api_key) - print_success("Kilo Code API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "kilocode", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 9: # Anthropic - selected_provider = "anthropic" - print() - print_header("Anthropic Authentication") - from hermes_cli.auth import PROVIDER_REGISTRY - from hermes_cli.config import save_anthropic_api_key, save_anthropic_oauth_token - pconfig = PROVIDER_REGISTRY["anthropic"] - - # Check ALL credential sources - import os as _os - from agent.anthropic_adapter import ( - read_claude_code_credentials, is_claude_code_token_valid, - run_oauth_setup_token, - ) - cc_creds = read_claude_code_credentials() - cc_valid = bool(cc_creds and is_claude_code_token_valid(cc_creds)) - - existing_key = ( - get_env_value("ANTHROPIC_TOKEN") - or get_env_value("ANTHROPIC_API_KEY") - or _os.getenv("CLAUDE_CODE_OAUTH_TOKEN", "") - ) - - has_creds = bool(existing_key) or cc_valid - needs_auth = not has_creds - - if has_creds: - if existing_key: - print_info(f"Current credentials: {existing_key[:12]}...") - elif cc_valid: - print_success("Found valid Claude Code credentials (auto-detected)") - - auth_choices = [ - "Use existing credentials", - "Reauthenticate (new OAuth login)", - "Cancel", - ] - choice_idx = prompt_choice("What would you like to do?", auth_choices, 0) - if choice_idx == 1: - needs_auth = True - elif choice_idx == 2: - pass # fall through to provider config - - if needs_auth: - auth_choices = [ - "Claude Pro/Max subscription (OAuth login)", - "Anthropic API key (pay-per-token)", - ] - auth_idx = prompt_choice("Choose authentication method:", auth_choices, 0) - - if auth_idx == 0: - # OAuth setup-token flow - try: - print() - print_info("Running 'claude setup-token' — follow the prompts below.") - print_info("A browser window will open for you to authorize access.") - print() - token = run_oauth_setup_token() - if token: - save_anthropic_oauth_token(token, save_fn=save_env_value) - print_success("OAuth credentials saved") - else: - # Subprocess completed but no token auto-detected - print() - token = prompt("Paste setup-token here (if displayed above)", password=True) - if token: - save_anthropic_oauth_token(token, save_fn=save_env_value) - print_success("Setup-token saved") - else: - print_warning("Skipped — agent won't work without credentials") - except FileNotFoundError: - print() - print_info("The 'claude' CLI is required for OAuth login.") - print() - print_info("To install: npm install -g @anthropic-ai/claude-code") - print_info("Then run: claude setup-token") - print_info("Or paste an existing setup-token below:") - print() - token = prompt("Setup-token (sk-ant-oat-...)", password=True) - if token: - save_anthropic_oauth_token(token, save_fn=save_env_value) - print_success("Setup-token saved") - else: - print_warning("Skipped — install Claude Code and re-run setup") - else: - print() - print_info("Get an API key at: 
https://console.anthropic.com/settings/keys") - print() - api_key = prompt("API key (sk-ant-...)", password=True) - if api_key: - save_anthropic_api_key(api_key, save_fn=save_env_value) - print_success("API key saved") - else: - print_warning("Skipped — agent won't work without credentials") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - # Don't save base_url for Anthropic — resolve_runtime_provider() - # always hardcodes it. Stale base_urls contaminate other providers. - _set_model_provider(config, "anthropic") - selected_base_url = "" - - elif provider_idx == 10: # AI Gateway - selected_provider = "ai-gateway" - print() - print_header("AI Gateway API Key") - pconfig = PROVIDER_REGISTRY["ai-gateway"] - print_info(f"Provider: {pconfig.name}") - print_info("Get your API key at: https://vercel.com/docs/ai-gateway") - print() - - existing_key = get_env_value("AI_GATEWAY_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" AI Gateway API key", password=True) - if api_key: - save_env_value("AI_GATEWAY_API_KEY", api_key) - print_success("AI Gateway API key updated") - else: - api_key = prompt(" AI Gateway API key", password=True) - if api_key: - save_env_value("AI_GATEWAY_API_KEY", api_key) - print_success("AI Gateway API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _update_config_for_provider("ai-gateway", pconfig.inference_base_url, default_model="anthropic/claude-opus-4.6") - _set_model_provider(config, "ai-gateway", pconfig.inference_base_url) - - elif provider_idx == 11: # Alibaba Cloud / DashScope - selected_provider = "alibaba" - print() - print_header("Alibaba Cloud / DashScope API Key") - pconfig = PROVIDER_REGISTRY["alibaba"] - print_info(f"Provider: {pconfig.name}") - print_info("Get your API key at: https://modelstudio.console.alibabacloud.com/") - print() - - existing_key = get_env_value("DASHSCOPE_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - new_key = prompt(" DashScope API key", password=True) - if new_key: - save_env_value("DASHSCOPE_API_KEY", new_key) - print_success("DashScope API key updated") - else: - new_key = prompt(" DashScope API key", password=True) - if new_key: - save_env_value("DASHSCOPE_API_KEY", new_key) - print_success("DashScope API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _update_config_for_provider("alibaba", pconfig.inference_base_url, default_model="qwen3.5-plus") - _set_model_provider(config, "alibaba", pconfig.inference_base_url) - - elif provider_idx == 12: # OpenCode Zen - selected_provider = "opencode-zen" - print() - print_header("OpenCode Zen API Key") - pconfig = PROVIDER_REGISTRY["opencode-zen"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://opencode.ai/auth") - print() - - existing_key = get_env_value("OPENCODE_ZEN_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... 
(configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" OpenCode Zen API key", password=True) - if api_key: - save_env_value("OPENCODE_ZEN_API_KEY", api_key) - print_success("OpenCode Zen API key updated") - else: - api_key = prompt(" OpenCode Zen API key", password=True) - if api_key: - save_env_value("OPENCODE_ZEN_API_KEY", api_key) - print_success("OpenCode Zen API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "opencode-zen", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 13: # OpenCode Go - selected_provider = "opencode-go" - print() - print_header("OpenCode Go API Key") - pconfig = PROVIDER_REGISTRY["opencode-go"] - print_info(f"Provider: {pconfig.name}") - print_info(f"Base URL: {pconfig.inference_base_url}") - print_info("Get your API key at: https://opencode.ai/auth") - print() - - existing_key = get_env_value("OPENCODE_GO_API_KEY") - if existing_key: - print_info(f"Current: {existing_key[:8]}... (configured)") - if prompt_yes_no("Update API key?", False): - api_key = prompt(" OpenCode Go API key", password=True) - if api_key: - save_env_value("OPENCODE_GO_API_KEY", api_key) - print_success("OpenCode Go API key updated") - else: - api_key = prompt(" OpenCode Go API key", password=True) - if api_key: - save_env_value("OPENCODE_GO_API_KEY", api_key) - print_success("OpenCode Go API key saved") - else: - print_warning("Skipped - agent won't work without an API key") - - # Clear custom endpoint vars if switching - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "opencode-go", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 14: # GitHub Copilot - selected_provider = "copilot" - print() - print_header("GitHub Copilot") - pconfig = PROVIDER_REGISTRY["copilot"] - print_info("Hermes can use GITHUB_TOKEN, GH_TOKEN, or your gh CLI login.") - print_info(f"Base URL: {pconfig.inference_base_url}") - print() - - copilot_creds = resolve_api_key_provider_credentials("copilot") - source = copilot_creds.get("source", "") - token = copilot_creds.get("api_key", "") - if token: - if source in ("GITHUB_TOKEN", "GH_TOKEN"): - print_info(f"Current: {token[:8]}... 
({source})") - elif source == "gh auth token": - print_info("Current: authenticated via `gh auth token`") - else: - print_info("Current: GitHub token configured") - else: - api_key = prompt(" GitHub token", password=True) - if api_key: - save_env_value("GITHUB_TOKEN", api_key) - print_success("GitHub token saved") - else: - print_warning("Skipped - agent won't work without a GitHub token or gh auth login") - - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "copilot", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 15: # GitHub Copilot ACP - selected_provider = "copilot-acp" - print() - print_header("GitHub Copilot ACP") - pconfig = PROVIDER_REGISTRY["copilot-acp"] - print_info("Hermes will start `copilot --acp --stdio` for each request.") - print_info("Use HERMES_COPILOT_ACP_COMMAND or COPILOT_CLI_PATH to override the command.") - print_info(f"Base marker: {pconfig.inference_base_url}") - print() - - if existing_custom: - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "copilot-acp", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - elif provider_idx == 16: # Hugging Face Inference Providers - selected_provider = "huggingface" - print() - print_header("Hugging Face API Token") - pconfig = PROVIDER_REGISTRY["huggingface"] - print_info(f"Provider: {pconfig.name}") - print_info("Get your token at: https://huggingface.co/settings/tokens") - print_info("Required permission: 'Make calls to Inference Providers'") - print() - - api_key = prompt(" HF Token", password=True) - if api_key: - save_env_value("HF_TOKEN", api_key) - # Clear OpenRouter env vars to prevent routing confusion - save_env_value("OPENAI_BASE_URL", "") - save_env_value("OPENAI_API_KEY", "") - _set_model_provider(config, "huggingface", pconfig.inference_base_url) - selected_base_url = pconfig.inference_base_url - - # else: provider_idx == 17 (Keep current) — only shown when a provider already exists - # Normalize "keep current" to an explicit provider so downstream logic - # doesn't fall back to the generic OpenRouter/static-model path. - if selected_provider is None: - if current_config_provider: - selected_provider = current_config_provider - elif active_oauth and active_oauth in PROVIDER_REGISTRY: - selected_provider = active_oauth - elif existing_custom: - selected_provider = "custom" - elif existing_or: - selected_provider = "openrouter" + _set_credential_pool_strategy(config, selected_provider, "fill_first") + except Exception as exc: + logger.debug("Could not configure same-provider fallback in setup: %s", exc) # ── Vision & Image Analysis Setup ── # Keep setup aligned with the actual runtime resolver the vision tools use. 
@@ -1630,7 +1014,9 @@ def setup_model_provider(config: dict): _oai_key = prompt(_api_key_label, password=True).strip() if _oai_key: save_env_value("OPENAI_API_KEY", _oai_key) - save_env_value("OPENAI_BASE_URL", _base_url) + # Save vision base URL to config (not .env — only secrets go there) + _vaux = config.setdefault("auxiliary", {}).setdefault("vision", {}) + _vaux["base_url"] = _base_url if "api.openai.com" in _base_url.lower(): _oai_vision_models = ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"] _vm_choices = _oai_vision_models + ["Use default (gpt-4o-mini)"] @@ -1652,155 +1038,6 @@ def setup_model_provider(config: dict): else: print_info("Skipped — add later with 'hermes setup' or configure AUXILIARY_VISION_* settings") - # ── Model Selection (adapts based on provider) ── - if selected_provider != "custom": # Custom already prompted for model name - print_header("Default Model") - - _raw_model = config.get("model", "anthropic/claude-opus-4.6") - current_model = ( - _raw_model.get("default", "anthropic/claude-opus-4.6") - if isinstance(_raw_model, dict) - else (_raw_model or "anthropic/claude-opus-4.6") - ) - print_info(f"Current: {current_model}") - - if selected_provider == "nous" and nous_models: - # Dynamic model list from Nous Portal - model_choices = [f"{m}" for m in nous_models] - model_choices.append("Custom model") - model_choices.append(f"Keep current ({current_model})") - - # Post-login validation: warn if current model might not be available - if current_model and current_model not in nous_models: - print_warning( - f"Your current model ({current_model}) may not be available via Nous Portal." - ) - print_info( - "Select a model from the list, or keep current to use it anyway." - ) - print() - - model_idx = prompt_choice( - "Select default model:", model_choices, len(model_choices) - 1 - ) - - if model_idx < len(nous_models): - _set_default_model(config, nous_models[model_idx]) - elif model_idx == len(model_choices) - 2: # Custom - model_name = prompt(" Model name") - if model_name: - _set_default_model(config, model_name) - # else: keep current - - elif selected_provider == "nous": - # Nous login succeeded but model fetch failed — prompt manually - # instead of falling through to the OpenRouter static list. 
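For clarity, the vision change at the top of this hunk amounts to the following persistence split: the endpoint moves into config.yaml while only the secret stays in .env. A sketch with a hypothetical endpoint value; `load_config`/`save_config` are the helpers from hermes_cli.config used above, and `save_env_value` is the .env helper used throughout this file (import path omitted).

```python
from hermes_cli.config import load_config, save_config

_cfg = load_config()
# Endpoint is configuration, not a secret: it belongs in config.yaml.
_cfg.setdefault("auxiliary", {}).setdefault("vision", {})["base_url"] = (
    "https://api.openai.com/v1"
)
save_config(_cfg)

# The API key is a secret, so it still goes through the .env helper.
save_env_value("OPENAI_API_KEY", "sk-...")
```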
- print_warning("Could not fetch available models from Nous Portal.") - print_info("Enter a Nous model name manually (e.g., claude-opus-4-6).") - custom = prompt(f" Model name (Enter to keep '{current_model}')") - if custom: - _set_default_model(config, custom) - elif selected_provider == "openai-codex": - from hermes_cli.codex_models import get_codex_model_ids - - codex_token = None - try: - codex_creds = resolve_codex_runtime_credentials() - codex_token = codex_creds.get("api_key") - except Exception as exc: - logger.debug("Could not resolve Codex runtime credentials for model list: %s", exc) - - codex_models = get_codex_model_ids(access_token=codex_token) - - model_choices = codex_models + [f"Keep current ({current_model})"] - default_codex = 0 - if current_model in codex_models: - default_codex = codex_models.index(current_model) - elif current_model: - default_codex = len(model_choices) - 1 - - model_idx = prompt_choice( - "Select default model:", model_choices, default_codex - ) - if model_idx < len(codex_models): - _set_default_model(config, codex_models[model_idx]) - elif model_idx == len(codex_models): - custom = prompt("Enter model name") - if custom: - _set_default_model(config, custom) - _update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL) - _set_model_provider(config, "openai-codex", DEFAULT_CODEX_BASE_URL) - elif selected_provider == "copilot-acp": - _setup_provider_model_selection( - config, selected_provider, current_model, - prompt_choice, prompt, - ) - model_cfg = _model_config_dict(config) - model_cfg["api_mode"] = "chat_completions" - config["model"] = model_cfg - elif selected_provider in ("copilot", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "ai-gateway", "opencode-zen", "opencode-go", "alibaba"): - _setup_provider_model_selection( - config, selected_provider, current_model, - prompt_choice, prompt, - ) - elif selected_provider == "anthropic": - # Try live model list first, fall back to static - from hermes_cli.models import provider_model_ids - live_models = provider_model_ids("anthropic") - anthropic_models = live_models if live_models else [ - "claude-opus-4-6", - "claude-sonnet-4-6", - "claude-haiku-4-5-20251001", - ] - model_choices = list(anthropic_models) - model_choices.append("Custom model") - model_choices.append(f"Keep current ({current_model})") - - keep_idx = len(model_choices) - 1 - model_idx = prompt_choice("Select default model:", model_choices, keep_idx) - - if model_idx < len(anthropic_models): - _set_default_model(config, anthropic_models[model_idx]) - elif model_idx == len(anthropic_models): - custom = prompt("Enter model name (e.g., claude-sonnet-4-20250514)") - if custom: - _set_default_model(config, custom) - # else: keep current - else: - # Static list for OpenRouter / fallback (from canonical list) - from hermes_cli.models import model_ids, menu_labels - - ids = model_ids() - model_choices = menu_labels() + [ - "Custom model", - f"Keep current ({current_model})", - ] - - keep_idx = len(model_choices) - 1 - model_idx = prompt_choice("Select default model:", model_choices, keep_idx) - - if model_idx < len(ids): - _set_default_model(config, ids[model_idx]) - elif model_idx == len(ids): # Custom - custom = prompt("Enter model name (e.g., anthropic/claude-opus-4.6)") - if custom: - _set_default_model(config, custom) - # else: Keep current - - _final_model = config.get("model", "") - if _final_model: - _display = ( - _final_model.get("default", _final_model) - if isinstance(_final_model, dict) - else _final_model - ) - 
print_success(f"Model set to: {_display}") - - # Write provider+base_url to config.yaml only after model selection is complete. - # This prevents a race condition where the gateway picks up a new provider - # before the model name has been updated to match. - if selected_provider in ("copilot-acp", "copilot", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic") and selected_base_url is not None: - _update_config_for_provider(selected_provider, selected_base_url) save_config(config) @@ -3121,8 +2358,7 @@ def _skip_configured_section( _OPENCLAW_SCRIPT = ( - PROJECT_ROOT - / "optional-skills" + get_optional_skills_dir(PROJECT_ROOT / "optional-skills") / "migration" / "openclaw-migration" / "scripts" diff --git a/hermes_cli/tools_config.py b/hermes_cli/tools_config.py index 63e26d362..8b443d5dc 100644 --- a/hermes_cli/tools_config.py +++ b/hermes_cli/tools_config.py @@ -597,7 +597,9 @@ def _toolset_has_keys(ts_key: str) -> bool: if cat: for provider in cat.get("providers", []): env_vars = provider.get("env_vars", []) - if env_vars and all(get_env_value(e["key"]) for e in env_vars): + if not env_vars: + return True # No-key provider (e.g. Local Browser, Edge TTS) + if all(get_env_value(e["key"]) for e in env_vars): return True return False @@ -981,8 +983,13 @@ def _configure_simple_requirements(ts_key: str): key_label = " OPENAI_API_KEY" if "api.openai.com" in base_url.lower() else " API key" api_key = _prompt(key_label, password=True) if api_key and api_key.strip(): - save_env_value("OPENAI_BASE_URL", base_url) save_env_value("OPENAI_API_KEY", api_key.strip()) + # Save vision base URL to config (not .env — only secrets go there) + from hermes_cli.config import load_config, save_config + _cfg = load_config() + _aux = _cfg.setdefault("auxiliary", {}).setdefault("vision", {}) + _aux["base_url"] = base_url + save_config(_cfg) if "api.openai.com" in base_url.lower(): save_env_value("AUXILIARY_VISION_MODEL", "gpt-4o-mini") _print_success(" Saved") diff --git a/hermes_constants.py b/hermes_constants.py index 2bfc0a8c7..c28f6dc8f 100644 --- a/hermes_constants.py +++ b/hermes_constants.py @@ -17,6 +17,20 @@ def get_hermes_home() -> Path: return Path(os.getenv("HERMES_HOME", Path.home() / ".hermes")) +def get_optional_skills_dir(default: Path | None = None) -> Path: + """Return the optional-skills directory, honoring package-manager wrappers. + + Packaged installs may ship ``optional-skills`` outside the Python package + tree and expose it via ``HERMES_OPTIONAL_SKILLS``. + """ + override = os.getenv("HERMES_OPTIONAL_SKILLS", "").strip() + if override: + return Path(override) + if default is not None: + return default + return get_hermes_home() / "optional-skills" + + def get_hermes_dir(new_subpath: str, old_name: str) -> Path: """Resolve a Hermes subdirectory with backward compatibility. diff --git a/honcho_integration/cli.py b/honcho_integration/cli.py index ae09c3713..f6cbcedf6 100644 --- a/honcho_integration/cli.py +++ b/honcho_integration/cli.py @@ -10,16 +10,27 @@ import os import sys from pathlib import Path +from hermes_constants import get_hermes_home from honcho_integration.client import resolve_config_path, GLOBAL_CONFIG_PATH HOST = "hermes" def _config_path() -> Path: - """Return the active Honcho config path (instance-local or global).""" + """Return the active Honcho config path for reading (instance-local or global).""" return resolve_config_path() +def _local_config_path() -> Path: + """Return the instance-local Honcho config path for writing. 
+ + Always returns $HERMES_HOME/honcho.json so each profile/instance gets + its own config file. The global ~/.honcho/config.json is only used as + a read fallback (via resolve_config_path) for cross-app interop. + """ + return get_hermes_home() / "honcho.json" + + def _read_config() -> dict: path = _config_path() if path.exists(): @@ -31,7 +42,7 @@ def _read_config() -> dict: def _write_config(cfg: dict, path: Path | None = None) -> None: - path = path or _config_path() + path = path or _local_config_path() path.parent.mkdir(parents=True, exist_ok=True) path.write_text( json.dumps(cfg, indent=2, ensure_ascii=False) + "\n", @@ -95,13 +106,13 @@ def cmd_setup(args) -> None: """Interactive Honcho setup wizard.""" cfg = _read_config() - active_path = _config_path() + write_path = _local_config_path() + read_path = _config_path() print("\nHoncho memory setup\n" + "─" * 40) print(" Honcho gives Hermes persistent cross-session memory.") - if active_path != GLOBAL_CONFIG_PATH: - print(f" Instance config: {active_path}") - else: - print(" Config is shared with other hosts at ~/.honcho/config.json") + print(f" Config: {write_path}") + if read_path != write_path and read_path.exists(): + print(f" (seeding from existing config at {read_path})") print() if not _ensure_sdk_installed(): @@ -189,7 +200,7 @@ def cmd_setup(args) -> None: hermes_host.setdefault("saveMessages", True) _write_config(cfg) - print(f"\n Config written to {active_path}") + print(f"\n Config written to {write_path}") # Test connection print(" Testing connection... ", end="", flush=True) @@ -237,6 +248,7 @@ def cmd_status(args) -> None: cfg = _read_config() active_path = _config_path() + write_path = _local_config_path() if not cfg: print(f" No Honcho config found at {active_path}") @@ -259,6 +271,8 @@ def cmd_status(args) -> None: print(f" Workspace: {hcfg.workspace_id}") print(f" Host: {hcfg.host}") print(f" Config path: {active_path}") + if write_path != active_path: + print(f" Write path: {write_path} (instance-local)") print(f" AI peer: {hcfg.ai_peer}") print(f" User peer: {hcfg.peer_name or 'not set'}") print(f" Session key: {hcfg.resolve_session_name()}") diff --git a/model_tools.py b/model_tools.py index c651d93ed..15b8852bc 100644 --- a/model_tools.py +++ b/model_tools.py @@ -252,7 +252,7 @@ def get_tool_definitions( # Determine which tool names the caller wants tools_to_include: set = set() - if enabled_toolsets: + if enabled_toolsets is not None: for toolset_name in enabled_toolsets: if validate_toolset(toolset_name): resolved = resolve_toolset(toolset_name) diff --git a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py index ac99e2a6f..74e9d7dac 100644 --- a/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py +++ b/optional-skills/migration/openclaw-migration/scripts/openclaw_to_hermes.py @@ -2455,9 +2455,24 @@ class Migrator: notes.append("") notes.extend([ + "## IMPORTANT: Archive the OpenClaw Directory", + "", + "After migration, your OpenClaw directory still exists on disk with workspace", + "state files (todo.json, sessions, logs). If the Hermes agent discovers these", + "directories, it may read/write to them instead of the Hermes state, causing", + "confusion (e.g., cron jobs reading a different todo list than interactive sessions).", + "", + "**Strongly recommended:** Run `hermes claw cleanup` to rename the OpenClaw", + "directory to `.openclaw.pre-migration`. 
This prevents the agent from finding it.", + "The directory is renamed, not deleted — you can undo this at any time.", + "", + "If you skip this step and notice the agent getting confused about workspaces", + "or todo lists, run `hermes claw cleanup` to fix it.", + "", "## Hermes-Specific Setup", "", "After migration, you may want to:", + "- Run `hermes claw cleanup` to archive the OpenClaw directory (prevents state confusion)", "- Run `hermes setup` to configure any remaining settings", "- Run `hermes mcp list` to verify MCP servers were imported correctly", "- Run `hermes cron` to recreate scheduled tasks (see archive/cron-config.json)", diff --git a/packaging/homebrew/README.md b/packaging/homebrew/README.md new file mode 100644 index 000000000..e53d3fd0b --- /dev/null +++ b/packaging/homebrew/README.md @@ -0,0 +1,14 @@ +Homebrew packaging notes for Hermes Agent. + +Use `packaging/homebrew/hermes-agent.rb` as a tap or `homebrew-core` starting point. + +Key choices: +- Stable builds should target the semver-named sdist asset attached to each GitHub release, not the CalVer tag tarball. +- `faster-whisper` now lives in the `voice` extra, which keeps wheel-only transitive dependencies out of the base Homebrew formula. +- The wrapper exports `HERMES_BUNDLED_SKILLS`, `HERMES_OPTIONAL_SKILLS`, and `HERMES_MANAGED=homebrew` so packaged installs keep runtime assets and defer upgrades to Homebrew. + +Typical update flow: +1. Bump the formula `url`, `version`, and `sha256`. +2. Refresh Python resources with `brew update-python-resources --print-only hermes-agent`. +3. Keep `ignore_packages: %w[certifi cryptography pydantic]`. +4. Verify `brew audit --new --strict hermes-agent` and `brew test hermes-agent`. diff --git a/packaging/homebrew/hermes-agent.rb b/packaging/homebrew/hermes-agent.rb new file mode 100644 index 000000000..7c00fc6ac --- /dev/null +++ b/packaging/homebrew/hermes-agent.rb @@ -0,0 +1,48 @@ +class HermesAgent < Formula + include Language::Python::Virtualenv + + desc "Self-improving AI agent that creates skills from experience" + homepage "https://hermes-agent.nousresearch.com" + # Stable source should point at the semver-named sdist asset attached by + # scripts/release.py, not the CalVer tag tarball. + url "https://github.com/NousResearch/hermes-agent/releases/download/v2026.3.30/hermes_agent-0.6.0.tar.gz" + sha256 "" + license "MIT" + + depends_on "certifi" => :no_linkage + depends_on "cryptography" => :no_linkage + depends_on "libyaml" + depends_on "python@3.14" + + pypi_packages ignore_packages: %w[certifi cryptography pydantic] + + # Refresh resource stanzas after bumping the source url/version: + # brew update-python-resources --print-only hermes-agent + + def install + venv = virtualenv_create(libexec, "python3.14") + venv.pip_install resources + venv.pip_install buildpath + + pkgshare.install "skills", "optional-skills" + + %w[hermes hermes-agent hermes-acp].each do |exe| + next unless (libexec/"bin"/exe).exist? 
+ + (bin/exe).write_env_script( + libexec/"bin"/exe, + HERMES_BUNDLED_SKILLS: pkgshare/"skills", + HERMES_OPTIONAL_SKILLS: pkgshare/"optional-skills", + HERMES_MANAGED: "homebrew" + ) + end + end + + test do + assert_match "Hermes Agent v#{version}", shell_output("#{bin}/hermes version") + + managed = shell_output("#{bin}/hermes update 2>&1") + assert_match "managed by Homebrew", managed + assert_match "brew upgrade hermes-agent", managed + end +end diff --git a/pyproject.toml b/pyproject.toml index c3154d1ae..3cf339845 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,6 @@ dependencies = [ "fal-client>=0.13.1,<1", # Text-to-speech (Edge TTS is free, no API key needed) "edge-tts>=7.2.7,<8", - "faster-whisper>=1.0.0,<2", # Skills Hub (GitHub App JWT auth — optional, only needed for bot identity) "PyJWT[crypto]>=2.12.0,<3", # CVE-2026-32597 ] @@ -47,7 +46,13 @@ slack = ["slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"] matrix = ["matrix-nio[e2e]>=0.24.0,<1"] cli = ["simple-term-menu>=1.0,<2"] tts-premium = ["elevenlabs>=1.0,<2"] -voice = ["sounddevice>=0.4.6,<1", "numpy>=1.24.0,<3"] +voice = [ + # Local STT pulls in wheel-only transitive deps (ctranslate2, onnxruntime), + # so keep it out of the base install for source-build packagers like Homebrew. + "faster-whisper>=1.0.0,<2", + "sounddevice>=0.4.6,<1", + "numpy>=1.24.0,<3", +] pty = [ "ptyprocess>=0.7.0,<1; sys_platform != 'win32'", "pywinpty>=2.0.0,<3; sys_platform == 'win32'", diff --git a/run_agent.py b/run_agent.py index 794c9f67a..13278d94c 100644 --- a/run_agent.py +++ b/run_agent.py @@ -505,9 +505,11 @@ class AIAgent: honcho_config=None, iteration_budget: "IterationBudget" = None, fallback_model: Dict[str, Any] = None, + credential_pool=None, checkpoints_enabled: bool = False, checkpoint_max_snapshots: int = 50, pass_session_id: bool = False, + persist_session: bool = True, ): """ Initialize the AI Agent. @@ -573,6 +575,8 @@ class AIAgent: self.background_review_callback = None # Optional sync callback for gateway delivery self.skip_context_files = skip_context_files self.pass_session_id = pass_session_id + self.persist_session = persist_session + self._credential_pool = credential_pool self.log_prefix_chars = log_prefix_chars self.log_prefix = f"{log_prefix} " if log_prefix else "" # Store effective base URL for feature detection (prompt caching, reasoning, etc.) @@ -1700,7 +1704,10 @@ class AIAgent: """Save session state to both JSON log and SQLite on any exit path. Ensures conversations are never lost, even on errors or early returns. + Skipped when ``persist_session=False`` (ephemeral helper flows). 
""" + if not self.persist_session: + return self._apply_persist_user_message_override(messages) self._session_messages = messages self._save_session_log(messages) @@ -3770,6 +3777,93 @@ class AIAgent: self._is_anthropic_oauth = _is_oauth_token(new_token) return True + def _apply_client_headers_for_base_url(self, base_url: str) -> None: + from agent.auxiliary_client import _OR_HEADERS + + normalized = (base_url or "").lower() + if "openrouter" in normalized: + self._client_kwargs["default_headers"] = dict(_OR_HEADERS) + elif "api.githubcopilot.com" in normalized: + from hermes_cli.models import copilot_default_headers + + self._client_kwargs["default_headers"] = copilot_default_headers() + elif "api.kimi.com" in normalized: + self._client_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.3"} + else: + self._client_kwargs.pop("default_headers", None) + + def _swap_credential(self, entry) -> None: + runtime_key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "") + runtime_base = getattr(entry, "runtime_base_url", None) or getattr(entry, "base_url", None) or self.base_url + + if self.api_mode == "anthropic_messages": + from agent.anthropic_adapter import build_anthropic_client, _is_oauth_token + + try: + self._anthropic_client.close() + except Exception: + pass + + self._anthropic_api_key = runtime_key + self._anthropic_base_url = runtime_base + self._anthropic_client = build_anthropic_client(runtime_key, runtime_base) + self._is_anthropic_oauth = _is_oauth_token(runtime_key) if self.provider == "anthropic" else False + self.api_key = runtime_key + self.base_url = runtime_base + return + + self.api_key = runtime_key + self.base_url = runtime_base.rstrip("/") if isinstance(runtime_base, str) else runtime_base + self._client_kwargs["api_key"] = self.api_key + self._client_kwargs["base_url"] = self.base_url + self._apply_client_headers_for_base_url(self.base_url) + self._replace_primary_openai_client(reason="credential_rotation") + + def _recover_with_credential_pool( + self, + *, + status_code: Optional[int], + has_retried_429: bool, + ) -> tuple[bool, bool]: + """Attempt credential recovery via pool rotation. + + Returns (recovered, has_retried_429). + On 429: first occurrence retries same credential (sets flag True). + second consecutive 429 rotates to next credential (resets flag). + On 402: immediately rotates (billing exhaustion won't resolve with retry). + On 401: attempts token refresh before rotating. 
+ """ + pool = self._credential_pool + if pool is None or status_code is None: + return False, has_retried_429 + + if status_code == 402: + next_entry = pool.mark_exhausted_and_rotate(status_code=402) + if next_entry is not None: + logger.info(f"Credential 402 (billing) — rotated to pool entry {getattr(next_entry, 'id', '?')}") + self._swap_credential(next_entry) + return True, False + return False, has_retried_429 + + if status_code == 429: + if not has_retried_429: + return False, True + next_entry = pool.mark_exhausted_and_rotate(status_code=429) + if next_entry is not None: + logger.info(f"Credential 429 (rate limit) — rotated to pool entry {getattr(next_entry, 'id', '?')}") + self._swap_credential(next_entry) + return True, False + return False, True + + if status_code == 401: + refreshed = pool.try_refresh_current() + if refreshed is not None: + logger.info(f"Credential 401 — refreshed pool entry {getattr(refreshed, 'id', '?')}") + self._swap_credential(refreshed) + return True, has_retried_429 + + return False, has_retried_429 + def _anthropic_messages_create(self, api_kwargs: dict): if self.api_mode == "anthropic_messages": self._try_refresh_anthropic_client_credentials() @@ -6250,6 +6344,12 @@ class AIAgent: ) if len(messages) >= _orig_len: break # Cannot compress further + # Compression created a new session — clear the history + # reference so _flush_messages_to_session_db writes ALL + # compressed messages to the new session's SQLite, not + # skipping them because conversation_history is still the + # pre-compression length. + conversation_history = None # Re-estimate after compression _preflight_tokens = estimate_request_tokens_rough( messages, @@ -6449,6 +6549,7 @@ class AIAgent: codex_auth_retry_attempted = False anthropic_auth_retry_attempted = False nous_auth_retry_attempted = False + has_retried_429 = False restart_with_compressed_messages = False restart_with_length_continuation = False @@ -6884,6 +6985,7 @@ class AIAgent: if not self.quiet_mode: self._vprint(f"{self.log_prefix} 💾 Cache: {cached:,}/{prompt:,} tokens ({hit_pct:.0f}% hit, {written:,} written)") + has_retried_429 = False # Reset on success break # Success, exit retry loop except InterruptedError: @@ -6926,6 +7028,12 @@ class AIAgent: # prompt or prefill. Fall through to normal error path. status_code = getattr(api_error, "status_code", None) + recovered_with_pool, has_retried_429 = self._recover_with_credential_pool( + status_code=status_code, + has_retried_429=has_retried_429, + ) + if recovered_with_pool: + continue if ( self.api_mode == "codex_responses" and self.provider == "openai-codex" @@ -7050,6 +7158,7 @@ class AIAgent: compression_attempts += 1 if compression_attempts > max_compression_attempts: self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached for payload-too-large error.", force=True) + self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}413 compression failed after {max_compression_attempts} attempts.") self._persist_session(messages, conversation_history) return { @@ -7074,6 +7183,7 @@ class AIAgent: break else: self._vprint(f"{self.log_prefix}❌ Payload too large and cannot compress further.", force=True) + self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}413 payload too large. 
Cannot compress further.") self._persist_session(messages, conversation_history) return { @@ -7150,6 +7260,7 @@ class AIAgent: compression_attempts += 1 if compression_attempts > max_compression_attempts: self._vprint(f"{self.log_prefix}❌ Max compression attempts ({max_compression_attempts}) reached.", force=True) + self._vprint(f"{self.log_prefix} 💡 Try /new to start a fresh conversation, or /compress to retry compression.", force=True) logging.error(f"{self.log_prefix}Context compression failed after {max_compression_attempts} attempts.") self._persist_session(messages, conversation_history) return { @@ -7176,7 +7287,7 @@ class AIAgent: else: # Can't compress further and already at minimum tier self._vprint(f"{self.log_prefix}❌ Context length exceeded and cannot compress further.", force=True) - self._vprint(f"{self.log_prefix} 💡 The conversation has accumulated too much content.", force=True) + self._vprint(f"{self.log_prefix} 💡 The conversation has accumulated too much content. Try /new to start fresh, or /compress to manually trigger compression.", force=True) logging.error(f"{self.log_prefix}Context length exceeded: {approx_tokens:,} tokens. Cannot compress further.") self._persist_session(messages, conversation_history) return { @@ -7765,6 +7876,10 @@ class AIAgent: approx_tokens=self.context_compressor.last_prompt_tokens, task_id=effective_task_id, ) + # Compression created a new session — clear history so + # _flush_messages_to_session_db writes compressed messages + # to the new session (see preflight compression comment). + conversation_history = None # Save session log incrementally (so progress is visible even if interrupted) self._session_messages = messages diff --git a/scripts/release.py b/scripts/release.py index cafb30321..cfe360064 100755 --- a/scripts/release.py +++ b/scripts/release.py @@ -24,6 +24,7 @@ import argparse import json import os import re +import shutil import subprocess import sys from collections import defaultdict @@ -128,6 +129,16 @@ def git(*args, cwd=None): return result.stdout.strip() +def git_result(*args, cwd=None): + """Run a git command and return the full CompletedProcess.""" + return subprocess.run( + ["git"] + list(args), + capture_output=True, + text=True, + cwd=cwd or str(REPO_ROOT), + ) + + def get_last_tag(): """Get the most recent CalVer tag.""" tags = git("tag", "--list", "v20*", "--sort=-v:refname") @@ -136,6 +147,18 @@ def get_last_tag(): return None +def next_available_tag(base_tag: str) -> tuple[str, str]: + """Return a tag/calver pair, suffixing same-day releases when needed.""" + if not git("tag", "--list", base_tag): + return base_tag, base_tag.removeprefix("v") + + suffix = 2 + while git("tag", "--list", f"{base_tag}.{suffix}"): + suffix += 1 + tag_name = f"{base_tag}.{suffix}" + return tag_name, tag_name.removeprefix("v") + + def get_current_version(): """Read current semver from __init__.py.""" content = VERSION_FILE.read_text() @@ -192,6 +215,41 @@ def update_version_files(semver: str, calver_date: str): PYPROJECT_FILE.write_text(pyproject) +def build_release_artifacts(semver: str) -> list[Path]: + """Build sdist/wheel artifacts for the current release. + + Returns the artifact paths when the local environment has ``python -m build`` + available. If build tooling is missing or the build fails, returns an empty + list and lets the release proceed without attached Python artifacts. 
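+    Only artifacts whose filenames contain the semver are returned, so CalVer
+    tag names never leak into the attached release assets.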
+ """ + dist_dir = REPO_ROOT / "dist" + shutil.rmtree(dist_dir, ignore_errors=True) + + result = subprocess.run( + [sys.executable, "-m", "build", "--sdist", "--wheel"], + cwd=str(REPO_ROOT), + capture_output=True, + text=True, + ) + if result.returncode != 0: + print(" ⚠ Could not build Python release artifacts.") + stderr = result.stderr.strip() + stdout = result.stdout.strip() + if stderr: + print(f" {stderr.splitlines()[-1]}") + elif stdout: + print(f" {stdout.splitlines()[-1]}") + print(" Install the 'build' package to attach semver-named sdist/wheel assets.") + return [] + + artifacts = sorted(p for p in dist_dir.iterdir() if p.is_file()) + matching = [p for p in artifacts if semver in p.name] + if not matching: + print(" ⚠ Built artifacts did not match the expected release version.") + return [] + return matching + + def resolve_author(name: str, email: str) -> str: """Resolve a git author to a GitHub @mention.""" # Try email lookup first @@ -424,18 +482,10 @@ def main(): now = datetime.now() calver_date = f"{now.year}.{now.month}.{now.day}" - tag_name = f"v{calver_date}" - - # Check for existing tag with same date - existing = git("tag", "--list", tag_name) - if existing and not args.publish: - # Append a suffix for same-day releases - suffix = 2 - while git("tag", "--list", f"{tag_name}.{suffix}"): - suffix += 1 - tag_name = f"{tag_name}.{suffix}" - calver_date = f"{calver_date}.{suffix}" - print(f"Note: Tag {tag_name[:-2]} already exists, using {tag_name}") + base_tag = f"v{calver_date}" + tag_name, calver_date = next_available_tag(base_tag) + if tag_name != base_tag: + print(f"Note: Tag {base_tag} already exists, using {tag_name}") # Determine semver current_version = get_current_version() @@ -494,41 +544,83 @@ def main(): print(f" ✓ Updated version files to v{new_version} ({calver_date})") # Commit version bump - git("add", str(VERSION_FILE), str(PYPROJECT_FILE)) - git("commit", "-m", f"chore: bump version to v{new_version} ({calver_date})") + add_result = git_result("add", str(VERSION_FILE), str(PYPROJECT_FILE)) + if add_result.returncode != 0: + print(f" ✗ Failed to stage version files: {add_result.stderr.strip()}") + return + + commit_result = git_result( + "commit", "-m", f"chore: bump version to v{new_version} ({calver_date})" + ) + if commit_result.returncode != 0: + print(f" ✗ Failed to commit version bump: {commit_result.stderr.strip()}") + return print(f" ✓ Committed version bump") # Create annotated tag - git("tag", "-a", tag_name, "-m", - f"Hermes Agent v{new_version} ({calver_date})\n\nWeekly release") + tag_result = git_result( + "tag", "-a", tag_name, "-m", + f"Hermes Agent v{new_version} ({calver_date})\n\nWeekly release" + ) + if tag_result.returncode != 0: + print(f" ✗ Failed to create tag {tag_name}: {tag_result.stderr.strip()}") + return print(f" ✓ Created tag {tag_name}") # Push - push_result = git("push", "origin", "HEAD", "--tags") - print(f" ✓ Pushed to origin") + push_result = git_result("push", "origin", "HEAD", "--tags") + if push_result.returncode == 0: + print(f" ✓ Pushed to origin") + else: + print(f" ✗ Failed to push to origin: {push_result.stderr.strip()}") + print(" Continue manually after fixing access:") + print(" git push origin HEAD --tags") + + # Build semver-named Python artifacts so downstream packagers + # (e.g. Homebrew) can target them without relying on CalVer tag names. 
+ artifacts = build_release_artifacts(new_version) + if artifacts: + print(" ✓ Built release artifacts:") + for artifact in artifacts: + print(f" - {artifact.relative_to(REPO_ROOT)}") # Create GitHub release changelog_file = REPO_ROOT / ".release_notes.md" changelog_file.write_text(changelog) - result = subprocess.run( - ["gh", "release", "create", tag_name, - "--title", f"Hermes Agent v{new_version} ({calver_date})", - "--notes-file", str(changelog_file)], - capture_output=True, text=True, - cwd=str(REPO_ROOT), - ) + gh_cmd = [ + "gh", "release", "create", tag_name, + "--title", f"Hermes Agent v{new_version} ({calver_date})", + "--notes-file", str(changelog_file), + ] + gh_cmd.extend(str(path) for path in artifacts) - changelog_file.unlink(missing_ok=True) - - if result.returncode == 0: - print(f" ✓ GitHub release created: {result.stdout.strip()}") + gh_bin = shutil.which("gh") + if gh_bin: + result = subprocess.run( + gh_cmd, + capture_output=True, text=True, + cwd=str(REPO_ROOT), + ) else: - print(f" ✗ GitHub release failed: {result.stderr}") - print(f" Tag was created. Create the release manually:") - print(f" gh release create {tag_name} --title 'Hermes Agent v{new_version} ({calver_date})'") + result = None - print(f"\n 🎉 Release v{new_version} ({tag_name}) published!") + if result and result.returncode == 0: + changelog_file.unlink(missing_ok=True) + print(f" ✓ GitHub release created: {result.stdout.strip()}") + print(f"\n 🎉 Release v{new_version} ({tag_name}) published!") + else: + if result is None: + print(" ✗ GitHub release skipped: `gh` CLI not found.") + else: + print(f" ✗ GitHub release failed: {result.stderr.strip()}") + print(f" Release notes kept at: {changelog_file}") + print(f" Tag was created locally. Create the release manually:") + print( + f" gh release create {tag_name} --title 'Hermes Agent v{new_version} ({calver_date})' " + f"--notes-file .release_notes.md {' '.join(str(path) for path in artifacts)}" + ) + print(f"\n ✓ Release artifacts prepared for manual publish: v{new_version} ({tag_name})") else: print(f"\n{'='*60}") print(f" Dry run complete. To publish, add --publish") diff --git a/skills/creative/ascii-video/references/composition.md b/skills/creative/ascii-video/references/composition.md index 0028b93fa..f7e6eff89 100644 --- a/skills/creative/ascii-video/references/composition.md +++ b/skills/creative/ascii-video/references/composition.md @@ -744,3 +744,149 @@ class PixelBlendStack: result = blend_canvas(result, canvas, mode, opacity) return result ``` + +## Text Backdrop (Readability Mask) + +When placing readable text over busy multi-grid ASCII backgrounds, the text will blend into the background and become illegible. **Always apply a dark backdrop behind text regions.** + +The technique: compute the bounding box of all text glyphs, create a gaussian-blurred dark mask covering that area with padding, and multiply the background by `(1 - mask * darkness)` before rendering text on top. + +```python +from scipy.ndimage import gaussian_filter + +def apply_text_backdrop(canvas, glyphs, padding=80, darkness=0.75): + """Darken the background behind text for readability. + + Call AFTER rendering background, BEFORE rendering text. 
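+    The blur sigma scales with padding (sigma = padding * 0.6), so larger
+    padding also feathers the darkened edges more softly.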
+ + Args: + canvas: (VH, VW, 3) uint8 background + glyphs: list of {"x": float, "y": float, ...} glyph positions + padding: pixel padding around text bounding box + darkness: 0.0 = no darkening, 1.0 = fully black + Returns: + darkened canvas (uint8) + """ + if not glyphs: + return canvas + xs = [g['x'] for g in glyphs] + ys = [g['y'] for g in glyphs] + x0 = max(0, int(min(xs)) - padding) + y0 = max(0, int(min(ys)) - padding) + x1 = min(VW, int(max(xs)) + padding + 50) # extra for char width + y1 = min(VH, int(max(ys)) + padding + 60) # extra for char height + + # Soft dark mask with gaussian blur for feathered edges + mask = np.zeros((VH, VW), dtype=np.float32) + mask[y0:y1, x0:x1] = 1.0 + mask = gaussian_filter(mask, sigma=padding * 0.6) + + factor = 1.0 - mask * darkness + return (canvas.astype(np.float32) * factor[:, :, np.newaxis]).astype(np.uint8) +``` + +### Usage in render pipeline + +Insert between background rendering and text rendering: + +```python +# 1. Render background (multi-grid ASCII effects) +bg = render_background(cfg, t) + +# 2. Darken behind text region +bg = apply_text_backdrop(bg, frame_glyphs, padding=80, darkness=0.75) + +# 3. Render text on top (now readable against dark backdrop) +bg = text_renderer.render(bg, frame_glyphs, color=(255, 255, 255)) +``` + +Combine with **reverse vignette** (see shaders.md) for scenes where text is always centered — the reverse vignette provides a persistent center-dark zone, while the backdrop handles per-frame glyph positions. + +## External Layout Oracle Pattern + +For text-heavy videos where text needs to dynamically reflow around obstacles (shapes, icons, other text), use an external layout engine to pre-compute glyph positions and feed them into the Python renderer via JSON. + +### Architecture + +``` +Layout Engine (browser/Node.js) → layouts.json → Python ASCII Renderer + ↑ ↑ + Computes per-frame Reads glyph positions, + glyph (x,y) positions renders as ASCII chars + with obstacle-aware reflow with full effect pipeline +``` + +### JSON interchange format + +```json +{ + "meta": { + "canvas_width": 1080, "canvas_height": 1080, + "fps": 24, "total_frames": 1248, + "fonts": { + "body": {"charW": 12.04, "charH": 24, "fontSize": 20}, + "hero": {"charW": 24.08, "charH": 48, "fontSize": 40} + } + }, + "scenes": [ + { + "id": "scene_name", + "start_frame": 0, "end_frame": 96, + "frames": { + "0": { + "glyphs": [ + {"char": "H", "x": 287.1, "y": 400.0, "alpha": 1.0}, + {"char": "e", "x": 311.2, "y": 400.0, "alpha": 1.0} + ], + "obstacles": [ + {"type": "circle", "cx": 540, "cy": 540, "r": 80}, + {"type": "rect", "x": 300, "y": 500, "w": 120, "h": 80} + ] + } + } + } + ] +} +``` + +### When to use + +- Text that dynamically reflows around moving objects +- Per-glyph animation (reveal, scatter, physics) +- Variable typography that needs precise measurement +- Any case where Python's Pillow text layout is insufficient + +### When NOT to use + +- Static centered text (just use PIL `draw.text()` directly) +- Text that only fades in/out without spatial animation +- Simple typewriter effects (handle in Python with a character counter) + +### Running the oracle + +Use Playwright to run the layout engine in a headless browser: + +```javascript +// extract.mjs +import { chromium } from 'playwright'; +const browser = await chromium.launch({ headless: true }); +const page = await browser.newPage(); +await page.goto(`file://${oraclePath}`); +await page.waitForFunction(() => window.__ORACLE_DONE__ === true, null, { timeout: 60000 }); +const result 
= await page.evaluate(() => window.__ORACLE_RESULT__); +writeFileSync('layouts.json', JSON.stringify(result)); +await browser.close(); +``` + +### Consuming in Python + +```python +# In the renderer, map pixel positions to the canvas: +for glyph in frame_data['glyphs']: + char, px, py = glyph['char'], glyph['x'], glyph['y'] + alpha = glyph.get('alpha', 1.0) + # Render using PIL draw.text() at exact pixel position + draw.text((px, py), char, fill=(int(255*alpha),)*3, font=font) +``` + +Obstacles from the JSON can also be rendered as glowing ASCII shapes (circles, rectangles) to visualize the reflow zones. diff --git a/skills/creative/ascii-video/references/shaders.md b/skills/creative/ascii-video/references/shaders.md index fce436a4d..a4cf7a2e5 100644 --- a/skills/creative/ascii-video/references/shaders.md +++ b/skills/creative/ascii-video/references/shaders.md @@ -834,6 +834,39 @@ def sh_vignette(c, s=0.22): return np.clip(c * _vig_cache[k][:,:,None], 0, 255).astype(np.uint8) ``` +#### Reverse Vignette + +Inverted vignette: darkens the **center** and leaves edges bright. Useful when text is centered over busy backgrounds — creates a natural dark zone for readability without a hard-edged box. + +Combine with `apply_text_backdrop()` (see composition.md) for per-frame glyph-aware darkening. + +```python +_rvignette_cache = {} + +def sh_reverse_vignette(c, strength=0.5): + """Center darkening, edge brightening. Cached.""" + k = ('rv', c.shape[0], c.shape[1], round(strength, 2)) + if k not in _rvignette_cache: + h, w = c.shape[:2] + Y = np.linspace(-1, 1, h)[:, None] + X = np.linspace(-1, 1, w)[None, :] + d = np.sqrt(X**2 + Y**2) + # Invert: bright at edges, dark at center + mask = np.clip(1.0 - (1.0 - d * 0.7) * strength, 0.2, 1.0) + _rvignette_cache[k] = mask[:, :, np.newaxis].astype(np.float32) + return np.clip(c.astype(np.float32) * _rvignette_cache[k], 0, 255).astype(np.uint8) +``` + +| Param | Default | Effect | +|-------|---------|--------| +| `strength` | 0.5 | 0 = no effect, 1.0 = center nearly black | + +Add to ShaderChain dispatch: +```python +elif name == "reverse_vignette": + return sh_reverse_vignette(canvas, kwargs.get("strength", 0.5)) +``` + #### Contrast ```python def sh_contrast(c, factor=1.3): diff --git a/skills/creative/ascii-video/references/troubleshooting.md b/skills/creative/ascii-video/references/troubleshooting.md index 8c4bb0229..6b38382cd 100644 --- a/skills/creative/ascii-video/references/troubleshooting.md +++ b/skills/creative/ascii-video/references/troubleshooting.md @@ -14,6 +14,8 @@ | Random dark holes in output | Font missing Unicode glyphs | Validate palettes at init | | Audio-visual desync | Frame timing accumulation | Use integer frame counter, compute t fresh each frame | | Single-color flat output | Hue field shape mismatch | Ensure h,s,v arrays all (rows,cols) before hsv2rgb | +| Text unreadable over busy bg | No contrast between text and background | Use `apply_text_backdrop()` (composition.md) + `reverse_vignette` shader (shaders.md) | +| Text garbled/mirrored | Kaleidoscope or mirror shader applied to text scene | **Never apply kaleidoscope, mirror_h/v/quad/diag to scenes with readable text** — radial folding destroys legibility. Apply these only to background layers or text-free scenes | Common bugs, gotchas, and platform-specific issues encountered during ASCII video development. 
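+## Verifying Text Readability Fixes
+
+A quick standalone smoke test for the two readability techniques referenced in
+the table above. It inlines minimal copies of `apply_text_backdrop` and
+`sh_reverse_vignette` from composition.md and shaders.md at a small canvas
+size. This is a sketch for verification only; the canvas size, glyph layout,
+and the `HERMES` sample string are illustrative assumptions, not part of the
+skill.
+
+```python
+import numpy as np
+from scipy.ndimage import gaussian_filter
+
+VW, VH = 320, 240  # small canvas for a fast check
+
+def sh_reverse_vignette(c, strength=0.5):
+    # Center-dark mask, bright edges (uncached variant of the shaders.md version).
+    h, w = c.shape[:2]
+    Y = np.linspace(-1, 1, h)[:, None]
+    X = np.linspace(-1, 1, w)[None, :]
+    d = np.sqrt(X**2 + Y**2)
+    mask = np.clip(1.0 - (1.0 - d * 0.7) * strength, 0.2, 1.0)
+    return np.clip(c.astype(np.float32) * mask[:, :, None], 0, 255).astype(np.uint8)
+
+def apply_text_backdrop(canvas, glyphs, padding=40, darkness=0.75):
+    # Feathered dark box behind the glyph bounding region (composition.md).
+    if not glyphs:
+        return canvas
+    xs = [g["x"] for g in glyphs]
+    ys = [g["y"] for g in glyphs]
+    x0 = max(0, int(min(xs)) - padding)
+    y0 = max(0, int(min(ys)) - padding)
+    x1 = min(VW, int(max(xs)) + padding + 50)
+    y1 = min(VH, int(max(ys)) + padding + 60)
+    mask = np.zeros((VH, VW), dtype=np.float32)
+    mask[y0:y1, x0:x1] = 1.0
+    mask = gaussian_filter(mask, sigma=padding * 0.6)
+    factor = 1.0 - mask * darkness
+    return (canvas.astype(np.float32) * factor[:, :, np.newaxis]).astype(np.uint8)
+
+rng = np.random.default_rng(0)
+bg = rng.integers(100, 255, size=(VH, VW, 3), dtype=np.uint8)  # busy background
+glyphs = [{"char": ch, "x": 120 + 14 * i, "y": 110} for i, ch in enumerate("HERMES")]
+
+frame = sh_reverse_vignette(bg, strength=0.5)
+frame = apply_text_backdrop(frame, glyphs, padding=40, darkness=0.75)
+
+# The text region should now be measurably darker than the frame average.
+text_zone = frame[90:140, 100:220].mean()
+assert text_zone < frame.mean(), "backdrop failed to darken the text region"
+print(f"text zone mean {text_zone:.0f} vs frame mean {frame.mean():.0f}")
+```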
diff --git a/tests/agent/test_auxiliary_client.py b/tests/agent/test_auxiliary_client.py index 35dcee7ad..b9f71674a 100644 --- a/tests/agent/test_auxiliary_client.py +++ b/tests/agent/test_auxiliary_client.py @@ -198,7 +198,8 @@ class TestAnthropicOAuthFlag: def test_api_key_no_oauth_flag(self, monkeypatch): """Regular API keys (sk-ant-api-*) should create client with is_oauth=False.""" with patch("agent.anthropic_adapter.resolve_anthropic_token", return_value="sk-ant-api03-testkey1234"), \ - patch("agent.anthropic_adapter.build_anthropic_client") as mock_build: + patch("agent.anthropic_adapter.build_anthropic_client") as mock_build, \ + patch("agent.auxiliary_client._select_pool_entry", return_value=(False, None)): mock_build.return_value = MagicMock() from agent.auxiliary_client import _try_anthropic, AnthropicAuxiliaryClient client, model = _try_anthropic() @@ -207,6 +208,31 @@ class TestAnthropicOAuthFlag: adapter = client.chat.completions assert adapter._is_oauth is False + def test_pool_entry_takes_priority_over_legacy_resolution(self): + class _Entry: + access_token = "sk-ant-oat01-pooled" + base_url = "https://api.anthropic.com" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + with ( + patch("agent.auxiliary_client.load_pool", return_value=_Pool()), + patch("agent.anthropic_adapter.resolve_anthropic_token", side_effect=AssertionError("legacy path should not run")), + patch("agent.anthropic_adapter.build_anthropic_client", return_value=MagicMock()) as mock_build, + ): + from agent.auxiliary_client import _try_anthropic + + client, model = _try_anthropic() + + assert client is not None + assert model == "claude-haiku-4-5-20251001" + assert mock_build.call_args.args[0] == "sk-ant-oat01-pooled" + class TestExpiredCodexFallback: """Test that expired Codex tokens don't block the auto chain.""" @@ -392,7 +418,8 @@ class TestExplicitProviderRouting: def test_explicit_anthropic_api_key(self, monkeypatch): """provider='anthropic' + regular API key should work with is_oauth=False.""" with patch("agent.anthropic_adapter.resolve_anthropic_token", return_value="sk-ant-api-regular-key"), \ - patch("agent.anthropic_adapter.build_anthropic_client") as mock_build: + patch("agent.anthropic_adapter.build_anthropic_client") as mock_build, \ + patch("agent.auxiliary_client._select_pool_entry", return_value=(False, None)): mock_build.return_value = MagicMock() client, model = resolve_provider_client("anthropic") assert client is not None @@ -465,9 +492,16 @@ class TestGetTextAuxiliaryClient: assert model == "google/gemini-3-flash-preview" def test_custom_endpoint_over_codex(self, monkeypatch, codex_auth_dir): - monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1") + config = { + "model": { + "provider": "custom", + "base_url": "http://localhost:1234/v1", + "default": "my-local-model", + } + } monkeypatch.setenv("OPENAI_API_KEY", "lm-studio-key") - monkeypatch.setenv("OPENAI_MODEL", "my-local-model") + monkeypatch.setattr("hermes_cli.config.load_config", lambda: config) + monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config) # Override the autouse monkeypatch for codex monkeypatch.setattr( "agent.auxiliary_client._read_codex_access_token", @@ -535,6 +569,32 @@ class TestGetTextAuxiliaryClient: from agent.auxiliary_client import CodexAuxiliaryClient assert isinstance(client, CodexAuxiliaryClient) + def test_codex_pool_entry_takes_priority_over_auth_store(self): + class _Entry: + access_token = "pooled-codex-token" + 
base_url = "https://chatgpt.com/backend-api/codex" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + with ( + patch("agent.auxiliary_client.load_pool", return_value=_Pool()), + patch("agent.auxiliary_client.OpenAI"), + patch("hermes_cli.auth._read_codex_tokens", side_effect=AssertionError("legacy codex store should not run")), + ): + from agent.auxiliary_client import _try_codex + + client, model = _try_codex() + + from agent.auxiliary_client import CodexAuxiliaryClient + + assert isinstance(client, CodexAuxiliaryClient) + assert model == "gpt-5.2-codex" + def test_returns_none_when_nothing_available(self, monkeypatch): monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENAI_API_KEY", raising=False) @@ -583,6 +643,35 @@ class TestVisionClientFallback: assert client.__class__.__name__ == "AnthropicAuxiliaryClient" assert model == "claude-haiku-4-5-20251001" + +class TestAuxiliaryPoolAwareness: + def test_try_nous_uses_pool_entry(self): + class _Entry: + access_token = "pooled-access-token" + agent_key = "pooled-agent-key" + inference_base_url = "https://inference.pool.example/v1" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + with ( + patch("agent.auxiliary_client.load_pool", return_value=_Pool()), + patch("agent.auxiliary_client.OpenAI") as mock_openai, + ): + from agent.auxiliary_client import _try_nous + + client, model = _try_nous() + + assert client is not None + assert model == "gemini-3-flash" + call_kwargs = mock_openai.call_args.kwargs + assert call_kwargs["api_key"] == "pooled-agent-key" + assert call_kwargs["base_url"] == "https://inference.pool.example/v1" + def test_resolve_provider_client_copilot_uses_runtime_credentials(self, monkeypatch): monkeypatch.delenv("GITHUB_TOKEN", raising=False) monkeypatch.delenv("GH_TOKEN", raising=False) @@ -726,10 +815,17 @@ class TestVisionClientFallback: def test_vision_forced_main_uses_custom_endpoint(self, monkeypatch): """When explicitly forced to 'main', vision CAN use custom endpoint.""" + config = { + "model": { + "provider": "custom", + "base_url": "http://localhost:1234/v1", + "default": "my-local-model", + } + } monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "main") - monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1") monkeypatch.setenv("OPENAI_API_KEY", "local-key") - monkeypatch.setenv("OPENAI_MODEL", "my-local-model") + monkeypatch.setattr("hermes_cli.config.load_config", lambda: config) + monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config) with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \ patch("agent.auxiliary_client.OpenAI") as mock_openai: client, model = get_vision_auxiliary_client() @@ -827,9 +923,16 @@ class TestResolveForcedProvider: assert model is None def test_forced_main_uses_custom(self, monkeypatch): - monkeypatch.setenv("OPENAI_BASE_URL", "http://local:8080/v1") + config = { + "model": { + "provider": "custom", + "base_url": "http://local:8080/v1", + "default": "my-local-model", + } + } monkeypatch.setenv("OPENAI_API_KEY", "local-key") - monkeypatch.setenv("OPENAI_MODEL", "my-local-model") + monkeypatch.setattr("hermes_cli.config.load_config", lambda: config) + monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config) with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \ patch("agent.auxiliary_client.OpenAI") as mock_openai: client, model = _resolve_forced_provider("main") @@ 
-858,10 +961,17 @@ class TestResolveForcedProvider: def test_forced_main_skips_openrouter_nous(self, monkeypatch): """Even if OpenRouter key is set, 'main' skips it.""" + config = { + "model": { + "provider": "custom", + "base_url": "http://local:8080/v1", + "default": "my-local-model", + } + } monkeypatch.setenv("OPENROUTER_API_KEY", "or-key") - monkeypatch.setenv("OPENAI_BASE_URL", "http://local:8080/v1") monkeypatch.setenv("OPENAI_API_KEY", "local-key") - monkeypatch.setenv("OPENAI_MODEL", "my-local-model") + monkeypatch.setattr("hermes_cli.config.load_config", lambda: config) + monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config) with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \ patch("agent.auxiliary_client.OpenAI") as mock_openai: client, model = _resolve_forced_provider("main") diff --git a/tests/gateway/test_api_server.py b/tests/gateway/test_api_server.py index 772dd8b1c..b48ac1af7 100644 --- a/tests/gateway/test_api_server.py +++ b/tests/gateway/test_api_server.py @@ -427,6 +427,81 @@ class TestChatCompletionsEndpoint: assert "Thinking" in body assert " about it..." in body + @pytest.mark.asyncio + async def test_stream_includes_tool_progress(self, adapter): + """tool_progress_callback fires → progress appears in the SSE stream.""" + import asyncio + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + async def _mock_run_agent(**kwargs): + cb = kwargs.get("stream_delta_callback") + tp_cb = kwargs.get("tool_progress_callback") + # Simulate tool progress before streaming content + if tp_cb: + tp_cb("terminal", "ls -la", {"command": "ls -la"}) + if cb: + await asyncio.sleep(0.05) + cb("Here are the files.") + return ( + {"final_response": "Here are the files.", "messages": [], "api_calls": 1}, + {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}, + ) + + with patch.object(adapter, "_run_agent", side_effect=_mock_run_agent): + resp = await cli.post( + "/v1/chat/completions", + json={ + "model": "test", + "messages": [{"role": "user", "content": "list files"}], + "stream": True, + }, + ) + assert resp.status == 200 + body = await resp.text() + assert "[DONE]" in body + # Tool progress message must appear in the stream + assert "ls -la" in body + # Final content must also be present + assert "Here are the files." 
in body + + @pytest.mark.asyncio + async def test_stream_tool_progress_skips_internal_events(self, adapter): + """Internal events (name starting with _) are not streamed.""" + import asyncio + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + async def _mock_run_agent(**kwargs): + cb = kwargs.get("stream_delta_callback") + tp_cb = kwargs.get("tool_progress_callback") + if tp_cb: + tp_cb("_thinking", "some internal state", {}) + tp_cb("web_search", "Python docs", {"query": "Python docs"}) + if cb: + await asyncio.sleep(0.05) + cb("Found it.") + return ( + {"final_response": "Found it.", "messages": [], "api_calls": 1}, + {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}, + ) + + with patch.object(adapter, "_run_agent", side_effect=_mock_run_agent): + resp = await cli.post( + "/v1/chat/completions", + json={ + "model": "test", + "messages": [{"role": "user", "content": "search"}], + "stream": True, + }, + ) + assert resp.status == 200 + body = await resp.text() + # Internal _thinking event should NOT appear + assert "some internal state" not in body + # Real tool progress should appear + assert "Python docs" in body + @pytest.mark.asyncio async def test_no_user_message_returns_400(self, adapter): app = _create_app(adapter) diff --git a/tests/gateway/test_discord_reactions.py b/tests/gateway/test_discord_reactions.py index c19913a4c..3988c67b5 100644 --- a/tests/gateway/test_discord_reactions.py +++ b/tests/gateway/test_discord_reactions.py @@ -168,3 +168,67 @@ async def test_reaction_helper_failures_do_not_break_message_flow(adapter): await adapter._process_message_background(event, build_session_key(event.source)) adapter.send.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_reactions_disabled_via_env(adapter, monkeypatch): + """When DISCORD_REACTIONS=false, no reactions should be added.""" + monkeypatch.setenv("DISCORD_REACTIONS", "false") + + raw_message = SimpleNamespace( + add_reaction=AsyncMock(), + remove_reaction=AsyncMock(), + ) + + async def handler(_event): + await asyncio.sleep(0) + return "ack" + + async def hold_typing(_chat_id, interval=2.0, metadata=None): + await asyncio.Event().wait() + + adapter.set_message_handler(handler) + adapter.send = AsyncMock(return_value=SendResult(success=True, message_id="999")) + adapter._keep_typing = hold_typing + + event = _make_event("4", raw_message) + await adapter._process_message_background(event, build_session_key(event.source)) + + raw_message.add_reaction.assert_not_awaited() + raw_message.remove_reaction.assert_not_awaited() + # Response should still be sent + adapter.send.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_reactions_disabled_via_env_zero(adapter, monkeypatch): + """DISCORD_REACTIONS=0 should also disable reactions.""" + monkeypatch.setenv("DISCORD_REACTIONS", "0") + + raw_message = SimpleNamespace( + add_reaction=AsyncMock(), + remove_reaction=AsyncMock(), + ) + + event = _make_event("5", raw_message) + await adapter.on_processing_start(event) + await adapter.on_processing_complete(event, success=True) + + raw_message.add_reaction.assert_not_awaited() + raw_message.remove_reaction.assert_not_awaited() + + +@pytest.mark.asyncio +async def test_reactions_enabled_by_default(adapter, monkeypatch): + """When DISCORD_REACTIONS is unset, reactions should still work (default: true).""" + monkeypatch.delenv("DISCORD_REACTIONS", raising=False) + + raw_message = SimpleNamespace( + add_reaction=AsyncMock(), + remove_reaction=AsyncMock(), + ) + + event = 
_make_event("6", raw_message) + await adapter.on_processing_start(event) + + raw_message.add_reaction.assert_awaited_once_with("👀") diff --git a/tests/gateway/test_matrix.py b/tests/gateway/test_matrix.py index 5a9879f60..9912eef00 100644 --- a/tests/gateway/test_matrix.py +++ b/tests/gateway/test_matrix.py @@ -643,3 +643,353 @@ class TestMatrixEncryptedSendFallback: assert fake_client.room_send.await_count == 2 second_call = fake_client.room_send.await_args_list[1] assert second_call.kwargs.get("ignore_unverified_devices") is True + + +# --------------------------------------------------------------------------- +# E2EE: Auto-trust devices +# --------------------------------------------------------------------------- + +class TestMatrixAutoTrustDevices: + def test_auto_trust_verifies_unverified_devices(self): + adapter = _make_adapter() + + # DeviceStore.__iter__ yields OlmDevice objects directly. + device_a = MagicMock() + device_a.device_id = "DEVICE_A" + device_a.verified = False + device_b = MagicMock() + device_b.device_id = "DEVICE_B" + device_b.verified = True # already trusted + device_c = MagicMock() + device_c.device_id = "DEVICE_C" + device_c.verified = False + + fake_client = MagicMock() + fake_client.device_id = "OWN_DEVICE" + fake_client.verify_device = MagicMock() + + # Simulate DeviceStore iteration (yields OlmDevice objects) + fake_client.device_store = MagicMock() + fake_client.device_store.__iter__ = MagicMock( + return_value=iter([device_a, device_b, device_c]) + ) + + adapter._client = fake_client + adapter._auto_trust_devices() + + # Should have verified device_a and device_c (not device_b, already verified) + assert fake_client.verify_device.call_count == 2 + verified_devices = [call.args[0] for call in fake_client.verify_device.call_args_list] + assert device_a in verified_devices + assert device_c in verified_devices + assert device_b not in verified_devices + + def test_auto_trust_skips_own_device(self): + adapter = _make_adapter() + + own_device = MagicMock() + own_device.device_id = "MY_DEVICE" + own_device.verified = False + + fake_client = MagicMock() + fake_client.device_id = "MY_DEVICE" + fake_client.verify_device = MagicMock() + + fake_client.device_store = MagicMock() + fake_client.device_store.__iter__ = MagicMock( + return_value=iter([own_device]) + ) + + adapter._client = fake_client + adapter._auto_trust_devices() + + fake_client.verify_device.assert_not_called() + + def test_auto_trust_handles_missing_device_store(self): + adapter = _make_adapter() + fake_client = MagicMock(spec=[]) # empty spec — no attributes + adapter._client = fake_client + # Should not raise + adapter._auto_trust_devices() + + +# --------------------------------------------------------------------------- +# E2EE: MegolmEvent key request + buffering +# --------------------------------------------------------------------------- + +class TestMatrixMegolmEventHandling: + @pytest.mark.asyncio + async def test_megolm_event_requests_room_key_and_buffers(self): + adapter = _make_adapter() + adapter._user_id = "@bot:example.org" + adapter._startup_ts = 0.0 + adapter._dm_rooms = {} + + fake_megolm = MagicMock() + fake_megolm.sender = "@alice:example.org" + fake_megolm.event_id = "$encrypted_event" + fake_megolm.server_timestamp = 9999999999000 # future + fake_megolm.session_id = "SESSION123" + + fake_room = MagicMock() + fake_room.room_id = "!room:example.org" + + fake_client = MagicMock() + fake_client.request_room_key = AsyncMock(return_value=MagicMock()) + adapter._client = fake_client 
+ + # Create a MegolmEvent class for isinstance check + fake_nio = MagicMock() + FakeMegolmEvent = type("MegolmEvent", (), {}) + fake_megolm.__class__ = FakeMegolmEvent + fake_nio.MegolmEvent = FakeMegolmEvent + + with patch.dict("sys.modules", {"nio": fake_nio}): + await adapter._on_room_message(fake_room, fake_megolm) + + # Should have requested the room key + fake_client.request_room_key.assert_awaited_once_with(fake_megolm) + + # Should have buffered the event + assert len(adapter._pending_megolm) == 1 + room, event, ts = adapter._pending_megolm[0] + assert room is fake_room + assert event is fake_megolm + + @pytest.mark.asyncio + async def test_megolm_buffer_capped(self): + adapter = _make_adapter() + adapter._user_id = "@bot:example.org" + adapter._startup_ts = 0.0 + adapter._dm_rooms = {} + + fake_client = MagicMock() + fake_client.request_room_key = AsyncMock(return_value=MagicMock()) + adapter._client = fake_client + + FakeMegolmEvent = type("MegolmEvent", (), {}) + fake_nio = MagicMock() + fake_nio.MegolmEvent = FakeMegolmEvent + + # Fill the buffer past max + from gateway.platforms.matrix import _MAX_PENDING_EVENTS + with patch.dict("sys.modules", {"nio": fake_nio}): + for i in range(_MAX_PENDING_EVENTS + 10): + evt = MagicMock() + evt.__class__ = FakeMegolmEvent + evt.sender = "@alice:example.org" + evt.event_id = f"$event_{i}" + evt.server_timestamp = 9999999999000 + evt.session_id = f"SESSION_{i}" + room = MagicMock() + room.room_id = "!room:example.org" + await adapter._on_room_message(room, evt) + + assert len(adapter._pending_megolm) == _MAX_PENDING_EVENTS + + +# --------------------------------------------------------------------------- +# E2EE: Retry pending decryptions +# --------------------------------------------------------------------------- + +class TestMatrixRetryPendingDecryptions: + @pytest.mark.asyncio + async def test_successful_decryption_routes_to_text_handler(self): + import time as _time + + adapter = _make_adapter() + adapter._user_id = "@bot:example.org" + adapter._startup_ts = 0.0 + adapter._dm_rooms = {} + + # Create types + FakeMegolmEvent = type("MegolmEvent", (), {}) + FakeRoomMessageText = type("RoomMessageText", (), {}) + + decrypted_event = MagicMock() + decrypted_event.__class__ = FakeRoomMessageText + + fake_megolm = MagicMock() + fake_megolm.__class__ = FakeMegolmEvent + fake_megolm.event_id = "$encrypted" + + fake_room = MagicMock() + now = _time.time() + + adapter._pending_megolm = [(fake_room, fake_megolm, now)] + + fake_client = MagicMock() + fake_client.decrypt_event = MagicMock(return_value=decrypted_event) + adapter._client = fake_client + + fake_nio = MagicMock() + fake_nio.MegolmEvent = FakeMegolmEvent + fake_nio.RoomMessageText = FakeRoomMessageText + fake_nio.RoomMessageImage = type("RoomMessageImage", (), {}) + fake_nio.RoomMessageAudio = type("RoomMessageAudio", (), {}) + fake_nio.RoomMessageVideo = type("RoomMessageVideo", (), {}) + fake_nio.RoomMessageFile = type("RoomMessageFile", (), {}) + + with patch.dict("sys.modules", {"nio": fake_nio}): + with patch.object(adapter, "_on_room_message", AsyncMock()) as mock_handler: + await adapter._retry_pending_decryptions() + mock_handler.assert_awaited_once_with(fake_room, decrypted_event) + + # Buffer should be empty now + assert len(adapter._pending_megolm) == 0 + + @pytest.mark.asyncio + async def test_still_undecryptable_stays_in_buffer(self): + import time as _time + + adapter = _make_adapter() + + FakeMegolmEvent = type("MegolmEvent", (), {}) + + fake_megolm = MagicMock() + 
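+        # Swapping __class__ lets the adapter's isinstance(event, MegolmEvent)
+        # check match this mock.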
fake_megolm.__class__ = FakeMegolmEvent + fake_megolm.event_id = "$still_encrypted" + + now = _time.time() + adapter._pending_megolm = [(MagicMock(), fake_megolm, now)] + + fake_client = MagicMock() + # decrypt_event raises when key is still missing + fake_client.decrypt_event = MagicMock(side_effect=Exception("missing key")) + adapter._client = fake_client + + fake_nio = MagicMock() + fake_nio.MegolmEvent = FakeMegolmEvent + + with patch.dict("sys.modules", {"nio": fake_nio}): + await adapter._retry_pending_decryptions() + + assert len(adapter._pending_megolm) == 1 + + @pytest.mark.asyncio + async def test_expired_events_dropped(self): + import time as _time + + adapter = _make_adapter() + + from gateway.platforms.matrix import _PENDING_EVENT_TTL + + fake_megolm = MagicMock() + fake_megolm.event_id = "$old_event" + fake_megolm.__class__ = type("MegolmEvent", (), {}) + + # Timestamp well past TTL + old_ts = _time.time() - _PENDING_EVENT_TTL - 60 + adapter._pending_megolm = [(MagicMock(), fake_megolm, old_ts)] + + fake_client = MagicMock() + adapter._client = fake_client + + fake_nio = MagicMock() + fake_nio.MegolmEvent = type("MegolmEvent", (), {}) + + with patch.dict("sys.modules", {"nio": fake_nio}): + await adapter._retry_pending_decryptions() + + # Should have been dropped + assert len(adapter._pending_megolm) == 0 + # Should NOT have tried to decrypt + fake_client.decrypt_event.assert_not_called() + + @pytest.mark.asyncio + async def test_media_event_routes_to_media_handler(self): + import time as _time + + adapter = _make_adapter() + adapter._user_id = "@bot:example.org" + adapter._startup_ts = 0.0 + + FakeMegolmEvent = type("MegolmEvent", (), {}) + FakeRoomMessageImage = type("RoomMessageImage", (), {}) + + decrypted_image = MagicMock() + decrypted_image.__class__ = FakeRoomMessageImage + + fake_megolm = MagicMock() + fake_megolm.__class__ = FakeMegolmEvent + fake_megolm.event_id = "$encrypted_image" + + fake_room = MagicMock() + now = _time.time() + adapter._pending_megolm = [(fake_room, fake_megolm, now)] + + fake_client = MagicMock() + fake_client.decrypt_event = MagicMock(return_value=decrypted_image) + adapter._client = fake_client + + fake_nio = MagicMock() + fake_nio.MegolmEvent = FakeMegolmEvent + fake_nio.RoomMessageText = type("RoomMessageText", (), {}) + fake_nio.RoomMessageImage = FakeRoomMessageImage + fake_nio.RoomMessageAudio = type("RoomMessageAudio", (), {}) + fake_nio.RoomMessageVideo = type("RoomMessageVideo", (), {}) + fake_nio.RoomMessageFile = type("RoomMessageFile", (), {}) + + with patch.dict("sys.modules", {"nio": fake_nio}): + with patch.object(adapter, "_on_room_message_media", AsyncMock()) as mock_media: + await adapter._retry_pending_decryptions() + mock_media.assert_awaited_once_with(fake_room, decrypted_image) + + assert len(adapter._pending_megolm) == 0 + + +# --------------------------------------------------------------------------- +# E2EE: Key export / import +# --------------------------------------------------------------------------- + +class TestMatrixKeyExportImport: + @pytest.mark.asyncio + async def test_disconnect_exports_keys(self): + adapter = _make_adapter() + adapter._encryption = True + adapter._sync_task = None + + fake_client = MagicMock() + fake_client.olm = object() + fake_client.export_keys = AsyncMock() + fake_client.close = AsyncMock() + adapter._client = fake_client + + from gateway.platforms.matrix import _KEY_EXPORT_FILE, _KEY_EXPORT_PASSPHRASE + + await adapter.disconnect() + + 
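+        # Keys must land in the shared export file before the client closes.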
fake_client.export_keys.assert_awaited_once_with( + str(_KEY_EXPORT_FILE), _KEY_EXPORT_PASSPHRASE, + ) + + @pytest.mark.asyncio + async def test_disconnect_handles_export_failure(self): + adapter = _make_adapter() + adapter._encryption = True + adapter._sync_task = None + + fake_client = MagicMock() + fake_client.olm = object() + fake_client.export_keys = AsyncMock(side_effect=Exception("export failed")) + fake_client.close = AsyncMock() + adapter._client = fake_client + + # Should not raise + await adapter.disconnect() + assert adapter._client is None # still cleaned up + + @pytest.mark.asyncio + async def test_disconnect_skips_export_when_no_encryption(self): + adapter = _make_adapter() + adapter._encryption = False + adapter._sync_task = None + + fake_client = MagicMock() + fake_client.close = AsyncMock() + adapter._client = fake_client + + await adapter.disconnect() + # Should not have tried to export + assert not hasattr(fake_client, "export_keys") or \ + not fake_client.export_keys.called diff --git a/tests/gateway/test_session_hygiene.py b/tests/gateway/test_session_hygiene.py index 843c0d416..5488296f6 100644 --- a/tests/gateway/test_session_hygiene.py +++ b/tests/gateway/test_session_hygiene.py @@ -212,47 +212,7 @@ class TestSessionHygieneWarnThreshold: assert post_compress_tokens < warn_threshold -class TestCompressionWarnRateLimit: - """Compression warning messages must be rate-limited per chat_id.""" - def _make_runner(self): - from unittest.mock import MagicMock, patch - with patch("gateway.run.load_gateway_config"), \ - patch("gateway.run.SessionStore"), \ - patch("gateway.run.DeliveryRouter"): - from gateway.run import GatewayRunner - runner = GatewayRunner.__new__(GatewayRunner) - runner._compression_warn_sent = {} - runner._compression_warn_cooldown = 3600 - return runner - - def test_first_warn_is_sent(self): - runner = self._make_runner() - now = 1_000_000.0 - last = runner._compression_warn_sent.get("chat:1", 0) - assert now - last >= runner._compression_warn_cooldown - - def test_second_warn_suppressed_within_cooldown(self): - runner = self._make_runner() - now = 1_000_000.0 - runner._compression_warn_sent["chat:1"] = now - 60 # 1 minute ago - last = runner._compression_warn_sent.get("chat:1", 0) - assert now - last < runner._compression_warn_cooldown - - def test_warn_allowed_after_cooldown(self): - runner = self._make_runner() - now = 1_000_000.0 - runner._compression_warn_sent["chat:1"] = now - 3601 # just past cooldown - last = runner._compression_warn_sent.get("chat:1", 0) - assert now - last >= runner._compression_warn_cooldown - - def test_rate_limit_is_per_chat(self): - """Rate-limiting one chat must not suppress warnings for another.""" - runner = self._make_runner() - now = 1_000_000.0 - runner._compression_warn_sent["chat:1"] = now - 60 # suppressed - last_other = runner._compression_warn_sent.get("chat:2", 0) - assert now - last_other >= runner._compression_warn_cooldown class TestEstimatedTokenThreshold: @@ -421,10 +381,6 @@ async def test_session_hygiene_messages_stay_in_originating_topic(monkeypatch, t result = await runner._handle_message(event) assert result == "ok" - assert len(adapter.sent) == 2 - assert adapter.sent[0]["chat_id"] == "-1001" - assert "Session is large" in adapter.sent[0]["content"] - assert adapter.sent[0]["metadata"] == {"thread_id": "17585"} - assert adapter.sent[1]["chat_id"] == "-1001" - assert "Compressed:" in adapter.sent[1]["content"] - assert adapter.sent[1]["metadata"] == {"thread_id": "17585"} + # Compression warnings are 
no longer sent to users — compression + # happens silently with server-side logging only. + assert len(adapter.sent) == 0 diff --git a/tests/gateway/test_unauthorized_dm_behavior.py b/tests/gateway/test_unauthorized_dm_behavior.py index 02aae301c..25b51dc2f 100644 --- a/tests/gateway/test_unauthorized_dm_behavior.py +++ b/tests/gateway/test_unauthorized_dm_behavior.py @@ -60,6 +60,7 @@ def _make_runner(platform: Platform, config: GatewayConfig): runner.adapters = {platform: adapter} runner.pairing_store = MagicMock() runner.pairing_store.is_approved.return_value = False + runner.pairing_store._is_rate_limited.return_value = False return runner, adapter @@ -142,6 +143,56 @@ async def test_unauthorized_whatsapp_dm_can_be_ignored(monkeypatch): adapter.send.assert_not_awaited() +@pytest.mark.asyncio +async def test_rate_limited_user_gets_no_response(monkeypatch): + """When a user is already rate-limited, pairing messages are silently ignored.""" + _clear_auth_env(monkeypatch) + config = GatewayConfig( + platforms={Platform.WHATSAPP: PlatformConfig(enabled=True)}, + ) + runner, adapter = _make_runner(Platform.WHATSAPP, config) + runner.pairing_store._is_rate_limited.return_value = True + + result = await runner._handle_message( + _make_event( + Platform.WHATSAPP, + "15551234567@s.whatsapp.net", + "15551234567@s.whatsapp.net", + ) + ) + + assert result is None + runner.pairing_store.generate_code.assert_not_called() + adapter.send.assert_not_awaited() + + +@pytest.mark.asyncio +async def test_rejection_message_records_rate_limit(monkeypatch): + """After sending a 'too many requests' rejection, rate limit is recorded + so subsequent messages are silently ignored.""" + _clear_auth_env(monkeypatch) + config = GatewayConfig( + platforms={Platform.WHATSAPP: PlatformConfig(enabled=True)}, + ) + runner, adapter = _make_runner(Platform.WHATSAPP, config) + runner.pairing_store.generate_code.return_value = None # triggers rejection + + result = await runner._handle_message( + _make_event( + Platform.WHATSAPP, + "15551234567@s.whatsapp.net", + "15551234567@s.whatsapp.net", + ) + ) + + assert result is None + adapter.send.assert_awaited_once() + assert "Too many" in adapter.send.await_args.args[1] + runner.pairing_store._record_rate_limit.assert_called_once_with( + "whatsapp", "15551234567@s.whatsapp.net" + ) + + @pytest.mark.asyncio async def test_global_ignore_suppresses_pairing_reply(monkeypatch): _clear_auth_env(monkeypatch) diff --git a/tests/gateway/test_update_command.py b/tests/gateway/test_update_command.py index ac9beac1b..0fc774a0a 100644 --- a/tests/gateway/test_update_command.py +++ b/tests/gateway/test_update_command.py @@ -45,6 +45,17 @@ def _make_runner(): class TestHandleUpdateCommand: """Tests for GatewayRunner._handle_update_command.""" + @pytest.mark.asyncio + async def test_managed_install_returns_package_manager_guidance(self, monkeypatch): + runner = _make_runner() + event = _make_event() + monkeypatch.setenv("HERMES_MANAGED", "homebrew") + + result = await runner._handle_update_command(event) + + assert "managed by Homebrew" in result + assert "brew upgrade hermes-agent" in result + @pytest.mark.asyncio async def test_no_git_directory(self, tmp_path): """Returns an error when .git does not exist.""" @@ -191,7 +202,7 @@ class TestHandleUpdateCommand: with patch("gateway.run._hermes_home", hermes_home), \ patch("gateway.run.__file__", fake_file), \ - patch("shutil.which", side_effect=lambda x: "/usr/bin/hermes" if x == "hermes" else "/usr/bin/systemd-run"), \ + patch("shutil.which", 
side_effect=lambda x: "/usr/bin/hermes" if x == "hermes" else "/usr/bin/setsid"), \ patch("subprocess.Popen"): result = await runner._handle_update_command(event) @@ -204,8 +215,8 @@ class TestHandleUpdateCommand: assert not (hermes_home / ".update_exit_code").exists() @pytest.mark.asyncio - async def test_spawns_systemd_run(self, tmp_path): - """Uses systemd-run when available.""" + async def test_spawns_setsid(self, tmp_path): + """Uses setsid when available.""" runner = _make_runner() event = _make_event() @@ -225,16 +236,16 @@ class TestHandleUpdateCommand: patch("subprocess.Popen", mock_popen): result = await runner._handle_update_command(event) - # Verify systemd-run was used + # Verify setsid was used call_args = mock_popen.call_args[0][0] - assert call_args[0] == "/usr/bin/systemd-run" - assert "--scope" in call_args + assert call_args[0] == "/usr/bin/setsid" + assert call_args[1] == "bash" assert ".update_exit_code" in call_args[-1] assert "Starting Hermes update" in result @pytest.mark.asyncio - async def test_fallback_nohup_when_no_systemd_run(self, tmp_path): - """Falls back to nohup when systemd-run is not available.""" + async def test_fallback_when_no_setsid(self, tmp_path): + """Falls back to start_new_session=True when setsid is not available.""" runner = _make_runner() event = _make_event() @@ -249,24 +260,27 @@ class TestHandleUpdateCommand: mock_popen = MagicMock() - def which_no_systemd(x): + def which_no_setsid(x): if x == "hermes": return "/usr/bin/hermes" - if x == "systemd-run": + if x == "setsid": return None return None with patch("gateway.run._hermes_home", hermes_home), \ patch("gateway.run.__file__", fake_file), \ - patch("shutil.which", side_effect=which_no_systemd), \ + patch("shutil.which", side_effect=which_no_setsid), \ patch("subprocess.Popen", mock_popen): result = await runner._handle_update_command(event) - # Verify bash -c nohup fallback was used + # Verify plain bash -c fallback (no nohup, no setsid) call_args = mock_popen.call_args[0][0] assert call_args[0] == "bash" - assert "nohup" in call_args[2] + assert "nohup" not in call_args[2] assert ".update_exit_code" in call_args[2] + # start_new_session=True should be in kwargs + call_kwargs = mock_popen.call_args[1] + assert call_kwargs.get("start_new_session") is True assert "Starting Hermes update" in result @pytest.mark.asyncio diff --git a/tests/hermes_cli/test_claw.py b/tests/hermes_cli/test_claw.py index a9788db93..138b21e9d 100644 --- a/tests/hermes_cli/test_claw.py +++ b/tests/hermes_cli/test_claw.py @@ -40,6 +40,119 @@ class TestFindMigrationScript: assert claw_mod._find_migration_script() is None +# --------------------------------------------------------------------------- +# _find_openclaw_dirs +# --------------------------------------------------------------------------- + + +class TestFindOpenclawDirs: + """Test discovery of OpenClaw directories.""" + + def test_finds_openclaw_dir(self, tmp_path): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + with patch("pathlib.Path.home", return_value=tmp_path): + found = claw_mod._find_openclaw_dirs() + assert openclaw in found + + def test_finds_legacy_dirs(self, tmp_path): + clawdbot = tmp_path / ".clawdbot" + clawdbot.mkdir() + moldbot = tmp_path / ".moldbot" + moldbot.mkdir() + with patch("pathlib.Path.home", return_value=tmp_path): + found = claw_mod._find_openclaw_dirs() + assert len(found) == 2 + assert clawdbot in found + assert moldbot in found + + def test_returns_empty_when_none_exist(self, tmp_path): + with 
patch("pathlib.Path.home", return_value=tmp_path): + found = claw_mod._find_openclaw_dirs() + assert found == [] + + +# --------------------------------------------------------------------------- +# _scan_workspace_state +# --------------------------------------------------------------------------- + + +class TestScanWorkspaceState: + """Test scanning for workspace state files.""" + + def test_finds_root_state_files(self, tmp_path): + (tmp_path / "todo.json").write_text("{}") + (tmp_path / "sessions").mkdir() + findings = claw_mod._scan_workspace_state(tmp_path) + descs = [desc for _, desc in findings] + assert any("todo.json" in d for d in descs) + assert any("sessions" in d for d in descs) + + def test_finds_workspace_state_files(self, tmp_path): + ws = tmp_path / "workspace" + ws.mkdir() + (ws / "todo.json").write_text("{}") + (ws / "sessions").mkdir() + findings = claw_mod._scan_workspace_state(tmp_path) + descs = [desc for _, desc in findings] + assert any("workspace/todo.json" in d for d in descs) + assert any("workspace/sessions" in d for d in descs) + + def test_ignores_hidden_dirs(self, tmp_path): + scan_dir = tmp_path / "scan_target" + scan_dir.mkdir() + hidden = scan_dir / ".git" + hidden.mkdir() + (hidden / "todo.json").write_text("{}") + findings = claw_mod._scan_workspace_state(scan_dir) + assert len(findings) == 0 + + def test_empty_dir_returns_empty(self, tmp_path): + scan_dir = tmp_path / "scan_target" + scan_dir.mkdir() + findings = claw_mod._scan_workspace_state(scan_dir) + assert findings == [] + + +# --------------------------------------------------------------------------- +# _archive_directory +# --------------------------------------------------------------------------- + + +class TestArchiveDirectory: + """Test directory archival (rename).""" + + def test_renames_to_pre_migration(self, tmp_path): + source = tmp_path / ".openclaw" + source.mkdir() + (source / "test.txt").write_text("data") + + archive_path = claw_mod._archive_directory(source) + assert archive_path == tmp_path / ".openclaw.pre-migration" + assert archive_path.is_dir() + assert not source.exists() + assert (archive_path / "test.txt").read_text() == "data" + + def test_adds_timestamp_when_archive_exists(self, tmp_path): + source = tmp_path / ".openclaw" + source.mkdir() + # Pre-existing archive + (tmp_path / ".openclaw.pre-migration").mkdir() + + archive_path = claw_mod._archive_directory(source) + assert ".pre-migration-" in archive_path.name + assert archive_path.is_dir() + assert not source.exists() + + def test_dry_run_does_not_rename(self, tmp_path): + source = tmp_path / ".openclaw" + source.mkdir() + + archive_path = claw_mod._archive_directory(source, dry_run=True) + assert archive_path == tmp_path / ".openclaw.pre-migration" + assert source.is_dir() # Still exists + + # --------------------------------------------------------------------------- # claw_command routing # --------------------------------------------------------------------------- @@ -56,11 +169,24 @@ class TestClawCommand: claw_mod.claw_command(args) mock.assert_called_once_with(args) + def test_routes_to_cleanup(self): + args = Namespace(claw_action="cleanup", source=None, dry_run=False, yes=False) + with patch.object(claw_mod, "_cmd_cleanup") as mock: + claw_mod.claw_command(args) + mock.assert_called_once_with(args) + + def test_routes_clean_alias(self): + args = Namespace(claw_action="clean", source=None, dry_run=False, yes=False) + with patch.object(claw_mod, "_cmd_cleanup") as mock: + claw_mod.claw_command(args) + 
mock.assert_called_once_with(args) + def test_shows_help_for_no_action(self, capsys): args = Namespace(claw_action=None) claw_mod.claw_command(args) captured = capsys.readouterr() assert "migrate" in captured.out + assert "cleanup" in captured.out # --------------------------------------------------------------------------- @@ -168,6 +294,7 @@ class TestCmdMigrate: patch.object(claw_mod, "_load_migration_module", return_value=fake_mod), patch.object(claw_mod, "get_config_path", return_value=config_path), patch.object(claw_mod, "prompt_yes_no", return_value=True), + patch.object(claw_mod, "_offer_source_archival"), ): claw_mod._cmd_migrate(args) @@ -175,6 +302,75 @@ class TestCmdMigrate: assert "Migration Results" in captured.out assert "Migration complete!" in captured.out + def test_execute_offers_archival_on_success(self, tmp_path, capsys): + """After successful migration, _offer_source_archival should be called.""" + openclaw_dir = tmp_path / ".openclaw" + openclaw_dir.mkdir() + + fake_mod = ModuleType("openclaw_to_hermes") + fake_mod.resolve_selected_options = MagicMock(return_value={"soul"}) + fake_migrator = MagicMock() + fake_migrator.migrate.return_value = { + "summary": {"migrated": 3, "skipped": 0, "conflict": 0, "error": 0}, + "items": [ + {"kind": "soul", "status": "migrated", "destination": str(tmp_path / "SOUL.md")}, + ], + } + fake_mod.Migrator = MagicMock(return_value=fake_migrator) + + args = Namespace( + source=str(openclaw_dir), + dry_run=False, preset="full", overwrite=False, + migrate_secrets=False, workspace_target=None, + skill_conflict="skip", yes=True, + ) + + with ( + patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"), + patch.object(claw_mod, "_load_migration_module", return_value=fake_mod), + patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"), + patch.object(claw_mod, "save_config"), + patch.object(claw_mod, "load_config", return_value={}), + patch.object(claw_mod, "_offer_source_archival") as mock_archival, + ): + claw_mod._cmd_migrate(args) + + mock_archival.assert_called_once_with(openclaw_dir, True) + + def test_dry_run_skips_archival(self, tmp_path, capsys): + """Dry run should not offer archival.""" + openclaw_dir = tmp_path / ".openclaw" + openclaw_dir.mkdir() + + fake_mod = ModuleType("openclaw_to_hermes") + fake_mod.resolve_selected_options = MagicMock(return_value=set()) + fake_migrator = MagicMock() + fake_migrator.migrate.return_value = { + "summary": {"migrated": 2, "skipped": 0, "conflict": 0, "error": 0}, + "items": [], + "preset": "full", + } + fake_mod.Migrator = MagicMock(return_value=fake_migrator) + + args = Namespace( + source=str(openclaw_dir), + dry_run=True, preset="full", overwrite=False, + migrate_secrets=False, workspace_target=None, + skill_conflict="skip", yes=False, + ) + + with ( + patch.object(claw_mod, "_find_migration_script", return_value=tmp_path / "s.py"), + patch.object(claw_mod, "_load_migration_module", return_value=fake_mod), + patch.object(claw_mod, "get_config_path", return_value=tmp_path / "config.yaml"), + patch.object(claw_mod, "save_config"), + patch.object(claw_mod, "load_config", return_value={}), + patch.object(claw_mod, "_offer_source_archival") as mock_archival, + ): + claw_mod._cmd_migrate(args) + + mock_archival.assert_not_called() + def test_execute_cancelled_by_user(self, tmp_path, capsys): openclaw_dir = tmp_path / ".openclaw" openclaw_dir.mkdir() @@ -290,6 +486,172 @@ class TestCmdMigrate: assert call_kwargs["migrate_secrets"] is True +# 
--------------------------------------------------------------------------- +# _offer_source_archival +# --------------------------------------------------------------------------- + + +class TestOfferSourceArchival: + """Test the post-migration archival offer.""" + + def test_archives_with_auto_yes(self, tmp_path, capsys): + source = tmp_path / ".openclaw" + source.mkdir() + (source / "workspace").mkdir() + (source / "workspace" / "todo.json").write_text("{}") + + claw_mod._offer_source_archival(source, auto_yes=True) + + captured = capsys.readouterr() + assert "Archived" in captured.out + assert not source.exists() + assert (tmp_path / ".openclaw.pre-migration").is_dir() + + def test_skips_when_user_declines(self, tmp_path, capsys): + source = tmp_path / ".openclaw" + source.mkdir() + + with patch.object(claw_mod, "prompt_yes_no", return_value=False): + claw_mod._offer_source_archival(source, auto_yes=False) + + captured = capsys.readouterr() + assert "Skipped" in captured.out + assert source.is_dir() # Still exists + + def test_noop_when_source_missing(self, tmp_path, capsys): + claw_mod._offer_source_archival(tmp_path / "nonexistent", auto_yes=True) + captured = capsys.readouterr() + assert captured.out == "" # No output + + def test_shows_state_files(self, tmp_path, capsys): + source = tmp_path / ".openclaw" + source.mkdir() + ws = source / "workspace" + ws.mkdir() + (ws / "todo.json").write_text("{}") + + with patch.object(claw_mod, "prompt_yes_no", return_value=False): + claw_mod._offer_source_archival(source, auto_yes=False) + + captured = capsys.readouterr() + assert "todo.json" in captured.out + + def test_handles_archive_error(self, tmp_path, capsys): + source = tmp_path / ".openclaw" + source.mkdir() + + with patch.object(claw_mod, "_archive_directory", side_effect=OSError("permission denied")): + claw_mod._offer_source_archival(source, auto_yes=True) + + captured = capsys.readouterr() + assert "Could not archive" in captured.out + + +# --------------------------------------------------------------------------- +# _cmd_cleanup +# --------------------------------------------------------------------------- + + +class TestCmdCleanup: + """Test the cleanup command handler.""" + + def test_no_dirs_found(self, tmp_path, capsys): + args = Namespace(source=None, dry_run=False, yes=False) + with patch.object(claw_mod, "_find_openclaw_dirs", return_value=[]): + claw_mod._cmd_cleanup(args) + captured = capsys.readouterr() + assert "No OpenClaw directories found" in captured.out + + def test_dry_run_lists_dirs(self, tmp_path, capsys): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + ws = openclaw / "workspace" + ws.mkdir() + (ws / "todo.json").write_text("{}") + + args = Namespace(source=None, dry_run=True, yes=False) + with patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw]): + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "Would archive" in captured.out + assert openclaw.is_dir() # Not actually archived + + def test_archives_with_yes(self, tmp_path, capsys): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + (openclaw / "workspace").mkdir() + (openclaw / "workspace" / "todo.json").write_text("{}") + + args = Namespace(source=None, dry_run=False, yes=True) + with patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw]): + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "Archived" in captured.out + assert "Cleaned up 1" in captured.out + assert not openclaw.exists() + assert (tmp_path / 
".openclaw.pre-migration").is_dir() + + def test_skips_when_user_declines(self, tmp_path, capsys): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + + args = Namespace(source=None, dry_run=False, yes=False) + with ( + patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw]), + patch.object(claw_mod, "prompt_yes_no", return_value=False), + ): + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "Skipped" in captured.out + assert openclaw.is_dir() + + def test_explicit_source(self, tmp_path, capsys): + custom_dir = tmp_path / "my-openclaw" + custom_dir.mkdir() + (custom_dir / "todo.json").write_text("{}") + + args = Namespace(source=str(custom_dir), dry_run=False, yes=True) + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "Archived" in captured.out + assert not custom_dir.exists() + + def test_shows_workspace_details(self, tmp_path, capsys): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + ws = openclaw / "workspace" + ws.mkdir() + (ws / "todo.json").write_text("{}") + (ws / "SOUL.md").write_text("# Soul") + + args = Namespace(source=None, dry_run=True, yes=False) + with patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw]): + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "workspace/" in captured.out + assert "todo.json" in captured.out + + def test_handles_multiple_dirs(self, tmp_path, capsys): + openclaw = tmp_path / ".openclaw" + openclaw.mkdir() + clawdbot = tmp_path / ".clawdbot" + clawdbot.mkdir() + + args = Namespace(source=None, dry_run=False, yes=True) + with patch.object(claw_mod, "_find_openclaw_dirs", return_value=[openclaw, clawdbot]): + claw_mod._cmd_cleanup(args) + + captured = capsys.readouterr() + assert "Cleaned up 2" in captured.out + assert not openclaw.exists() + assert not clawdbot.exists() + + # --------------------------------------------------------------------------- # _print_migration_report # --------------------------------------------------------------------------- diff --git a/tests/hermes_cli/test_commands.py b/tests/hermes_cli/test_commands.py index 2c7ef280a..321f8f161 100644 --- a/tests/hermes_cli/test_commands.py +++ b/tests/hermes_cli/test_commands.py @@ -12,10 +12,13 @@ from hermes_cli.commands import ( SUBCOMMANDS, SlashCommandAutoSuggest, SlashCommandCompleter, + _TG_NAME_LIMIT, + _clamp_telegram_names, gateway_help_lines, resolve_command, slack_subcommand_map, telegram_bot_commands, + telegram_menu_commands, ) @@ -504,3 +507,83 @@ class TestGhostText: def test_no_suggestion_for_non_slash(self): assert _suggestion("hello") is None + + +# --------------------------------------------------------------------------- +# Telegram command name clamping (32-char limit) +# --------------------------------------------------------------------------- + + +class TestClampTelegramNames: + """Tests for _clamp_telegram_names() — 32-char enforcement + collision.""" + + def test_short_names_unchanged(self): + entries = [("help", "Show help"), ("status", "Show status")] + result = _clamp_telegram_names(entries, set()) + assert result == entries + + def test_long_name_truncated(self): + long = "a" * 40 + result = _clamp_telegram_names([(long, "desc")], set()) + assert len(result) == 1 + assert result[0][0] == "a" * _TG_NAME_LIMIT + assert result[0][1] == "desc" + + def test_collision_with_reserved_gets_digit_suffix(self): + # The truncated form collides with a reserved name + prefix = "x" * _TG_NAME_LIMIT + long_name = "x" * 40 + result = 
_clamp_telegram_names([(long_name, "d")], reserved={prefix}) + assert len(result) == 1 + name = result[0][0] + assert len(name) == _TG_NAME_LIMIT + assert name == "x" * (_TG_NAME_LIMIT - 1) + "0" + + def test_collision_between_entries_gets_incrementing_digits(self): + # Two long names that truncate to the same 32-char prefix + base = "y" * 40 + entries = [(base + "_alpha", "d1"), (base + "_beta", "d2")] + result = _clamp_telegram_names(entries, set()) + assert len(result) == 2 + assert result[0][0] == "y" * _TG_NAME_LIMIT + assert result[1][0] == "y" * (_TG_NAME_LIMIT - 1) + "0" + + def test_collision_with_reserved_and_entries_skips_taken_digits(self): + prefix = "z" * _TG_NAME_LIMIT + digit0 = "z" * (_TG_NAME_LIMIT - 1) + "0" + # Reserve both the plain truncation and digit-0 + reserved = {prefix, digit0} + long_name = "z" * 50 + result = _clamp_telegram_names([(long_name, "d")], reserved) + assert len(result) == 1 + assert result[0][0] == "z" * (_TG_NAME_LIMIT - 1) + "1" + + def test_all_digits_exhausted_drops_entry(self): + prefix = "w" * _TG_NAME_LIMIT + # Reserve the plain truncation + all 10 digit slots + reserved = {prefix} | {"w" * (_TG_NAME_LIMIT - 1) + str(d) for d in range(10)} + long_name = "w" * 50 + result = _clamp_telegram_names([(long_name, "d")], reserved) + assert result == [] + + def test_exact_32_chars_not_truncated(self): + name = "a" * _TG_NAME_LIMIT + result = _clamp_telegram_names([(name, "desc")], set()) + assert result[0][0] == name + + def test_duplicate_short_name_deduplicated(self): + entries = [("foo", "d1"), ("foo", "d2")] + result = _clamp_telegram_names(entries, set()) + assert len(result) == 1 + assert result[0] == ("foo", "d1") + + +class TestTelegramMenuCommands: + """Integration: telegram_menu_commands enforces the 32-char limit.""" + + def test_all_names_within_limit(self): + menu, _ = telegram_menu_commands(max_commands=100) + for name, _desc in menu: + assert 1 <= len(name) <= _TG_NAME_LIMIT, ( + f"Command '{name}' is {len(name)} chars (limit {_TG_NAME_LIMIT})" + ) diff --git a/tests/hermes_cli/test_managed_installs.py b/tests/hermes_cli/test_managed_installs.py new file mode 100644 index 000000000..c6b5d792c --- /dev/null +++ b/tests/hermes_cli/test_managed_installs.py @@ -0,0 +1,54 @@ +from types import SimpleNamespace +from unittest.mock import patch + +from hermes_cli.config import ( + format_managed_message, + get_managed_system, + recommended_update_command, +) +from hermes_cli.main import cmd_update +from tools.skills_hub import OptionalSkillSource + + +def test_get_managed_system_homebrew(monkeypatch): + monkeypatch.setenv("HERMES_MANAGED", "homebrew") + + assert get_managed_system() == "Homebrew" + assert recommended_update_command() == "brew upgrade hermes-agent" + + +def test_format_managed_message_homebrew(monkeypatch): + monkeypatch.setenv("HERMES_MANAGED", "homebrew") + + message = format_managed_message("update Hermes Agent") + + assert "managed by Homebrew" in message + assert "brew upgrade hermes-agent" in message + + +def test_recommended_update_command_defaults_to_hermes_update(monkeypatch): + monkeypatch.delenv("HERMES_MANAGED", raising=False) + + assert recommended_update_command() == "hermes update" + + +def test_cmd_update_blocks_managed_homebrew(monkeypatch, capsys): + monkeypatch.setenv("HERMES_MANAGED", "homebrew") + + with patch("hermes_cli.main.subprocess.run") as mock_run: + cmd_update(SimpleNamespace()) + + assert not mock_run.called + captured = capsys.readouterr() + assert "managed by Homebrew" in captured.err + assert 
"brew upgrade hermes-agent" in captured.err + + +def test_optional_skill_source_honors_env_override(monkeypatch, tmp_path): + optional_dir = tmp_path / "optional-skills" + optional_dir.mkdir() + monkeypatch.setenv("HERMES_OPTIONAL_SKILLS", str(optional_dir)) + + source = OptionalSkillSource() + + assert source._optional_dir == optional_dir diff --git a/tests/hermes_cli/test_setup.py b/tests/hermes_cli/test_setup.py index a4c85ba2b..f4f13696c 100644 --- a/tests/hermes_cli/test_setup.py +++ b/tests/hermes_cli/test_setup.py @@ -1,6 +1,8 @@ +"""Tests for setup_model_provider — verifies the delegation to +select_provider_and_model() and config dict sync.""" import json -from hermes_cli.auth import _update_config_for_provider, get_active_provider +from hermes_cli.auth import get_active_provider from hermes_cli.config import load_config, save_config from hermes_cli.setup import setup_model_provider @@ -23,156 +25,198 @@ def _clear_provider_env(monkeypatch): monkeypatch.delenv(key, raising=False) +def _stub_tts(monkeypatch): + """Stub out TTS prompts so setup_model_provider doesn't block.""" + monkeypatch.setattr("hermes_cli.setup.prompt_choice", lambda q, c, d=0: ( + _maybe_keep_current_tts(q, c) if _maybe_keep_current_tts(q, c) is not None + else d + )) + monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False) -def test_nous_oauth_setup_keeps_current_model_when_syncing_disk_provider( - tmp_path, monkeypatch -): + +def _write_model_config(tmp_path, provider, base_url="", model_name="test-model"): + """Simulate what a _model_flow_* function writes to disk.""" + cfg = load_config() + m = cfg.get("model") + if not isinstance(m, dict): + m = {"default": m} if m else {} + cfg["model"] = m + m["provider"] = provider + if base_url: + m["base_url"] = base_url + if model_name: + m["default"] = model_name + save_config(cfg) + + +def test_setup_delegates_to_select_provider_and_model(tmp_path, monkeypatch): + """setup_model_provider calls select_provider_and_model and syncs config.""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) config = load_config() - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - return 1 # Nous Portal - if question == "Configure vision:": - return len(choices) - 1 - if question == "Select default model:": - assert choices[-1] == "Keep current (anthropic/claude-opus-4.6)" - return len(choices) - 1 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") + def fake_select(): + _write_model_config(tmp_path, "custom", "http://localhost:11434/v1", "qwen3.5:32b") - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - - def _fake_login_nous(*args, **kwargs): - auth_path = tmp_path / "auth.json" - auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}})) - _update_config_for_provider("nous", "https://inference.example.com/v1") - - monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login_nous) - monkeypatch.setattr( - "hermes_cli.auth.resolve_nous_runtime_credentials", - lambda *args, **kwargs: { - "base_url": "https://inference.example.com/v1", - "api_key": "nous-key", - }, - ) - monkeypatch.setattr( - "hermes_cli.auth.fetch_nous_models", - 
lambda *args, **kwargs: ["gemini-3-flash"], - ) + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) setup_model_provider(config) save_config(config) reloaded = load_config() + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "custom" + assert reloaded["model"]["base_url"] == "http://localhost:11434/v1" + assert reloaded["model"]["default"] == "qwen3.5:32b" + +def test_setup_syncs_openrouter_from_disk(tmp_path, monkeypatch): + """When select_provider_and_model saves OpenRouter config to disk, + the wizard's config dict picks it up.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = load_config() + assert isinstance(config.get("model"), str) # fresh install + + def fake_select(): + _write_model_config(tmp_path, "openrouter", model_name="anthropic/claude-opus-4.6") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "openrouter" + + +def test_setup_syncs_nous_from_disk(tmp_path, monkeypatch): + """Nous OAuth writes config to disk; wizard config dict must pick it up.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = load_config() + + def fake_select(): + _write_model_config(tmp_path, "nous", "https://inference.example.com/v1", "gemini-3-flash") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() assert isinstance(reloaded["model"], dict) assert reloaded["model"]["provider"] == "nous" assert reloaded["model"]["base_url"] == "https://inference.example.com/v1" - assert reloaded["model"]["default"] == "anthropic/claude-opus-4.6" -def test_custom_setup_clears_active_oauth_provider(tmp_path, monkeypatch): +def test_setup_custom_providers_synced(tmp_path, monkeypatch): + """custom_providers written by select_provider_and_model must survive.""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) - - auth_path = tmp_path / "auth.json" - auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}})) + _stub_tts(monkeypatch) config = load_config() - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - return 3 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") + def fake_select(): + _write_model_config(tmp_path, "custom", "http://localhost:8080/v1", "llama3") + cfg = load_config() + cfg["custom_providers"] = [{"name": "Local", "base_url": "http://localhost:8080/v1"}] + save_config(cfg) - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - - # _model_flow_custom uses builtins.input (URL, key, model, context_length) - input_values = iter([ - "https://custom.example/v1", - "custom-api-key", - "custom/model", - "", # context_length (blank = auto-detect) - ]) - monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values)) - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - 
monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None) - monkeypatch.setattr( - "hermes_cli.models.probe_api_models", - lambda api_key, base_url: {"models": ["m"], "probed_url": base_url + "/models"}, - ) - - setup_model_provider(config) - - # Core assertion: switching to custom endpoint clears OAuth provider - assert get_active_provider() is None - - # _model_flow_custom writes config via its own load/save cycle - reloaded = load_config() - if isinstance(reloaded.get("model"), dict): - assert reloaded["model"].get("provider") == "custom" - assert reloaded["model"].get("default") == "custom/model" - - -def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch): - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key") - _clear_provider_env(monkeypatch) - monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key") - - config = load_config() - - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - return 2 # OpenAI Codex - if question == "Select default model:": - return 0 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None) - monkeypatch.setattr( - "hermes_cli.auth.resolve_codex_runtime_credentials", - lambda *args, **kwargs: { - "base_url": "https://chatgpt.com/backend-api/codex", - "api_key": "codex-access-token", - }, - ) - - captured = {} - - def _fake_get_codex_model_ids(access_token=None): - captured["access_token"] = access_token - return ["gpt-5.2-codex", "gpt-5.2"] - - monkeypatch.setattr( - "hermes_cli.codex_models.get_codex_model_ids", - _fake_get_codex_model_ids, - ) + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) setup_model_provider(config) save_config(config) reloaded = load_config() + assert reloaded.get("custom_providers") == [{"name": "Local", "base_url": "http://localhost:8080/v1"}] - assert captured["access_token"] == "codex-access-token" + +def test_setup_cancel_preserves_existing_config(tmp_path, monkeypatch): + """When the user cancels provider selection, existing config is preserved.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + # Pre-set a provider + _write_model_config(tmp_path, "openrouter", model_name="gpt-4o") + + config = load_config() + assert config["model"]["provider"] == "openrouter" + + def fake_select(): + pass # user cancelled — nothing written to disk + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "openrouter" + assert reloaded["model"]["default"] == "gpt-4o" + + +def test_setup_exception_in_select_gracefully_handled(tmp_path, monkeypatch): + """If select_provider_and_model raises, setup continues with existing config.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = 
load_config() + + def fake_select(): + raise RuntimeError("something broke") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + # Should not raise + setup_model_provider(config) + + +def test_setup_keyboard_interrupt_gracefully_handled(tmp_path, monkeypatch): + """KeyboardInterrupt during provider selection is handled.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = load_config() + + def fake_select(): + raise KeyboardInterrupt() + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + + +def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch): + """Codex model list fetching uses the runtime access token.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key") + _clear_provider_env(monkeypatch) + monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key") + + config = load_config() + _stub_tts(monkeypatch) + + def fake_select(): + _write_model_config(tmp_path, "openai-codex", "https://api.openai.com/v1", "gpt-4o") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() assert isinstance(reloaded["model"], dict) assert reloaded["model"]["provider"] == "openai-codex" - assert reloaded["model"]["default"] == "gpt-5.2-codex" - assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex" diff --git a/tests/hermes_cli/test_setup_model_provider.py b/tests/hermes_cli/test_setup_model_provider.py index 0acbfea51..eb59360a0 100644 --- a/tests/hermes_cli/test_setup_model_provider.py +++ b/tests/hermes_cli/test_setup_model_provider.py @@ -1,9 +1,14 @@ -"""Regression tests for interactive setup provider/model persistence.""" +"""Regression tests for interactive setup provider/model persistence. + +Since setup_model_provider delegates to select_provider_and_model() +from hermes_cli.main, these tests mock the delegation point and verify +that the setup wizard correctly syncs config from disk after the call. 
+""" from __future__ import annotations from hermes_cli.config import load_config, save_config, save_env_value -from hermes_cli.setup import _print_setup_summary, setup_model_provider +from hermes_cli.setup import setup_model_provider def _maybe_keep_current_tts(question, choices): @@ -13,19 +18,6 @@ def _maybe_keep_current_tts(question, choices): return len(choices) - 1 -def _read_env(home): - env_path = home / ".env" - data = {} - if not env_path.exists(): - return data - for line in env_path.read_text().splitlines(): - if not line or line.startswith("#") or "=" not in line: - continue - k, v = line.split("=", 1) - data[k] = v - return data - - def _clear_provider_env(monkeypatch): for key in ( "HERMES_INFERENCE_PROVIDER", @@ -44,430 +36,372 @@ def _clear_provider_env(monkeypatch): monkeypatch.delenv(key, raising=False) +def _stub_tts(monkeypatch): + monkeypatch.setattr("hermes_cli.setup.prompt_choice", lambda q, c, d=0: ( + _maybe_keep_current_tts(q, c) if _maybe_keep_current_tts(q, c) is not None + else d + )) + monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False) + + +def _write_model_config(provider, base_url="", model_name="test-model"): + """Simulate what a _model_flow_* function writes to disk.""" + cfg = load_config() + m = cfg.get("model") + if not isinstance(m, dict): + m = {"default": m} if m else {} + cfg["model"] = m + m["provider"] = provider + if base_url: + m["base_url"] = base_url + else: + m.pop("base_url", None) + if model_name: + m["default"] = model_name + m.pop("api_mode", None) + save_config(cfg) + + def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, monkeypatch): """Keep-current custom should not fall through to the generic model menu.""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) - save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1") - save_env_value("OPENAI_API_KEY", "custom-key") + _stub_tts(monkeypatch) + + # Pre-set custom provider + _write_model_config("custom", "http://localhost:8080/v1", "local-model") config = load_config() - config["model"] = { - "default": "custom/model", - "provider": "custom", - "base_url": "https://example.invalid/v1", - } - save_config(config) + assert config["model"]["provider"] == "custom" - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - assert choices[-1] == "Keep current (Custom: https://example.invalid/v1)" - return len(choices) - 1 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError("Model menu should not appear for keep-current custom") + def fake_select(): + pass # user chose "cancel" or "keep current" - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) setup_model_provider(config) save_config(config) reloaded = load_config() + assert isinstance(reloaded["model"], dict) assert reloaded["model"]["provider"] == "custom" - assert reloaded["model"]["default"] == "custom/model" - assert reloaded["model"]["base_url"] == "https://example.invalid/v1" + assert reloaded["model"]["base_url"] 
== "http://localhost:8080/v1" -def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch): +def test_setup_keep_current_config_provider_uses_provider_specific_model_menu( + tmp_path, monkeypatch +): + """Keeping current provider preserves the config on disk.""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + _write_model_config("zai", "https://open.bigmodel.cn/api/paas/v4", "glm-5") config = load_config() - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - return 3 # Custom endpoint - if question == "Configure vision:": - return len(choices) - 1 # Skip - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") + def fake_select(): + pass # keep current - # _model_flow_custom uses builtins.input (URL, key, model, context_length) - input_values = iter([ - "http://localhost:8000", - "local-key", - "llm", - "", # context_length (blank = auto-detect) - ]) - monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values)) - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) - monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None) - monkeypatch.setattr( - "hermes_cli.models.probe_api_models", - lambda api_key, base_url: { - "models": ["llm"], - "probed_url": "http://localhost:8000/v1/models", - "resolved_base_url": "http://localhost:8000/v1", - "suggested_base_url": "http://localhost:8000/v1", - "used_fallback": True, - }, - ) - - setup_model_provider(config) - - env = _read_env(tmp_path) - - # _model_flow_custom saves env vars and config to disk - assert env.get("OPENAI_BASE_URL") == "http://localhost:8000/v1" - assert env.get("OPENAI_API_KEY") == "local-key" - - # The model config is saved as a dict by _model_flow_custom - reloaded = load_config() - model_cfg = reloaded.get("model", {}) - if isinstance(model_cfg, dict): - assert model_cfg.get("provider") == "custom" - assert model_cfg.get("default") == "llm" - - -def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch): - """Keep-current should respect config-backed providers, not fall back to OpenRouter.""" - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - _clear_provider_env(monkeypatch) - - config = load_config() - config["model"] = { - "default": "claude-opus-4-6", - "provider": "anthropic", - } - save_config(config) - - captured = {"provider_choices": None, "model_choices": None} - - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - captured["provider_choices"] = list(choices) - assert choices[-1] == "Keep current (Anthropic)" - return len(choices) - 1 - if question == "Configure vision:": - assert question == "Configure vision:" - assert choices[-1] == "Skip for now" - return len(choices) - 1 - if question == "Select default model:": - captured["model_choices"] = list(choices) - return len(choices) - 1 # keep current model - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is 
not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: []) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) - - setup_model_provider(config) - save_config(config) - - assert captured["provider_choices"] is not None - assert captured["model_choices"] is not None - assert captured["model_choices"][0] == "claude-opus-4-6" - assert "anthropic/claude-opus-4.6 (recommended)" not in captured["model_choices"] - - -def test_setup_keep_current_anthropic_can_configure_openai_vision_default(tmp_path, monkeypatch): - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - _clear_provider_env(monkeypatch) - - config = load_config() - config["model"] = { - "default": "claude-opus-4-6", - "provider": "anthropic", - } - save_config(config) - - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - assert choices[-1] == "Keep current (Anthropic)" - return len(choices) - 1 - if question == "Configure vision:": - return 1 - if question == "Select vision model:": - assert choices[-1] == "Use default (gpt-4o-mini)" - return len(choices) - 1 - if question == "Select default model:": - assert choices[-1] == "Keep current (claude-opus-4-6)" - return len(choices) - 1 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr( - "hermes_cli.setup.prompt", - lambda message, *args, **kwargs: "sk-openai" if "OpenAI API key" in message else "", - ) - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: []) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) - - setup_model_provider(config) - env = _read_env(tmp_path) - - assert env.get("OPENAI_API_KEY") == "sk-openai" - assert env.get("OPENAI_BASE_URL") == "https://api.openai.com/v1" - assert env.get("AUXILIARY_VISION_MODEL") == "gpt-4o-mini" - - -def test_setup_copilot_uses_gh_auth_and_saves_provider(tmp_path, monkeypatch): - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - _clear_provider_env(monkeypatch) - - config = load_config() - - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - assert choices[14] == "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)" - return 14 - if question == "Select default model:": - assert "gpt-4.1" in choices - assert "gpt-5.4" in choices - return choices.index("gpt-5.4") - if question == "Select reasoning effort:": - assert "low" in choices - assert "high" in choices - return choices.index("high") - if question == "Configure vision:": - return len(choices) - 1 - tts_idx = 
_maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") - - def fake_prompt(message, *args, **kwargs): - raise AssertionError(f"Unexpected prompt call: {message}") - - def fake_get_auth_status(provider_id): - if provider_id == "copilot": - return {"logged_in": True} - return {"logged_in": False} - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt) - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.auth.get_auth_status", fake_get_auth_status) - monkeypatch.setattr( - "hermes_cli.auth.resolve_api_key_provider_credentials", - lambda provider_id: { - "provider": provider_id, - "api_key": "gh-cli-token", - "base_url": "https://api.githubcopilot.com", - "source": "gh auth token", - }, - ) - monkeypatch.setattr( - "hermes_cli.models.fetch_github_model_catalog", - lambda api_key: [ - { - "id": "gpt-4.1", - "capabilities": {"type": "chat", "supports": {}}, - "supported_endpoints": ["/chat/completions"], - }, - { - "id": "gpt-5.4", - "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}, - "supported_endpoints": ["/responses"], - }, - ], - ) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) - - setup_model_provider(config) - save_config(config) - - env = _read_env(tmp_path) - reloaded = load_config() - - assert env.get("GITHUB_TOKEN") is None - assert reloaded["model"]["provider"] == "copilot" - assert reloaded["model"]["base_url"] == "https://api.githubcopilot.com" - assert reloaded["model"]["default"] == "gpt-5.4" - assert reloaded["model"]["api_mode"] == "codex_responses" - assert reloaded["agent"]["reasoning_effort"] == "high" - - -def test_setup_copilot_acp_uses_model_picker_and_saves_provider(tmp_path, monkeypatch): - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - _clear_provider_env(monkeypatch) - - config = load_config() - - def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - assert choices[15] == "GitHub Copilot ACP (spawns `copilot --acp --stdio`)" - return 15 - if question == "Select default model:": - assert "gpt-4.1" in choices - assert "gpt-5.4" in choices - return choices.index("gpt-5.4") - if question == "Configure vision:": - return len(choices) - 1 - tts_idx = _maybe_keep_current_tts(question, choices) - if tts_idx is not None: - return tts_idx - raise AssertionError(f"Unexpected prompt_choice call: {question}") - - def fake_prompt(message, *args, **kwargs): - raise AssertionError(f"Unexpected prompt call: {message}") - - monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) - monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt) - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) - monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) - monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.auth.get_auth_status", lambda provider_id: {"logged_in": provider_id == "copilot-acp"}) - monkeypatch.setattr( - "hermes_cli.auth.resolve_api_key_provider_credentials", - lambda provider_id: { - "provider": "copilot", 
- "api_key": "gh-cli-token", - "base_url": "https://api.githubcopilot.com", - "source": "gh auth token", - }, - ) - monkeypatch.setattr( - "hermes_cli.models.fetch_github_model_catalog", - lambda api_key: [ - { - "id": "gpt-4.1", - "capabilities": {"type": "chat", "supports": {}}, - "supported_endpoints": ["/chat/completions"], - }, - { - "id": "gpt-5.4", - "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}, - "supported_endpoints": ["/responses"], - }, - ], - ) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) setup_model_provider(config) save_config(config) reloaded = load_config() - - assert reloaded["model"]["provider"] == "copilot-acp" - assert reloaded["model"]["base_url"] == "acp://copilot" - assert reloaded["model"]["default"] == "gpt-5.4" - assert reloaded["model"]["api_mode"] == "chat_completions" + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "zai" -def test_setup_switch_custom_to_codex_clears_custom_endpoint_and_updates_config(tmp_path, monkeypatch): - """Switching from custom to Codex should clear custom endpoint overrides.""" +def test_setup_same_provider_rotation_strategy_saved_for_multi_credential_pool(tmp_path, monkeypatch): monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) + save_env_value("OPENROUTER_API_KEY", "or-key") - save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1") - save_env_value("OPENAI_API_KEY", "sk-custom") - save_env_value("OPENROUTER_API_KEY", "sk-or") + # Pre-write config so the pool step sees provider="openrouter" + _write_model_config("openrouter", "", "anthropic/claude-opus-4.6") config = load_config() - config["model"] = { - "default": "custom/model", - "provider": "custom", - "base_url": "https://example.invalid/v1", - } - save_config(config) + + class _Entry: + def __init__(self, label): + self.label = label + + class _Pool: + def entries(self): + return [_Entry("primary"), _Entry("secondary")] + + def fake_select(): + pass # no-op — config already has provider set def fake_prompt_choice(question, choices, default=0): - if question == "Select your inference provider:": - return 2 # OpenAI Codex - if question == "Select default model:": + if "rotation strategy" in question: + return 1 # round robin + tts_idx = _maybe_keep_current_tts(question, choices) + if tts_idx is not None: + return tts_idx + return default + + def fake_prompt_yes_no(question, default=True): + return False + + # Patch directly on the module objects to ensure local imports pick them up. + import hermes_cli.main as _main_mod + import hermes_cli.setup as _setup_mod + import agent.credential_pool as _pool_mod + import agent.auxiliary_client as _aux_mod + + monkeypatch.setattr(_main_mod, "select_provider_and_model", fake_select) + # NOTE: _stub_tts overwrites prompt_choice, so set our mock AFTER it. 
+ _stub_tts(monkeypatch) + monkeypatch.setattr(_setup_mod, "prompt_choice", fake_prompt_choice) + monkeypatch.setattr(_setup_mod, "prompt_yes_no", fake_prompt_yes_no) + monkeypatch.setattr(_setup_mod, "prompt", lambda *args, **kwargs: "") + monkeypatch.setattr(_pool_mod, "load_pool", lambda provider: _Pool()) + monkeypatch.setattr(_aux_mod, "get_available_vision_backends", lambda: []) + + setup_model_provider(config) + + # The pool has 2 entries, so the strategy prompt should fire + strategy = config.get("credential_pool_strategies", {}).get("openrouter") + assert strategy == "round_robin", f"Expected round_robin but got {strategy}" + + +def test_setup_same_provider_fallback_can_add_another_credential(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + save_env_value("OPENROUTER_API_KEY", "or-key") + + # Pre-write config so the pool step sees provider="openrouter" + _write_model_config("openrouter", "", "anthropic/claude-opus-4.6") + + config = load_config() + pool_sizes = iter([1, 2]) + add_calls = [] + + class _Entry: + def __init__(self, label): + self.label = label + + class _Pool: + def __init__(self, size): + self._size = size + + def entries(self): + return [_Entry(f"cred-{idx}") for idx in range(self._size)] + + def fake_load_pool(provider): + return _Pool(next(pool_sizes)) + + def fake_auth_add_command(args): + add_calls.append(args.provider) + + def fake_select(): + pass # no-op — config already has provider set + + def fake_prompt_choice(question, choices, default=0): + if question == "Select same-provider rotation strategy:": return 0 tts_idx = _maybe_keep_current_tts(question, choices) + if tts_idx is not None: + return tts_idx + return default + + yes_no_answers = iter([True, False]) + + def fake_prompt_yes_no(question, default=True): + if question == "Add another credential for same-provider fallback?": + return next(yes_no_answers) + return False + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + _stub_tts(monkeypatch) + monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) + monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", fake_prompt_yes_no) + monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") + monkeypatch.setattr("agent.credential_pool.load_pool", fake_load_pool) + monkeypatch.setattr("hermes_cli.auth_commands.auth_add_command", fake_auth_add_command) + monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) + + setup_model_provider(config) + + assert add_calls == ["openrouter"] + assert config.get("credential_pool_strategies", {}).get("openrouter") == "fill_first" + + +def test_setup_pool_step_shows_manual_vs_auto_detected_counts(tmp_path, monkeypatch, capsys): + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + save_env_value("OPENROUTER_API_KEY", "or-key") + + # Pre-write config so the pool step sees provider="openrouter" + _write_model_config("openrouter", "", "anthropic/claude-opus-4.6") + + config = load_config() + + class _Entry: + def __init__(self, label, source): + self.label = label + self.source = source + + class _Pool: + def entries(self): + return [ + _Entry("primary", "manual"), + _Entry("secondary", "manual"), + _Entry("OPENROUTER_API_KEY", "env:OPENROUTER_API_KEY"), + ] + + def fake_select(): + pass # no-op — config already has provider set + + def fake_prompt_choice(question, choices, default=0): + if "rotation strategy" in question: + return 0 
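+            # Any other prompt (TTS, etc.) falls through to the keep-current
+            # choice when one is offered, else to the menu default.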
+ tts_idx = _maybe_keep_current_tts(question, choices) + if tts_idx is not None: + return tts_idx + return default + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + _stub_tts(monkeypatch) + monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) + monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) + monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") + monkeypatch.setattr("agent.credential_pool.load_pool", lambda provider: _Pool()) + monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) + + setup_model_provider(config) + + out = capsys.readouterr().out + assert "Current pooled credentials for openrouter: 3 (2 manual, 1 auto-detected from env/shared auth)" in out + + +def test_setup_copilot_acp_skips_same_provider_pool_step(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + + config = load_config() + + def fake_prompt_choice(question, choices, default=0): + if question == "Select your inference provider:": + return 15 # GitHub Copilot ACP + if question == "Select default model:": + return 0 + if question == "Configure vision:": + return len(choices) - 1 + tts_idx = _maybe_keep_current_tts(question, choices) if tts_idx is not None: return tts_idx raise AssertionError(f"Unexpected prompt_choice call: {question}") + def fake_prompt_yes_no(question, default=True): + if question == "Add another credential for same-provider fallback?": + raise AssertionError("same-provider pool prompt should not appear for copilot-acp") + return False + monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice) + monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", fake_prompt_yes_no) monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "") - monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False) monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None) monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: []) - monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None) - monkeypatch.setattr( - "hermes_cli.auth.resolve_codex_runtime_credentials", - lambda *args, **kwargs: { - "base_url": "https://chatgpt.com/backend-api/codex", - "api_key": "codex-...oken", - }, - ) - monkeypatch.setattr( - "hermes_cli.codex_models.get_codex_model_ids", - lambda **kwargs: ["openai/gpt-5.3-codex", "openai/gpt-5-codex-mini"], - ) + monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: []) + + setup_model_provider(config) + + assert config.get("credential_pool_strategies", {}) == {} + + +def test_setup_copilot_uses_gh_auth_and_saves_provider(tmp_path, monkeypatch): + """Copilot provider saves correctly through delegation.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = load_config() + + def fake_select(): + _write_model_config("copilot", "https://models.github.ai/inference/v1", "gpt-4o") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) setup_model_provider(config) save_config(config) - env = _read_env(tmp_path) reloaded = load_config() + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "copilot" - assert env.get("OPENAI_BASE_URL") == "" - assert env.get("OPENAI_API_KEY") == "" + +def 
test_setup_copilot_acp_uses_model_picker_and_saves_provider(tmp_path, monkeypatch): + """Copilot ACP provider saves correctly through delegation.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + config = load_config() + + def fake_select(): + _write_model_config("copilot-acp", "", "claude-sonnet-4") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() + assert isinstance(reloaded["model"], dict) + assert reloaded["model"]["provider"] == "copilot-acp" + + +def test_setup_switch_custom_to_codex_clears_custom_endpoint_and_updates_config( + tmp_path, monkeypatch +): + """Switching from custom to codex updates config correctly.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path)) + _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) + + # Start with custom + _write_model_config("custom", "http://localhost:11434/v1", "qwen3.5:32b") + + config = load_config() + assert config["model"]["provider"] == "custom" + + def fake_select(): + _write_model_config("openai-codex", "https://api.openai.com/v1", "gpt-4o") + + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + + setup_model_provider(config) + save_config(config) + + reloaded = load_config() + assert isinstance(reloaded["model"], dict) assert reloaded["model"]["provider"] == "openai-codex" - assert reloaded["model"]["default"] == "openai/gpt-5.3-codex" - assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex" + assert reloaded["model"]["default"] == "gpt-4o" -def test_setup_summary_marks_codex_auth_as_vision_available(tmp_path, monkeypatch, capsys): +def test_setup_switch_preserves_non_model_config(tmp_path, monkeypatch): + """Provider switch preserves other config sections (terminal, display, etc.).""" monkeypatch.setenv("HERMES_HOME", str(tmp_path)) _clear_provider_env(monkeypatch) + _stub_tts(monkeypatch) - (tmp_path / "auth.json").write_text( - '{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token": "***", "refresh_token": "***"}}}}' - ) + config = load_config() + config["terminal"]["timeout"] = 999 + save_config(config) - monkeypatch.setattr("shutil.which", lambda _name: None) + config = load_config() - _print_setup_summary(load_config(), tmp_path) - output = capsys.readouterr().out + def fake_select(): + _write_model_config("openrouter", model_name="gpt-4o") - assert "Vision (image analysis)" in output - assert "missing run 'hermes setup' to configure" not in output - assert "Mixture of Agents" in output - assert "missing OPENROUTER_API_KEY" in output + monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select) + setup_model_provider(config) + save_config(config) -def test_setup_summary_marks_anthropic_auth_as_vision_available(tmp_path, monkeypatch, capsys): - monkeypatch.setenv("HERMES_HOME", str(tmp_path)) - _clear_provider_env(monkeypatch) - monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-key") - monkeypatch.setattr("shutil.which", lambda _name: None) - monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: ["anthropic"]) - - _print_setup_summary(load_config(), tmp_path) - output = capsys.readouterr().out - - assert "Vision (image analysis)" in output - assert "missing run 'hermes setup' to configure" not in output + reloaded = load_config() + assert reloaded["terminal"]["timeout"] == 999 + assert 
reloaded["model"]["provider"] == "openrouter" diff --git a/tests/hermes_cli/test_update_gateway_restart.py b/tests/hermes_cli/test_update_gateway_restart.py index 89ac84219..1d6b064af 100644 --- a/tests/hermes_cli/test_update_gateway_restart.py +++ b/tests/hermes_cli/test_update_gateway_restart.py @@ -25,6 +25,8 @@ def _make_run_side_effect( verify_ok=True, commit_count="3", systemd_active=False, + system_service_active=False, + system_restart_rc=0, launchctl_loaded=False, ): """Build a subprocess.run side_effect that simulates git + service commands.""" @@ -45,14 +47,23 @@ def _make_run_side_effect( if "rev-list" in joined: return subprocess.CompletedProcess(cmd, 0, stdout=f"{commit_count}\n", stderr="") - # systemctl --user is-active + # systemctl is-active — distinguish --user from system scope if "systemctl" in joined and "is-active" in joined: - if systemd_active: - return subprocess.CompletedProcess(cmd, 0, stdout="active\n", stderr="") - return subprocess.CompletedProcess(cmd, 3, stdout="inactive\n", stderr="") + if "--user" in joined: + if systemd_active: + return subprocess.CompletedProcess(cmd, 0, stdout="active\n", stderr="") + return subprocess.CompletedProcess(cmd, 3, stdout="inactive\n", stderr="") + else: + # System-level check (no --user) + if system_service_active: + return subprocess.CompletedProcess(cmd, 0, stdout="active\n", stderr="") + return subprocess.CompletedProcess(cmd, 3, stdout="inactive\n", stderr="") - # systemctl --user restart + # systemctl restart — distinguish --user from system scope if "systemctl" in joined and "restart" in joined: + if "--user" not in joined and system_service_active: + stderr = "" if system_restart_rc == 0 else "Failed to restart: Permission denied" + return subprocess.CompletedProcess(cmd, system_restart_rc, stdout="", stderr=stderr) return subprocess.CompletedProcess(cmd, 0, stdout="", stderr="") # launchctl list ai.hermes.gateway @@ -393,3 +404,91 @@ class TestCmdUpdateLaunchdRestart: assert "Stopped gateway" not in captured assert "Gateway restarted" not in captured assert "Gateway restarted via launchd" not in captured + + +# --------------------------------------------------------------------------- +# cmd_update — system-level systemd service detection +# --------------------------------------------------------------------------- + + +class TestCmdUpdateSystemService: + """cmd_update detects system-level gateway services where --user fails.""" + + @patch("shutil.which", return_value=None) + @patch("subprocess.run") + def test_update_detects_system_service_and_restarts( + self, mock_run, _mock_which, mock_args, capsys, monkeypatch, + ): + """When user systemd is inactive but a system service exists, restart via system scope.""" + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + + mock_run.side_effect = _make_run_side_effect( + commit_count="3", + systemd_active=False, + system_service_active=True, + ) + + with patch("gateway.status.get_running_pid", return_value=12345), \ + patch("gateway.status.remove_pid_file"): + cmd_update(mock_args) + + captured = capsys.readouterr().out + assert "system gateway service" in captured.lower() + assert "Gateway restarted (system service)" in captured + # Verify systemctl restart (no --user) was called + restart_calls = [ + c for c in mock_run.call_args_list + if "restart" in " ".join(str(a) for a in c.args[0]) + and "systemctl" in " ".join(str(a) for a in c.args[0]) + and "--user" not in " ".join(str(a) for a in 
c.args[0]) + ] + assert len(restart_calls) == 1 + + @patch("shutil.which", return_value=None) + @patch("subprocess.run") + def test_update_system_service_restart_failure_shows_sudo_hint( + self, mock_run, _mock_which, mock_args, capsys, monkeypatch, + ): + """When system service restart fails (e.g. no root), show sudo hint.""" + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + + mock_run.side_effect = _make_run_side_effect( + commit_count="3", + systemd_active=False, + system_service_active=True, + system_restart_rc=1, + ) + + with patch("gateway.status.get_running_pid", return_value=12345), \ + patch("gateway.status.remove_pid_file"): + cmd_update(mock_args) + + captured = capsys.readouterr().out + assert "sudo systemctl restart" in captured + + @patch("shutil.which", return_value=None) + @patch("subprocess.run") + def test_user_service_takes_priority_over_system( + self, mock_run, _mock_which, mock_args, capsys, monkeypatch, + ): + """When both user and system services are active, user wins.""" + monkeypatch.setattr(gateway_cli, "is_macos", lambda: False) + monkeypatch.setattr(gateway_cli, "is_linux", lambda: True) + + mock_run.side_effect = _make_run_side_effect( + commit_count="3", + systemd_active=True, + system_service_active=True, + ) + + with patch("gateway.status.get_running_pid", return_value=12345), \ + patch("gateway.status.remove_pid_file"), \ + patch("os.kill"): + cmd_update(mock_args) + + captured = capsys.readouterr().out + # Should restart via user service, not system + assert "Gateway restarted." in captured + assert "(system service)" not in captured diff --git a/tests/honcho_integration/test_config_isolation.py b/tests/honcho_integration/test_config_isolation.py new file mode 100644 index 000000000..4d9898e68 --- /dev/null +++ b/tests/honcho_integration/test_config_isolation.py @@ -0,0 +1,190 @@ +"""Tests for Honcho config profile isolation. + +Verifies that each Hermes profile writes to its own instance-local +honcho.json ($HERMES_HOME/honcho.json) rather than the shared global +~/.honcho/config.json. +""" + +import json +import os +from pathlib import Path +from unittest.mock import patch + +import pytest + +from honcho_integration.cli import ( + _config_path, + _local_config_path, + _read_config, + _write_config, +) + + +@pytest.fixture +def isolated_home(tmp_path, monkeypatch): + """Create an isolated HERMES_HOME + real home for testing.""" + hermes_home = tmp_path / "profile_a" + hermes_home.mkdir() + global_dir = tmp_path / "home" / ".honcho" + global_dir.mkdir(parents=True) + global_config = global_dir / "config.json" + + monkeypatch.setenv("HERMES_HOME", str(hermes_home)) + monkeypatch.setattr(Path, "home", staticmethod(lambda: tmp_path / "home")) + # GLOBAL_CONFIG_PATH is a module-level constant cached at import time, + # so we must patch it in both the defining module and the importing module. 
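+    # A short sketch of why both patches are needed (assuming cli.py binds the
+    # name via `from .client import GLOBAL_CONFIG_PATH`): a from-import copies
+    # the binding into honcho_integration.cli at import time, so patching only
+    # honcho_integration.client would leave cli's copy pointing at the real
+    # ~/.honcho/config.json.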
+ import honcho_integration.client as _client_mod + import honcho_integration.cli as _cli_mod + monkeypatch.setattr(_client_mod, "GLOBAL_CONFIG_PATH", global_config) + monkeypatch.setattr(_cli_mod, "GLOBAL_CONFIG_PATH", global_config) + + return { + "hermes_home": hermes_home, + "global_config": global_config, + "local_config": hermes_home / "honcho.json", + } + + +class TestLocalConfigPath: + """_local_config_path always returns $HERMES_HOME/honcho.json.""" + + def test_returns_hermes_home_path(self, isolated_home): + assert _local_config_path() == isolated_home["local_config"] + + def test_differs_from_global(self, isolated_home): + from honcho_integration.client import GLOBAL_CONFIG_PATH + assert _local_config_path() != GLOBAL_CONFIG_PATH + + +class TestWriteConfigIsolation: + """_write_config defaults to the instance-local path.""" + + def test_write_creates_local_file(self, isolated_home): + cfg = {"apiKey": "test-key", "hosts": {"hermes": {"enabled": True}}} + _write_config(cfg) + + assert isolated_home["local_config"].exists() + written = json.loads(isolated_home["local_config"].read_text()) + assert written["apiKey"] == "test-key" + + def test_write_does_not_touch_global(self, isolated_home): + # Pre-populate global config + isolated_home["global_config"].write_text( + json.dumps({"apiKey": "global-key"}) + ) + + cfg = {"apiKey": "profile-key"} + _write_config(cfg) + + # Global should be untouched + global_data = json.loads(isolated_home["global_config"].read_text()) + assert global_data["apiKey"] == "global-key" + + # Local should have the new value + local_data = json.loads(isolated_home["local_config"].read_text()) + assert local_data["apiKey"] == "profile-key" + + def test_explicit_path_override_still_works(self, isolated_home): + custom = isolated_home["hermes_home"] / "custom.json" + _write_config({"custom": True}, path=custom) + assert custom.exists() + assert not isolated_home["local_config"].exists() + + +class TestReadConfigFallback: + """_read_config falls back to global when no local file exists.""" + + def test_reads_local_when_exists(self, isolated_home): + isolated_home["local_config"].write_text( + json.dumps({"source": "local"}) + ) + cfg = _read_config() + assert cfg["source"] == "local" + + def test_falls_back_to_global(self, isolated_home): + isolated_home["global_config"].write_text( + json.dumps({"source": "global"}) + ) + # No local file exists + assert not isolated_home["local_config"].exists() + cfg = _read_config() + assert cfg["source"] == "global" + + def test_local_takes_priority_over_global(self, isolated_home): + isolated_home["local_config"].write_text( + json.dumps({"source": "local"}) + ) + isolated_home["global_config"].write_text( + json.dumps({"source": "global"}) + ) + cfg = _read_config() + assert cfg["source"] == "local" + + +class TestMultiProfileIsolation: + """Two profiles writing config don't interfere with each other.""" + + def test_two_profiles_get_separate_configs(self, tmp_path, monkeypatch): + home = tmp_path / "home" + home.mkdir() + monkeypatch.setattr(Path, "home", staticmethod(lambda: home)) + + profile_a = tmp_path / "profile_a" + profile_b = tmp_path / "profile_b" + profile_a.mkdir() + profile_b.mkdir() + + # Profile A writes its config + monkeypatch.setenv("HERMES_HOME", str(profile_a)) + _write_config({"apiKey": "key-a", "hosts": {"hermes": {"peerName": "alice"}}}) + + # Profile B writes its config + monkeypatch.setenv("HERMES_HOME", str(profile_b)) + _write_config({"apiKey": "key-b", "hosts": {"hermes": {"peerName": 
"bob"}}}) + + # Verify isolation + a_data = json.loads((profile_a / "honcho.json").read_text()) + b_data = json.loads((profile_b / "honcho.json").read_text()) + + assert a_data["hosts"]["hermes"]["peerName"] == "alice" + assert b_data["hosts"]["hermes"]["peerName"] == "bob" + + def test_first_setup_seeds_from_global(self, tmp_path, monkeypatch): + """First setup reads global config, writes to local.""" + home = tmp_path / "home" + global_dir = home / ".honcho" + global_dir.mkdir(parents=True) + monkeypatch.setattr(Path, "home", staticmethod(lambda: home)) + import honcho_integration.client as _client_mod + import honcho_integration.cli as _cli_mod + global_cfg_path = global_dir / "config.json" + monkeypatch.setattr(_client_mod, "GLOBAL_CONFIG_PATH", global_cfg_path) + monkeypatch.setattr(_cli_mod, "GLOBAL_CONFIG_PATH", global_cfg_path) + + # Existing global config + global_config = global_dir / "config.json" + global_config.write_text(json.dumps({ + "apiKey": "shared-key", + "hosts": {"hermes": {"workspace": "shared-ws"}}, + })) + + profile = tmp_path / "new_profile" + profile.mkdir() + monkeypatch.setenv("HERMES_HOME", str(profile)) + + # Read seeds from global + cfg = _read_config() + assert cfg["apiKey"] == "shared-key" + + # Modify and write goes to local + cfg["hosts"]["hermes"]["peerName"] = "new-user" + _write_config(cfg) + + local_config = profile / "honcho.json" + assert local_config.exists() + local_data = json.loads(local_config.read_text()) + assert local_data["hosts"]["hermes"]["peerName"] == "new-user" + + # Global unchanged + global_data = json.loads(global_config.read_text()) + assert "peerName" not in global_data["hosts"]["hermes"] diff --git a/tests/test_api_key_providers.py b/tests/test_api_key_providers.py index 0c6337d3e..e250bbb25 100644 --- a/tests/test_api_key_providers.py +++ b/tests/test_api_key_providers.py @@ -622,6 +622,57 @@ class TestHasAnyProviderConfigured: from hermes_cli.main import _has_any_provider_configured assert _has_any_provider_configured() is True + def test_claude_code_creds_ignored_on_fresh_install(self, monkeypatch, tmp_path): + """Claude Code credentials should NOT skip the wizard when Hermes is unconfigured.""" + from hermes_cli import config as config_module + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env") + monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home) + # Clear all provider env vars so earlier checks don't short-circuit + for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", + "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"): + monkeypatch.delenv(var, raising=False) + # Simulate valid Claude Code credentials + monkeypatch.setattr( + "agent.anthropic_adapter.read_claude_code_credentials", + lambda: {"accessToken": "sk-ant-test", "refreshToken": "ref-tok"}, + ) + monkeypatch.setattr( + "agent.anthropic_adapter.is_claude_code_token_valid", + lambda creds: True, + ) + from hermes_cli.main import _has_any_provider_configured + assert _has_any_provider_configured() is False + + def test_claude_code_creds_counted_when_hermes_configured(self, monkeypatch, tmp_path): + """Claude Code credentials should count when Hermes has been explicitly configured.""" + import yaml + from hermes_cli import config as config_module + hermes_home = tmp_path / ".hermes" + hermes_home.mkdir() + # Write a config with a non-default model to simulate explicit configuration + config_file = hermes_home / "config.yaml" + 
config_file.write_text(yaml.dump({"model": {"default": "my-local-model"}})) + monkeypatch.setattr(config_module, "get_env_path", lambda: hermes_home / ".env") + monkeypatch.setattr(config_module, "get_hermes_home", lambda: hermes_home) + monkeypatch.setenv("HERMES_HOME", str(hermes_home)) + # Clear all provider env vars + for var in ("OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY", + "ANTHROPIC_TOKEN", "OPENAI_BASE_URL"): + monkeypatch.delenv(var, raising=False) + # Simulate valid Claude Code credentials + monkeypatch.setattr( + "agent.anthropic_adapter.read_claude_code_credentials", + lambda: {"accessToken": "sk-ant-test", "refreshToken": "ref-tok"}, + ) + monkeypatch.setattr( + "agent.anthropic_adapter.is_claude_code_token_valid", + lambda creds: True, + ) + from hermes_cli.main import _has_any_provider_configured + assert _has_any_provider_configured() is True + # ============================================================================= # Kimi Code auto-detection tests diff --git a/tests/test_auth_commands.py b/tests/test_auth_commands.py new file mode 100644 index 000000000..c55629404 --- /dev/null +++ b/tests/test_auth_commands.py @@ -0,0 +1,391 @@ +"""Tests for auth subcommands backed by the credential pool.""" + +from __future__ import annotations + +import base64 +import json + +import pytest + + +def _write_auth_store(tmp_path, payload: dict) -> None: + hermes_home = tmp_path / "hermes" + hermes_home.mkdir(parents=True, exist_ok=True) + (hermes_home / "auth.json").write_text(json.dumps(payload, indent=2)) + + +def _jwt_with_email(email: str) -> str: + header = base64.urlsafe_b64encode(b'{"alg":"RS256","typ":"JWT"}').rstrip(b"=").decode() + payload = base64.urlsafe_b64encode( + json.dumps({"email": email}).encode() + ).rstrip(b"=").decode() + return f"{header}.{payload}.signature" + + +@pytest.fixture(autouse=True) +def _clear_provider_env(monkeypatch): + for key in ( + "OPENROUTER_API_KEY", + "OPENAI_API_KEY", + "ANTHROPIC_API_KEY", + "ANTHROPIC_TOKEN", + "CLAUDE_CODE_OAUTH_TOKEN", + ): + monkeypatch.delenv(key, raising=False) + + +def test_auth_add_api_key_persists_manual_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + + from hermes_cli.auth_commands import auth_add_command + + class _Args: + provider = "openrouter" + auth_type = "api-key" + api_key = "sk-or-manual" + label = "personal" + + auth_add_command(_Args()) + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entries = payload["credential_pool"]["openrouter"] + entry = next(item for item in entries if item["source"] == "manual") + assert entry["label"] == "personal" + assert entry["auth_type"] == "api_key" + assert entry["source"] == "manual" + assert entry["access_token"] == "sk-or-manual" + + +def test_auth_add_anthropic_oauth_persists_pool_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + monkeypatch.delenv("ANTHROPIC_TOKEN", raising=False) + monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False) + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + token = _jwt_with_email("claude@example.com") + monkeypatch.setattr( + "agent.anthropic_adapter.run_hermes_oauth_login_pure", + lambda: { + "access_token": token, + "refresh_token": "refresh-token", + 
"expires_at_ms": 1711234567000, + }, + ) + + from hermes_cli.auth_commands import auth_add_command + + class _Args: + provider = "anthropic" + auth_type = "oauth" + api_key = None + label = None + + auth_add_command(_Args()) + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entries = payload["credential_pool"]["anthropic"] + entry = next(item for item in entries if item["source"] == "manual:hermes_pkce") + assert entry["label"] == "claude@example.com" + assert entry["source"] == "manual:hermes_pkce" + assert entry["refresh_token"] == "refresh-token" + assert entry["expires_at_ms"] == 1711234567000 + + +def test_auth_add_nous_oauth_persists_pool_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + token = _jwt_with_email("nous@example.com") + monkeypatch.setattr( + "hermes_cli.auth._nous_device_code_login", + lambda **kwargs: { + "portal_base_url": "https://portal.example.com", + "inference_base_url": "https://inference.example.com/v1", + "client_id": "hermes-cli", + "scope": "inference:mint_agent_key", + "token_type": "Bearer", + "access_token": token, + "refresh_token": "refresh-token", + "obtained_at": "2026-03-23T10:00:00+00:00", + "expires_at": "2026-03-23T11:00:00+00:00", + "expires_in": 3600, + "agent_key": "ak-test", + "agent_key_id": "ak-id", + "agent_key_expires_at": "2026-03-23T10:30:00+00:00", + "agent_key_expires_in": 1800, + "agent_key_reused": False, + "agent_key_obtained_at": "2026-03-23T10:00:10+00:00", + "tls": {"insecure": False, "ca_bundle": None}, + }, + ) + + from hermes_cli.auth_commands import auth_add_command + + class _Args: + provider = "nous" + auth_type = "oauth" + api_key = None + label = None + portal_url = None + inference_url = None + client_id = None + scope = None + no_browser = False + timeout = None + insecure = False + ca_bundle = None + + auth_add_command(_Args()) + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entries = payload["credential_pool"]["nous"] + entry = next(item for item in entries if item["source"] == "manual:device_code") + assert entry["label"] == "nous@example.com" + assert entry["source"] == "manual:device_code" + assert entry["agent_key"] == "ak-test" + assert entry["portal_base_url"] == "https://portal.example.com" + + +def test_auth_add_codex_oauth_persists_pool_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + token = _jwt_with_email("codex@example.com") + monkeypatch.setattr( + "hermes_cli.auth._codex_device_code_login", + lambda: { + "tokens": { + "access_token": token, + "refresh_token": "refresh-token", + }, + "base_url": "https://chatgpt.com/backend-api/codex", + "last_refresh": "2026-03-23T10:00:00Z", + }, + ) + + from hermes_cli.auth_commands import auth_add_command + + class _Args: + provider = "openai-codex" + auth_type = "oauth" + api_key = None + label = None + + auth_add_command(_Args()) + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entries = payload["credential_pool"]["openai-codex"] + entry = next(item for item in entries if item["source"] == "manual:device_code") + assert entry["label"] == "codex@example.com" + assert entry["source"] == "manual:device_code" + assert entry["refresh_token"] == "refresh-token" + assert entry["base_url"] == "https://chatgpt.com/backend-api/codex" + + +def 
test_auth_remove_reindexes_priorities(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + # Prevent pool auto-seeding from host env vars and file-backed sources + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + monkeypatch.delenv("ANTHROPIC_TOKEN", raising=False) + monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False) + monkeypatch.setattr( + "agent.credential_pool._seed_from_singletons", + lambda provider, entries: (False, set()), + ) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-ant-api-primary", + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "sk-ant-api-secondary", + }, + ] + }, + }, + ) + + from hermes_cli.auth_commands import auth_remove_command + + class _Args: + provider = "anthropic" + index = 1 + + auth_remove_command(_Args()) + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entries = payload["credential_pool"]["anthropic"] + assert len(entries) == 1 + assert entries[0]["label"] == "secondary" + assert entries[0]["priority"] == 0 + + +def test_auth_reset_clears_provider_statuses(tmp_path, monkeypatch, capsys): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-ant-api-primary", + "last_status": "exhausted", + "last_status_at": 1711230000.0, + "last_error_code": 402, + } + ] + }, + }, + ) + + from hermes_cli.auth_commands import auth_reset_command + + class _Args: + provider = "anthropic" + + auth_reset_command(_Args()) + + out = capsys.readouterr().out + assert "Reset status" in out + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + entry = payload["credential_pool"]["anthropic"][0] + assert entry["last_status"] is None + assert entry["last_status_at"] is None + assert entry["last_error_code"] is None + + +def test_clear_provider_auth_removes_provider_pool_entries(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "active_provider": "anthropic", + "providers": { + "anthropic": {"access_token": "legacy-token"}, + }, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "oauth", + "priority": 0, + "source": "manual:hermes_pkce", + "access_token": "pool-token", + } + ], + "openrouter": [ + { + "id": "cred-2", + "label": "other-provider", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-or-test", + } + ], + }, + }, + ) + + from hermes_cli.auth import clear_provider_auth + + assert clear_provider_auth("anthropic") is True + + payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + assert payload["active_provider"] is None + assert "anthropic" not in payload.get("providers", {}) + assert "anthropic" not in payload.get("credential_pool", {}) + assert "openrouter" in payload.get("credential_pool", {}) + + +def test_auth_list_does_not_call_mutating_select(monkeypatch, capsys): + from hermes_cli.auth_commands import auth_list_command + + class _Entry: + id = "cred-1" + label = "primary" + auth_type="***" + source 
= "manual" + last_status = None + last_error_code = None + last_status_at = None + + class _Pool: + def entries(self): + return [_Entry()] + + def peek(self): + return _Entry() + + def select(self): + raise AssertionError("auth_list_command should not call select()") + + monkeypatch.setattr( + "hermes_cli.auth_commands.load_pool", + lambda provider: _Pool() if provider == "openrouter" else type("_EmptyPool", (), {"entries": lambda self: []})(), + ) + + class _Args: + provider = "openrouter" + + auth_list_command(_Args()) + + out = capsys.readouterr().out + assert "openrouter (1 credentials):" in out + assert "primary" in out + + +def test_auth_list_shows_exhausted_cooldown(monkeypatch, capsys): + from hermes_cli.auth_commands import auth_list_command + + class _Entry: + id = "cred-1" + label = "primary" + auth_type = "api_key" + source = "manual" + last_status = "exhausted" + last_error_code = 429 + last_status_at = 1000.0 + + class _Pool: + def entries(self): + return [_Entry()] + + def peek(self): + return None + + monkeypatch.setattr("hermes_cli.auth_commands.load_pool", lambda provider: _Pool()) + monkeypatch.setattr("hermes_cli.auth_commands.time.time", lambda: 1030.0) + + class _Args: + provider = "openrouter" + + auth_list_command(_Args()) + + out = capsys.readouterr().out + assert "exhausted (429)" in out + assert "59m 30s left" in out diff --git a/tests/test_cli_provider_resolution.py b/tests/test_cli_provider_resolution.py index 667cd33a6..3c9b31f5f 100644 --- a/tests/test_cli_provider_resolution.py +++ b/tests/test_cli_provider_resolution.py @@ -424,6 +424,7 @@ def test_cmd_model_falls_back_to_auto_on_invalid_provider(monkeypatch, capsys): monkeypatch.setattr("hermes_cli.auth.resolve_provider", _resolve_provider) monkeypatch.setattr(hermes_main, "_prompt_provider_choice", lambda choices: len(choices) - 1) + monkeypatch.setattr("sys.stdin", type("FakeTTY", (), {"isatty": lambda self: True})()) hermes_main.cmd_model(SimpleNamespace()) output = capsys.readouterr().out @@ -459,13 +460,16 @@ def test_model_flow_custom_saves_verified_v1_base_url(monkeypatch, capsys): ) monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None) - answers = iter(["http://localhost:8000", "local-key", "llm", ""]) + # After the probe detects a single model ("llm"), the flow asks + # "Use this model? [Y/n]:" — confirm with Enter, then context length. 
+    answers = iter(["http://localhost:8000", "local-key", "", ""])
     monkeypatch.setattr("builtins.input", lambda _prompt="": next(answers))
 
     hermes_main._model_flow_custom({})
 
     output = capsys.readouterr().out
     assert "Saving the working base URL instead" in output
-    assert saved_env["OPENAI_BASE_URL"] == "http://localhost:8000/v1"
-    assert saved_env["OPENAI_API_KEY"] == "local-key"
+    assert "Detected model: llm" in output
+    # OPENAI_BASE_URL is no longer saved to .env — config.yaml is authoritative
+    assert "OPENAI_BASE_URL" not in saved_env
     assert saved_env["MODEL"] == "llm"
\ No newline at end of file
diff --git a/tests/test_codex_execution_paths.py b/tests/test_codex_execution_paths.py
index 2a6044294..de33a0b91 100644
--- a/tests/test_codex_execution_paths.py
+++ b/tests/test_codex_execution_paths.py
@@ -112,7 +112,7 @@ def test_cron_run_job_codex_path_handles_internal_401_refresh(monkeypatch):
     _Codex401ThenSuccessAgent.last_init = {}
 
     success, output, final_response, error = cron_scheduler.run_job(
-        {"id": "job-1", "name": "Codex Refresh Test", "prompt": "ping"}
+        {"id": "job-1", "name": "Codex Refresh Test", "prompt": "ping", "model": "gpt-5.3-codex"}
     )
 
     assert success is True
@@ -139,6 +139,7 @@ def test_gateway_run_agent_codex_path_handles_internal_401_refresh(monkeypatch):
         },
     )
     monkeypatch.setenv("HERMES_TOOL_PROGRESS", "false")
+    monkeypatch.setenv("HERMES_MODEL", "gpt-5.3-codex")
 
     _Codex401ThenSuccessAgent.refresh_attempts = 0
     _Codex401ThenSuccessAgent.last_init = {}
diff --git a/tests/test_compression_persistence.py b/tests/test_compression_persistence.py
new file mode 100644
index 000000000..272b39bfe
--- /dev/null
+++ b/tests/test_compression_persistence.py
@@ -0,0 +1,202 @@
+"""Tests for context compression persistence in the gateway.
+
+Verifies that when context compression fires during run_conversation(),
+the compressed messages are properly persisted to both SQLite (via the
+agent) and JSONL (via the gateway).
+
+Bug scenario (pre-fix):
+    1. Gateway loads 200-message history, passes to agent
+    2. Agent's run_conversation() compresses to ~30 messages mid-run
+    3. _compress_context() resets _last_flushed_db_idx = 0
+    4. On exit, _flush_messages_to_session_db() calculates
+       flush_from = max(len(conversation_history), _last_flushed_db_idx)
+       = max(200, 0) = 200
+    5. messages[200:] is empty (only ~30 messages after compression)
+    6. Nothing written to new session's SQLite — compressed context lost
+    7. Gateway's history_offset was still 200, producing empty new_messages
+    8.
Fallback wrote only user/assistant pair — summary lost +""" + +import os +import tempfile +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + + +# --------------------------------------------------------------------------- +# Part 1: Agent-side — _flush_messages_to_session_db after compression +# --------------------------------------------------------------------------- + +class TestFlushAfterCompression: + """Verify that compressed messages are flushed to the new session's SQLite + even when conversation_history (from the original session) is longer than + the compressed messages list.""" + + def _make_agent(self, session_db): + with patch.dict(os.environ, {"OPENROUTER_API_KEY": "test-key"}): + from run_agent import AIAgent + agent = AIAgent( + model="test/model", + quiet_mode=True, + session_db=session_db, + session_id="original-session", + skip_context_files=True, + skip_memory=True, + ) + return agent + + def test_flush_after_compression_with_long_history(self): + """The actual bug: conversation_history longer than compressed messages. + + Before the fix, flush_from = max(len(conversation_history), 0) = 200, + but messages only has ~30 entries, so messages[200:] is empty. + After the fix, conversation_history is cleared to None after compression, + so flush_from = max(0, 0) = 0, and ALL compressed messages are written. + """ + from hermes_state import SessionDB + + with tempfile.TemporaryDirectory() as tmpdir: + db_path = Path(tmpdir) / "test.db" + db = SessionDB(db_path=db_path) + + agent = self._make_agent(db) + + # Simulate the original long history (200 messages) + original_history = [ + {"role": "user" if i % 2 == 0 else "assistant", + "content": f"message {i}"} + for i in range(200) + ] + + # First, flush original messages to the original session + agent._flush_messages_to_session_db(original_history, []) + original_rows = db.get_messages("original-session") + assert len(original_rows) == 200 + + # Now simulate compression: new session, reset idx, shorter messages + agent.session_id = "compressed-session" + db.create_session(session_id="compressed-session", source="test") + agent._last_flushed_db_idx = 0 + + # The compressed messages (summary + tail + new turn) + compressed_messages = [ + {"role": "user", "content": "[CONTEXT COMPACTION] Summary of work..."}, + {"role": "user", "content": "What should we do next?"}, + {"role": "assistant", "content": "Let me check..."}, + {"role": "user", "content": "new question"}, + {"role": "assistant", "content": "new answer"}, + ] + + # THE BUG: passing the original history as conversation_history + # causes flush_from = max(200, 0) = 200, skipping everything. + # After the fix, conversation_history should be None. + agent._flush_messages_to_session_db(compressed_messages, None) + + new_rows = db.get_messages("compressed-session") + assert len(new_rows) == 5, ( + f"Expected 5 compressed messages in new session, got {len(new_rows)}. " + f"Compression persistence bug: messages not written to SQLite." 
+ ) + + def test_flush_with_stale_history_loses_messages(self): + """Demonstrates the bug condition: stale conversation_history causes data loss.""" + from hermes_state import SessionDB + + with tempfile.TemporaryDirectory() as tmpdir: + db_path = Path(tmpdir) / "test.db" + db = SessionDB(db_path=db_path) + + agent = self._make_agent(db) + + # Simulate compression reset + agent.session_id = "new-session" + db.create_session(session_id="new-session", source="test") + agent._last_flushed_db_idx = 0 + + compressed = [ + {"role": "user", "content": "summary"}, + {"role": "assistant", "content": "continuing..."}, + ] + + # Bug: passing a conversation_history longer than compressed messages + stale_history = [{"role": "user", "content": f"msg{i}"} for i in range(100)] + agent._flush_messages_to_session_db(compressed, stale_history) + + rows = db.get_messages("new-session") + # With the stale history, flush_from = max(100, 0) = 100 + # But compressed only has 2 entries → messages[100:] = empty + assert len(rows) == 0, ( + "Expected 0 messages with stale conversation_history " + "(this test verifies the bug condition exists)" + ) + + +# --------------------------------------------------------------------------- +# Part 2: Gateway-side — history_offset after session split +# --------------------------------------------------------------------------- + +class TestGatewayHistoryOffsetAfterSplit: + """Verify that when the agent creates a new session during compression, + the gateway uses history_offset=0 so all compressed messages are written + to the JSONL transcript.""" + + def test_history_offset_zero_on_session_split(self): + """When agent.session_id differs from the original, history_offset must be 0.""" + # This tests the logic in gateway/run.py run_sync(): + # _session_was_split = agent.session_id != session_id + # _effective_history_offset = 0 if _session_was_split else len(agent_history) + + original_session_id = "session-abc" + agent_session_id = "session-compressed-xyz" # Different = compression happened + agent_history_len = 200 + + # Simulate the gateway's offset calculation (post-fix) + _session_was_split = (agent_session_id != original_session_id) + _effective_history_offset = 0 if _session_was_split else agent_history_len + + assert _session_was_split is True + assert _effective_history_offset == 0 + + def test_history_offset_preserved_without_split(self): + """When no compression happened, history_offset is the original length.""" + session_id = "session-abc" + agent_session_id = "session-abc" # Same = no compression + agent_history_len = 200 + + _session_was_split = (agent_session_id != session_id) + _effective_history_offset = 0 if _session_was_split else agent_history_len + + assert _session_was_split is False + assert _effective_history_offset == 200 + + def test_new_messages_extraction_after_split(self): + """After compression with offset=0, new_messages should be ALL agent messages.""" + # Simulates the gateway's new_messages calculation + agent_messages = [ + {"role": "user", "content": "[CONTEXT COMPACTION] Summary..."}, + {"role": "user", "content": "recent question"}, + {"role": "assistant", "content": "recent answer"}, + {"role": "user", "content": "new question"}, + {"role": "assistant", "content": "new answer"}, + ] + history_offset = 0 # After fix: 0 on session split + + new_messages = agent_messages[history_offset:] if len(agent_messages) > history_offset else [] + assert len(new_messages) == 5, ( + f"Expected all 5 messages with offset=0, got {len(new_messages)}" + ) + 
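+    # The case below reproduces the pre-fix failure for contrast: the gateway
+    # kept the original history length as the offset even after the agent
+    # swapped to a fresh compressed session, so the slice came back empty
+    # (mirrors steps 7-8 of the module docstring's bug scenario).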
+ def test_new_messages_empty_with_stale_offset(self): + """Demonstrates the bug: stale offset produces empty new_messages.""" + agent_messages = [ + {"role": "user", "content": "summary"}, + {"role": "assistant", "content": "answer"}, + ] + # Bug: offset is the pre-compression history length + history_offset = 200 + + new_messages = agent_messages[history_offset:] if len(agent_messages) > history_offset else [] + assert len(new_messages) == 0, ( + "Expected 0 messages with stale offset=200 (demonstrates the bug)" + ) diff --git a/tests/test_credential_pool.py b/tests/test_credential_pool.py new file mode 100644 index 000000000..14302ab13 --- /dev/null +++ b/tests/test_credential_pool.py @@ -0,0 +1,949 @@ +"""Tests for multi-credential runtime pooling and rotation.""" + +from __future__ import annotations + +import json +import time + +import pytest + + +def _write_auth_store(tmp_path, payload: dict) -> None: + hermes_home = tmp_path / "hermes" + hermes_home.mkdir(parents=True, exist_ok=True) + (hermes_home / "auth.json").write_text(json.dumps(payload, indent=2)) + + +def test_fill_first_selection_skips_recently_exhausted_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "***", + "last_status": "exhausted", + "last_status_at": time.time(), + "last_error_code": 402, + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "***", + "last_status": "ok", + "last_status_at": None, + "last_error_code": None, + }, + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + entry = pool.select() + + assert entry is not None + assert entry.id == "cred-2" + assert pool.current().id == "cred-2" + + +def test_select_clears_expired_exhaustion(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "old", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "***", + "last_status": "exhausted", + "last_status_at": time.time() - 90000, + "last_error_code": 402, + } + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + entry = pool.select() + + assert entry is not None + assert entry.last_status == "ok" + + +def test_round_robin_strategy_rotates_priorities(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "***", + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "***", + }, + ] + }, + }, + ) + config_path = tmp_path / "hermes" / "config.yaml" + config_path.write_text("credential_pool_strategies:\n openrouter: round_robin\n") + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + first = pool.select() + assert first is not None + assert first.id == "cred-1" + + reloaded = load_pool("openrouter") + second = reloaded.select() + assert second 
is not None + assert second.id == "cred-2" + + +def test_random_strategy_uses_random_choice(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "***", + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "***", + }, + ] + }, + }, + ) + config_path = tmp_path / "hermes" / "config.yaml" + config_path.write_text("credential_pool_strategies:\n openrouter: random\n") + + monkeypatch.setattr("agent.credential_pool.random.choice", lambda entries: entries[-1]) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + selected = pool.select() + assert selected is not None + assert selected.id == "cred-2" + + + +def test_exhausted_entry_resets_after_ttl(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-or-primary", + "base_url": "https://openrouter.ai/api/v1", + "last_status": "exhausted", + "last_status_at": time.time() - 90000, + "last_error_code": 429, + } + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + entry = pool.select() + + assert entry is not None + assert entry.id == "cred-1" + assert entry.last_status == "ok" + + +def test_mark_exhausted_and_rotate_persists_status(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-ant-api-primary", + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "sk-ant-api-secondary", + }, + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + assert pool.select().id == "cred-1" + + next_entry = pool.mark_exhausted_and_rotate(status_code=402) + + assert next_entry is not None + assert next_entry.id == "cred-2" + + auth_payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + persisted = auth_payload["credential_pool"]["anthropic"][0] + assert persisted["last_status"] == "exhausted" + assert persisted["last_error_code"] == 402 + + +def test_try_refresh_current_updates_only_current_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openai-codex": [ + { + "id": "cred-1", + "label": "primary", + "auth_type": "oauth", + "priority": 0, + "source": "device_code", + "access_token": "access-old", + "refresh_token": "refresh-old", + "base_url": "https://chatgpt.com/backend-api/codex", + }, + { + "id": "cred-2", + "label": "secondary", + "auth_type": "oauth", + "priority": 1, + "source": "device_code", + "access_token": "access-other", + "refresh_token": "refresh-other", + "base_url": "https://chatgpt.com/backend-api/codex", + }, + ] + }, + 
}, + ) + + from agent.credential_pool import load_pool + + monkeypatch.setattr( + "hermes_cli.auth.refresh_codex_oauth_pure", + lambda access_token, refresh_token, timeout_seconds=20.0: { + "access_token": "access-new", + "refresh_token": "refresh-new", + }, + ) + + pool = load_pool("openai-codex") + current = pool.select() + assert current.id == "cred-1" + + refreshed = pool.try_refresh_current() + + assert refreshed is not None + assert refreshed.access_token == "access-new" + + auth_payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + primary, secondary = auth_payload["credential_pool"]["openai-codex"] + assert primary["access_token"] == "access-new" + assert primary["refresh_token"] == "refresh-new" + assert secondary["access_token"] == "access-other" + assert secondary["refresh_token"] == "refresh-other" + + +def test_load_pool_seeds_env_api_key(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.setenv("OPENROUTER_API_KEY", "sk-or-seeded") + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + entry = pool.select() + + assert entry is not None + assert entry.source == "env:OPENROUTER_API_KEY" + assert entry.access_token == "sk-or-seeded" + + +def test_load_pool_removes_stale_seeded_env_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "seeded-env", + "label": "OPENROUTER_API_KEY", + "auth_type": "api_key", + "priority": 0, + "source": "env:OPENROUTER_API_KEY", + "access_token": "stale-token", + "base_url": "https://openrouter.ai/api/v1", + } + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + + assert pool.entries() == [] + + auth_payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + assert auth_payload["credential_pool"]["openrouter"] == [] + + +def test_load_pool_migrates_nous_provider_state(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "active_provider": "nous", + "providers": { + "nous": { + "portal_base_url": "https://portal.example.com", + "inference_base_url": "https://inference.example.com/v1", + "client_id": "hermes-cli", + "token_type": "Bearer", + "scope": "inference:mint_agent_key", + "access_token": "access-token", + "refresh_token": "refresh-token", + "expires_at": "2026-03-24T12:00:00+00:00", + "agent_key": "agent-key", + "agent_key_expires_at": "2026-03-24T13:30:00+00:00", + } + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("nous") + entry = pool.select() + + assert entry is not None + assert entry.source == "device_code" + assert entry.portal_base_url == "https://portal.example.com" + assert entry.agent_key == "agent-key" + + +def test_load_pool_removes_stale_file_backed_singleton_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + monkeypatch.delenv("ANTHROPIC_TOKEN", raising=False) + monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "seeded-file", + "label": "claude-code", + "auth_type": "oauth", + 
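+                        # Deliberately unexpired (expires_at_ms below is one
+                        # minute in the future): the pool must drop this entry
+                        # because the backing file is gone, not because the
+                        # token expired.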
"priority": 0, + "source": "claude_code", + "access_token": "stale-access-token", + "refresh_token": "stale-refresh-token", + "expires_at_ms": int(time.time() * 1000) + 60_000, + } + ] + }, + }, + ) + + monkeypatch.setattr( + "agent.anthropic_adapter.read_hermes_oauth_credentials", + lambda: None, + ) + monkeypatch.setattr( + "agent.anthropic_adapter.read_claude_code_credentials", + lambda: None, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + + assert pool.entries() == [] + + auth_payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + assert auth_payload["credential_pool"]["anthropic"] == [] + + +def test_load_pool_migrates_nous_provider_state_preserves_tls(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "active_provider": "nous", + "providers": { + "nous": { + "portal_base_url": "https://portal.example.com", + "inference_base_url": "https://inference.example.com/v1", + "client_id": "hermes-cli", + "token_type": "Bearer", + "scope": "inference:mint_agent_key", + "access_token": "access-token", + "refresh_token": "refresh-token", + "expires_at": "2026-03-24T12:00:00+00:00", + "agent_key": "agent-key", + "agent_key_expires_at": "2026-03-24T13:30:00+00:00", + "tls": { + "insecure": True, + "ca_bundle": "/tmp/nous-ca.pem", + }, + } + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("nous") + entry = pool.select() + + assert entry is not None + assert entry.tls == { + "insecure": True, + "ca_bundle": "/tmp/nous-ca.pem", + } + + auth_payload = json.loads((tmp_path / "hermes" / "auth.json").read_text()) + assert auth_payload["credential_pool"]["nous"][0]["tls"] == { + "insecure": True, + "ca_bundle": "/tmp/nous-ca.pem", + } + + +def test_singleton_seed_does_not_clobber_manual_oauth_entry(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + monkeypatch.delenv("ANTHROPIC_TOKEN", raising=False) + monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "manual-1", + "label": "manual-pkce", + "auth_type": "oauth", + "priority": 0, + "source": "manual:hermes_pkce", + "access_token": "manual-token", + "refresh_token": "manual-refresh", + "expires_at_ms": 1711234567000, + } + ] + }, + }, + ) + + monkeypatch.setattr( + "agent.anthropic_adapter.read_hermes_oauth_credentials", + lambda: { + "accessToken": "seeded-token", + "refreshToken": "seeded-refresh", + "expiresAt": 1711234999000, + }, + ) + monkeypatch.setattr( + "agent.anthropic_adapter.read_claude_code_credentials", + lambda: None, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + entries = pool.entries() + + assert len(entries) == 2 + assert {entry.source for entry in entries} == {"manual:hermes_pkce", "hermes_pkce"} + + +def test_load_pool_prefers_anthropic_env_token_over_file_backed_oauth(tmp_path, monkeypatch): + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + monkeypatch.setenv("ANTHROPIC_TOKEN", "env-override-token") + monkeypatch.delenv("CLAUDE_CODE_OAUTH_TOKEN", raising=False) + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + + monkeypatch.setattr( + "agent.anthropic_adapter.read_hermes_oauth_credentials", + lambda: { + "accessToken": 
"file-backed-token", + "refreshToken": "refresh-token", + "expiresAt": int(time.time() * 1000) + 3_600_000, + }, + ) + monkeypatch.setattr( + "agent.anthropic_adapter.read_claude_code_credentials", + lambda: None, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("anthropic") + entry = pool.select() + + assert entry is not None + assert entry.source == "env:ANTHROPIC_TOKEN" + assert entry.access_token == "env-override-token" + + +def test_least_used_strategy_selects_lowest_count(tmp_path, monkeypatch): + """least_used strategy should select the credential with the lowest request_count.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.setattr( + "agent.credential_pool.get_pool_strategy", + lambda _provider: "least_used", + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_singletons", + lambda provider, entries: (False, set()), + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_env", + lambda provider, entries: (False, set()), + ) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "key-a", + "label": "heavy", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-or-heavy", + "request_count": 100, + }, + { + "id": "key-b", + "label": "light", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "sk-or-light", + "request_count": 10, + }, + { + "id": "key-c", + "label": "medium", + "auth_type": "api_key", + "priority": 2, + "source": "manual", + "access_token": "sk-or-medium", + "request_count": 50, + }, + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + entry = pool.select() + assert entry is not None + assert entry.id == "key-b" + assert entry.access_token == "sk-or-light" + + +def test_mark_used_increments_request_count(tmp_path, monkeypatch): + """mark_used should increment the request_count of the current entry.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.setattr( + "agent.credential_pool.get_pool_strategy", + lambda _provider: "fill_first", + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_singletons", + lambda provider, entries: (False, set()), + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_env", + lambda provider, entries: (False, set()), + ) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": "key-a", + "label": "test", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-or-test", + "request_count": 5, + }, + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + entry = pool.select() + assert entry is not None + assert entry.request_count == 5 + pool.mark_used() + updated = pool.current() + assert updated is not None + assert updated.request_count == 6 + + +def test_thread_safety_concurrent_select(tmp_path, monkeypatch): + """Concurrent select() calls should not corrupt pool state.""" + import threading as _threading + + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.setattr( + "agent.credential_pool.get_pool_strategy", + lambda _provider: "round_robin", + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_singletons", + lambda provider, entries: (False, set()), + ) + monkeypatch.setattr( + "agent.credential_pool._seed_from_env", + lambda provider, entries: (False, set()), + ) + _write_auth_store( + tmp_path, + 
{ + "version": 1, + "credential_pool": { + "openrouter": [ + { + "id": f"key-{i}", + "label": f"key-{i}", + "auth_type": "api_key", + "priority": i, + "source": "manual", + "access_token": f"sk-or-{i}", + } + for i in range(5) + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + results = [] + errors = [] + + def worker(): + try: + for _ in range(20): + entry = pool.select() + if entry: + results.append(entry.id) + pool.mark_used(entry.id) + except Exception as exc: + errors.append(exc) + + threads = [_threading.Thread(target=worker) for _ in range(4)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert not errors, f"Thread errors: {errors}" + assert len(results) == 80 # 4 threads * 20 selects + + +def test_custom_endpoint_pool_keyed_by_name(tmp_path, monkeypatch): + """Verify load_pool('custom:together.ai') works and returns entries from auth.json.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + # Disable seeding so we only test stored entries + monkeypatch.setattr( + "agent.credential_pool._seed_custom_pool", + lambda pool_key, entries: (False, set()), + ) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "custom:together.ai": [ + { + "id": "cred-1", + "label": "together-key", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-together-xxx", + "base_url": "https://api.together.ai/v1", + }, + { + "id": "cred-2", + "label": "together-key-2", + "auth_type": "api_key", + "priority": 1, + "source": "manual", + "access_token": "sk-together-yyy", + "base_url": "https://api.together.ai/v1", + }, + ] + }, + }, + ) + + from agent.credential_pool import load_pool + + pool = load_pool("custom:together.ai") + assert pool.has_credentials() + entries = pool.entries() + assert len(entries) == 2 + assert entries[0].access_token == "sk-together-xxx" + assert entries[1].access_token == "sk-together-yyy" + + # Select should return the first entry (fill_first default) + entry = pool.select() + assert entry is not None + assert entry.id == "cred-1" + + +def test_custom_endpoint_pool_seeds_from_config(tmp_path, monkeypatch): + """Verify seeding from custom_providers api_key in config.yaml.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store(tmp_path, {"version": 1}) + + # Write config.yaml with a custom_providers entry + config_path = tmp_path / "hermes" / "config.yaml" + import yaml + config_path.write_text(yaml.dump({ + "custom_providers": [ + { + "name": "Together.ai", + "base_url": "https://api.together.ai/v1", + "api_key": "sk-config-seeded", + } + ] + })) + + from agent.credential_pool import load_pool + + pool = load_pool("custom:together.ai") + assert pool.has_credentials() + entries = pool.entries() + assert len(entries) == 1 + assert entries[0].access_token == "sk-config-seeded" + assert entries[0].source == "config:Together.ai" + + +def test_custom_endpoint_pool_seeds_from_model_config(tmp_path, monkeypatch): + """Verify seeding from model.api_key when model.provider=='custom' and base_url matches.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store(tmp_path, {"version": 1}) + + import yaml + config_path = tmp_path / "hermes" / "config.yaml" + config_path.write_text(yaml.dump({ + "custom_providers": [ + { + "name": "Together.ai", + "base_url": "https://api.together.ai/v1", + } + ], + "model": { + "provider": "custom", + "base_url": "https://api.together.ai/v1", + "api_key": 
"sk-model-key", + }, + })) + + from agent.credential_pool import load_pool + + pool = load_pool("custom:together.ai") + assert pool.has_credentials() + entries = pool.entries() + # Should have the model_config entry + model_entries = [e for e in entries if e.source == "model_config"] + assert len(model_entries) == 1 + assert model_entries[0].access_token == "sk-model-key" + + +def test_custom_pool_does_not_break_existing_providers(tmp_path, monkeypatch): + """Existing registry providers work exactly as before with custom pool support.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + monkeypatch.setenv("OPENROUTER_API_KEY", "sk-or-test") + _write_auth_store(tmp_path, {"version": 1, "providers": {}}) + + from agent.credential_pool import load_pool + + pool = load_pool("openrouter") + entry = pool.select() + assert entry is not None + assert entry.source == "env:OPENROUTER_API_KEY" + assert entry.access_token == "sk-or-test" + + +def test_get_custom_provider_pool_key(tmp_path, monkeypatch): + """get_custom_provider_pool_key maps base_url to custom: pool key.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + (tmp_path / "hermes").mkdir(parents=True, exist_ok=True) + import yaml + config_path = tmp_path / "hermes" / "config.yaml" + config_path.write_text(yaml.dump({ + "custom_providers": [ + { + "name": "Together.ai", + "base_url": "https://api.together.ai/v1", + "api_key": "sk-xxx", + }, + { + "name": "My Local Server", + "base_url": "http://localhost:8080/v1", + }, + ] + })) + + from agent.credential_pool import get_custom_provider_pool_key + + assert get_custom_provider_pool_key("https://api.together.ai/v1") == "custom:together.ai" + assert get_custom_provider_pool_key("https://api.together.ai/v1/") == "custom:together.ai" + assert get_custom_provider_pool_key("http://localhost:8080/v1") == "custom:my-local-server" + assert get_custom_provider_pool_key("https://unknown.example.com/v1") is None + assert get_custom_provider_pool_key("") is None + + +def test_list_custom_pool_providers(tmp_path, monkeypatch): + """list_custom_pool_providers returns custom: pool keys from auth.json.""" + monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes")) + _write_auth_store( + tmp_path, + { + "version": 1, + "credential_pool": { + "anthropic": [ + { + "id": "a1", + "label": "test", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-ant-xxx", + } + ], + "custom:together.ai": [ + { + "id": "c1", + "label": "together", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-tog-xxx", + } + ], + "custom:fireworks": [ + { + "id": "c2", + "label": "fireworks", + "auth_type": "api_key", + "priority": 0, + "source": "manual", + "access_token": "sk-fw-xxx", + } + ], + "custom:empty": [], + }, + }, + ) + + from agent.credential_pool import list_custom_pool_providers + + result = list_custom_pool_providers() + assert result == ["custom:fireworks", "custom:together.ai"] + # "custom:empty" not included because it's empty diff --git a/tests/test_packaging_metadata.py b/tests/test_packaging_metadata.py new file mode 100644 index 000000000..ce6d4793f --- /dev/null +++ b/tests/test_packaging_metadata.py @@ -0,0 +1,22 @@ +from pathlib import Path +import tomllib + + +REPO_ROOT = Path(__file__).resolve().parents[1] + + +def test_faster_whisper_is_not_a_base_dependency(): + data = tomllib.loads((REPO_ROOT / "pyproject.toml").read_text(encoding="utf-8")) + deps = data["project"]["dependencies"] + + assert not 
any(dep.startswith("faster-whisper") for dep in deps) + + voice_extra = data["project"]["optional-dependencies"]["voice"] + assert any(dep.startswith("faster-whisper") for dep in voice_extra) + + +def test_manifest_includes_bundled_skills(): + manifest = (REPO_ROOT / "MANIFEST.in").read_text(encoding="utf-8") + + assert "graft skills" in manifest + assert "graft optional-skills" in manifest diff --git a/tests/test_provider_parity.py b/tests/test_provider_parity.py index b34c9cd70..deb657340 100644 --- a/tests/test_provider_parity.py +++ b/tests/test_provider_parity.py @@ -559,11 +559,18 @@ class TestAuxiliaryClientProviderPriority: assert model == "google/gemini-3-flash-preview" def test_custom_endpoint_when_no_nous(self, monkeypatch): + """Custom endpoint is used when no OpenRouter/Nous keys are available. + + Since the March 2026 config refactor, OPENAI_BASE_URL env var is no + longer consulted — base_url comes from config.yaml via + resolve_runtime_provider. Mock _resolve_custom_runtime directly. + """ monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) - monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1") monkeypatch.setenv("OPENAI_API_KEY", "local-key") from agent.auxiliary_client import get_text_auxiliary_client with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \ + patch("agent.auxiliary_client._resolve_custom_runtime", + return_value=("http://localhost:1234/v1", "local-key")), \ patch("agent.auxiliary_client.OpenAI") as mock: client, model = get_text_auxiliary_client() assert mock.call_args.kwargs["base_url"] == "http://localhost:1234/v1" diff --git a/tests/test_run_agent.py b/tests/test_run_agent.py index c42ee29f2..7ea3a63fe 100644 --- a/tests/test_run_agent.py +++ b/tests/test_run_agent.py @@ -1771,6 +1771,62 @@ class TestNousCredentialRefresh: assert isinstance(agent.client, _RebuiltClient) +class TestCredentialPoolRecovery: + def test_recover_with_pool_rotates_on_402(self, agent): + current = SimpleNamespace(label="primary") + next_entry = SimpleNamespace(label="secondary") + + class _Pool: + def current(self): + return current + + def mark_exhausted_and_rotate(self, *, status_code): + assert status_code == 402 + return next_entry + + agent._credential_pool = _Pool() + agent._swap_credential = MagicMock() + + recovered, retry_same = agent._recover_with_credential_pool( + status_code=402, + has_retried_429=False, + ) + + assert recovered is True + assert retry_same is False + agent._swap_credential.assert_called_once_with(next_entry) + + def test_recover_with_pool_retries_first_429_then_rotates(self, agent): + next_entry = SimpleNamespace(label="secondary") + + class _Pool: + def current(self): + return SimpleNamespace(label="primary") + + def mark_exhausted_and_rotate(self, *, status_code): + assert status_code == 429 + return next_entry + + agent._credential_pool = _Pool() + agent._swap_credential = MagicMock() + + recovered, retry_same = agent._recover_with_credential_pool( + status_code=429, + has_retried_429=False, + ) + assert recovered is False + assert retry_same is True + agent._swap_credential.assert_not_called() + + recovered, retry_same = agent._recover_with_credential_pool( + status_code=429, + has_retried_429=True, + ) + assert recovered is True + assert retry_same is False + agent._swap_credential.assert_called_once_with(next_entry) + + class TestMaxTokensParam: """Verify _max_tokens_param returns the correct key for each provider.""" diff --git a/tests/test_runtime_provider_resolution.py 
b/tests/test_runtime_provider_resolution.py index 84b018333..1a65aa31b 100644 --- a/tests/test_runtime_provider_resolution.py +++ b/tests/test_runtime_provider_resolution.py @@ -1,6 +1,123 @@ from hermes_cli import runtime_provider as rp +def test_resolve_runtime_provider_uses_credential_pool(monkeypatch): + class _Entry: + access_token = "pool-token" + source = "manual" + base_url = "https://chatgpt.com/backend-api/codex" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openai-codex") + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + + resolved = rp.resolve_runtime_provider(requested="openai-codex") + + assert resolved["provider"] == "openai-codex" + assert resolved["api_key"] == "pool-token" + assert resolved["credential_pool"] is not None + assert resolved["source"] == "manual" + + +def test_resolve_runtime_provider_anthropic_pool_respects_config_base_url(monkeypatch): + class _Entry: + access_token = "pool-token" + source = "manual" + base_url = "https://api.anthropic.com" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "anthropic") + monkeypatch.setattr( + rp, + "_get_model_config", + lambda: { + "provider": "anthropic", + "base_url": "https://proxy.example.com/anthropic", + }, + ) + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + + resolved = rp.resolve_runtime_provider(requested="anthropic") + + assert resolved["provider"] == "anthropic" + assert resolved["api_mode"] == "anthropic_messages" + assert resolved["api_key"] == "pool-token" + assert resolved["base_url"] == "https://proxy.example.com/anthropic" + + +def test_resolve_runtime_provider_anthropic_explicit_override_skips_pool(monkeypatch): + def _unexpected_pool(provider): + raise AssertionError(f"load_pool should not be called for {provider}") + + def _unexpected_anthropic_token(): + raise AssertionError("resolve_anthropic_token should not be called") + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "anthropic") + monkeypatch.setattr( + rp, + "_get_model_config", + lambda: { + "provider": "anthropic", + "base_url": "https://config.example.com/anthropic", + }, + ) + monkeypatch.setattr(rp, "load_pool", _unexpected_pool) + monkeypatch.setattr( + "agent.anthropic_adapter.resolve_anthropic_token", + _unexpected_anthropic_token, + ) + + resolved = rp.resolve_runtime_provider( + requested="anthropic", + explicit_api_key="anthropic-explicit-token", + explicit_base_url="https://proxy.example.com/anthropic/", + ) + + assert resolved["provider"] == "anthropic" + assert resolved["api_mode"] == "anthropic_messages" + assert resolved["api_key"] == "anthropic-explicit-token" + assert resolved["base_url"] == "https://proxy.example.com/anthropic" + assert resolved["source"] == "explicit" + assert resolved.get("credential_pool") is None + + +def test_resolve_runtime_provider_falls_back_when_pool_empty(monkeypatch): + class _Pool: + def has_credentials(self): + return False + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openai-codex") + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + monkeypatch.setattr( + rp, + "resolve_codex_runtime_credentials", + lambda: { + "provider": "openai-codex", + "base_url": "https://chatgpt.com/backend-api/codex", + "api_key": "codex-token", + "source": "hermes-auth-store", + "last_refresh": 
"2026-02-26T00:00:00Z", + }, + ) + + resolved = rp.resolve_runtime_provider(requested="openai-codex") + + assert resolved["api_key"] == "codex-token" + assert resolved.get("credential_pool") is None + + def test_resolve_runtime_provider_codex(monkeypatch): monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openai-codex") monkeypatch.setattr( @@ -40,6 +157,36 @@ def test_resolve_runtime_provider_ai_gateway(monkeypatch): assert resolved["requested_provider"] == "ai-gateway" +def test_resolve_runtime_provider_ai_gateway_explicit_override_skips_pool(monkeypatch): + def _unexpected_pool(provider): + raise AssertionError(f"load_pool should not be called for {provider}") + + def _unexpected_provider_resolution(provider): + raise AssertionError(f"resolve_api_key_provider_credentials should not be called for {provider}") + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "ai-gateway") + monkeypatch.setattr(rp, "_get_model_config", lambda: {}) + monkeypatch.setattr(rp, "load_pool", _unexpected_pool) + monkeypatch.setattr( + rp, + "resolve_api_key_provider_credentials", + _unexpected_provider_resolution, + ) + + resolved = rp.resolve_runtime_provider( + requested="ai-gateway", + explicit_api_key="ai-gateway-explicit-token", + explicit_base_url="https://proxy.example.com/v1/", + ) + + assert resolved["provider"] == "ai-gateway" + assert resolved["api_mode"] == "chat_completions" + assert resolved["api_key"] == "ai-gateway-explicit-token" + assert resolved["base_url"] == "https://proxy.example.com/v1" + assert resolved["source"] == "explicit" + assert resolved.get("credential_pool") is None + + def test_resolve_runtime_provider_openrouter_explicit(monkeypatch): monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") monkeypatch.setattr(rp, "_get_model_config", lambda: {}) @@ -61,6 +208,69 @@ def test_resolve_runtime_provider_openrouter_explicit(monkeypatch): assert resolved["source"] == "explicit" +def test_resolve_runtime_provider_auto_uses_openrouter_pool(monkeypatch): + class _Entry: + access_token = "pool-key" + source = "manual" + base_url = "https://openrouter.ai/api/v1" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") + monkeypatch.setattr(rp, "_get_model_config", lambda: {}) + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + monkeypatch.delenv("OPENAI_BASE_URL", raising=False) + monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False) + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + + resolved = rp.resolve_runtime_provider(requested="auto") + + assert resolved["provider"] == "openrouter" + assert resolved["api_key"] == "pool-key" + assert resolved["base_url"] == "https://openrouter.ai/api/v1" + assert resolved["source"] == "manual" + assert resolved.get("credential_pool") is not None + + +def test_resolve_runtime_provider_openrouter_explicit_api_key_skips_pool(monkeypatch): + class _Entry: + access_token = "pool-key" + source = "manual" + base_url = "https://openrouter.ai/api/v1" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") + monkeypatch.setattr(rp, "_get_model_config", lambda: {}) + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + monkeypatch.delenv("OPENAI_BASE_URL", raising=False) + 
monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False) + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + monkeypatch.delenv("OPENROUTER_API_KEY", raising=False) + + resolved = rp.resolve_runtime_provider( + requested="openrouter", + explicit_api_key="explicit-key", + ) + + assert resolved["provider"] == "openrouter" + assert resolved["api_key"] == "explicit-key" + assert resolved["base_url"] == rp.OPENROUTER_BASE_URL + assert resolved["source"] == "explicit" + assert resolved.get("credential_pool") is None + + def test_resolve_runtime_provider_openrouter_ignores_codex_config_base_url(monkeypatch): monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") monkeypatch.setattr( @@ -136,16 +346,19 @@ def test_openai_key_used_when_no_openrouter_key(monkeypatch): def test_custom_endpoint_prefers_openai_key(monkeypatch): - """Custom endpoint should use OPENAI_API_KEY, not OPENROUTER_API_KEY. + """Custom endpoint should use config api_key over OPENROUTER_API_KEY. - Regression test for #560: when base_url is a non-OpenRouter endpoint, - OPENROUTER_API_KEY was being sent as the auth header instead of OPENAI_API_KEY. + Updated for #4165: config.yaml is now the source of truth for endpoint URLs, + OPENAI_BASE_URL env var is no longer consulted. """ monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") - monkeypatch.setattr(rp, "_get_model_config", lambda: {}) - monkeypatch.setenv("OPENAI_BASE_URL", "https://api.z.ai/api/coding/paas/v4") + monkeypatch.setattr(rp, "_get_model_config", lambda: { + "provider": "custom", + "base_url": "https://api.z.ai/api/coding/paas/v4", + "api_key": "zai-key", + }) + monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False) - monkeypatch.setenv("OPENAI_API_KEY", "zai-key") monkeypatch.setenv("OPENROUTER_API_KEY", "openrouter-key") resolved = rp.resolve_runtime_provider(requested="custom") @@ -221,19 +434,22 @@ def test_custom_endpoint_uses_config_api_field_when_no_api_key(monkeypatch): assert resolved["api_key"] == "config-api-field" -def test_custom_endpoint_auto_provider_prefers_openai_key(monkeypatch): - """Auto provider with non-OpenRouter base_url should prefer OPENAI_API_KEY. +def test_custom_endpoint_explicit_custom_prefers_config_key(monkeypatch): + """Explicit 'custom' provider with config base_url+api_key should use them. - Same as #560 but via 'hermes model' flow which sets provider to 'auto'. + Updated for #4165: config.yaml is the source of truth, not OPENAI_BASE_URL. 
""" monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") - monkeypatch.setattr(rp, "_get_model_config", lambda: {}) - monkeypatch.setenv("OPENAI_BASE_URL", "https://my-vllm-server.example.com/v1") + monkeypatch.setattr(rp, "_get_model_config", lambda: { + "provider": "custom", + "base_url": "https://my-vllm-server.example.com/v1", + "api_key": "sk-vllm-key", + }) + monkeypatch.delenv("OPENAI_BASE_URL", raising=False) monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False) - monkeypatch.setenv("OPENAI_API_KEY", "sk-vllm-key") monkeypatch.setenv("OPENROUTER_API_KEY", "sk-or-...leak") - resolved = rp.resolve_runtime_provider(requested="auto") + resolved = rp.resolve_runtime_provider(requested="custom") assert resolved["base_url"] == "https://my-vllm-server.example.com/v1" assert resolved["api_key"] == "sk-vllm-key" @@ -359,6 +575,36 @@ def test_explicit_openrouter_skips_openai_base_url(monkeypatch): assert resolved["api_key"] == "or-test-key" +def test_explicit_openrouter_honors_openrouter_base_url_over_pool(monkeypatch): + class _Entry: + access_token = "pool-key" + source = "manual" + base_url = "https://openrouter.ai/api/v1" + + class _Pool: + def has_credentials(self): + return True + + def select(self): + return _Entry() + + monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter") + monkeypatch.setattr(rp, "_get_model_config", lambda: {}) + monkeypatch.setattr(rp, "load_pool", lambda provider: _Pool()) + monkeypatch.setenv("OPENROUTER_BASE_URL", "https://mirror.example.com/v1") + monkeypatch.setenv("OPENROUTER_API_KEY", "mirror-key") + monkeypatch.delenv("OPENAI_BASE_URL", raising=False) + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + + resolved = rp.resolve_runtime_provider(requested="openrouter") + + assert resolved["provider"] == "openrouter" + assert resolved["base_url"] == "https://mirror.example.com/v1" + assert resolved["api_key"] == "mirror-key" + assert resolved["source"] == "env/config" + assert resolved.get("credential_pool") is None + + def test_resolve_requested_provider_precedence(monkeypatch): monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "nous") monkeypatch.setattr(rp, "_get_model_config", lambda: {"provider": "openai-codex"}) @@ -545,7 +791,7 @@ def test_alibaba_default_coding_intl_endpoint_uses_chat_completions(monkeypatch) assert resolved["provider"] == "alibaba" assert resolved["api_mode"] == "chat_completions" - assert resolved["base_url"] == "https://coding-intl.dashscope.aliyuncs.com/v1" + assert resolved["base_url"] == "https://dashscope-intl.aliyuncs.com/compatible-mode/v1" def test_alibaba_anthropic_endpoint_override_uses_anthropic_messages(monkeypatch): diff --git a/tests/test_setup_model_selection.py b/tests/test_setup_model_selection.py index 514a43045..3a02ebbf0 100644 --- a/tests/test_setup_model_selection.py +++ b/tests/test_setup_model_selection.py @@ -32,8 +32,8 @@ class TestSetupProviderModelSelection: @pytest.mark.parametrize("provider_id,expected_defaults", [ ("zai", ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"]), ("kimi-coding", ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"]), - ("minimax", ["MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"]), - ("minimax-cn", ["MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"]), + ("minimax", ["MiniMax-M2.7", "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"]), + ("minimax-cn", ["MiniMax-M2.7", "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"]), ]) 
@patch("hermes_cli.models.fetch_api_models", return_value=[]) @patch("hermes_cli.config.get_env_value", return_value="fake-key") diff --git a/tests/test_trajectory_compressor.py b/tests/test_trajectory_compressor.py index c95a3af94..72708b8d9 100644 --- a/tests/test_trajectory_compressor.py +++ b/tests/test_trajectory_compressor.py @@ -405,12 +405,13 @@ class TestGenerateSummary: @pytest.mark.asyncio async def test_generate_summary_async_handles_none_content(self): tc = _make_compressor() - tc.async_client = MagicMock() - tc.async_client.chat.completions.create = AsyncMock( + mock_client = MagicMock() + mock_client.chat.completions.create = AsyncMock( return_value=SimpleNamespace( choices=[SimpleNamespace(message=SimpleNamespace(content=None))] ) ) + tc._get_async_client = MagicMock(return_value=mock_client) metrics = TrajectoryMetrics() summary = await tc._generate_summary_async("Turn content", metrics) diff --git a/tests/tools/test_browser_camofox.py b/tests/tools/test_browser_camofox.py index a59862b9b..f9ff0e7c7 100644 --- a/tests/tools/test_browser_camofox.py +++ b/tests/tools/test_browser_camofox.py @@ -235,8 +235,13 @@ class TestCamofoxGetImages: mock_post.return_value = _mock_response(json_data={"tabId": "tab10", "url": "https://x.com"}) camofox_navigate("https://x.com", task_id="t10") + # camofox_get_images parses images from the accessibility tree snapshot + snapshot_text = ( + '- img "Logo"\n' + ' /url: https://x.com/img.png\n' + ) mock_get.return_value = _mock_response(json_data={ - "images": [{"src": "https://x.com/img.png", "alt": "Logo"}], + "snapshot": snapshot_text, }) result = json.loads(camofox_get_images(task_id="t10")) assert result["success"] is True diff --git a/tests/tools/test_browser_ssrf_local.py b/tests/tools/test_browser_ssrf_local.py new file mode 100644 index 000000000..44d3b8ea1 --- /dev/null +++ b/tests/tools/test_browser_ssrf_local.py @@ -0,0 +1,163 @@ +"""Tests that browser_navigate SSRF checks respect the allow_private_urls setting. + +When ``browser.allow_private_urls`` is ``False`` (default), private/internal +addresses are blocked. When set to ``True``, they are allowed — useful for +local development, LAN access, and Hermes self-testing. 
+""" + +import json + +import pytest + +from tools import browser_tool + + +def _make_browser_result(url="https://example.com"): + """Return a mock successful browser command result.""" + return {"success": True, "data": {"title": "OK", "url": url}} + + +# --------------------------------------------------------------------------- +# Pre-navigation SSRF check +# --------------------------------------------------------------------------- + + +class TestPreNavigationSsrf: + PRIVATE_URL = "http://127.0.0.1:8080/dashboard" + + @pytest.fixture() + def _common_patches(self, monkeypatch): + """Shared patches for pre-navigation tests that pass the SSRF check.""" + monkeypatch.setattr(browser_tool, "_is_camofox_mode", lambda: False) + monkeypatch.setattr(browser_tool, "check_website_access", lambda url: None) + monkeypatch.setattr( + browser_tool, + "_get_session_info", + lambda task_id: { + "session_name": f"s_{task_id}", + "bb_session_id": None, + "cdp_url": None, + "features": {"local": True}, + "_first_nav": False, + }, + ) + monkeypatch.setattr( + browser_tool, + "_run_browser_command", + lambda *a, **kw: _make_browser_result(), + ) + + def test_blocks_private_url_by_default(self, monkeypatch, _common_patches): + """SSRF protection is on when allow_private_urls is not set (False).""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: False) + monkeypatch.setattr(browser_tool, "_is_safe_url", lambda url: False) + + result = json.loads(browser_tool.browser_navigate(self.PRIVATE_URL)) + + assert result["success"] is False + assert "private or internal address" in result["error"] + + def test_blocks_private_url_when_setting_false(self, monkeypatch, _common_patches): + """SSRF protection is on when allow_private_urls is explicitly False.""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: False) + monkeypatch.setattr(browser_tool, "_is_safe_url", lambda url: False) + + result = json.loads(browser_tool.browser_navigate(self.PRIVATE_URL)) + + assert result["success"] is False + + def test_allows_private_url_when_setting_true(self, monkeypatch, _common_patches): + """Private URLs are allowed when allow_private_urls is True.""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: True) + # _is_safe_url would block this, but the setting overrides it + monkeypatch.setattr(browser_tool, "_is_safe_url", lambda url: False) + + result = json.loads(browser_tool.browser_navigate(self.PRIVATE_URL)) + + assert result["success"] is True + + def test_allows_public_url_regardless_of_setting(self, monkeypatch, _common_patches): + """Public URLs always pass regardless of the allow_private_urls setting.""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: False) + monkeypatch.setattr(browser_tool, "_is_safe_url", lambda url: True) + + result = json.loads(browser_tool.browser_navigate("https://example.com")) + + assert result["success"] is True + + +# --------------------------------------------------------------------------- +# Post-redirect SSRF check +# --------------------------------------------------------------------------- + + +class TestPostRedirectSsrf: + PUBLIC_URL = "https://example.com/redirect" + PRIVATE_FINAL_URL = "http://192.168.1.1/internal" + + @pytest.fixture() + def _common_patches(self, monkeypatch): + """Shared patches for redirect tests.""" + monkeypatch.setattr(browser_tool, "_is_camofox_mode", lambda: False) + monkeypatch.setattr(browser_tool, "check_website_access", lambda url: None) + monkeypatch.setattr( + browser_tool, + 
"_get_session_info", + lambda task_id: { + "session_name": f"s_{task_id}", + "bb_session_id": None, + "cdp_url": None, + "features": {"local": True}, + "_first_nav": False, + }, + ) + + def test_blocks_redirect_to_private_by_default(self, monkeypatch, _common_patches): + """Redirects to private addresses are blocked when setting is False.""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: False) + monkeypatch.setattr( + browser_tool, "_is_safe_url", lambda url: "192.168" not in url, + ) + monkeypatch.setattr( + browser_tool, + "_run_browser_command", + lambda *a, **kw: _make_browser_result(url=self.PRIVATE_FINAL_URL), + ) + + result = json.loads(browser_tool.browser_navigate(self.PUBLIC_URL)) + + assert result["success"] is False + assert "redirect landed on a private/internal address" in result["error"] + + def test_allows_redirect_to_private_when_setting_true(self, monkeypatch, _common_patches): + """Redirects to private addresses are allowed when setting is True.""" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: True) + monkeypatch.setattr( + browser_tool, "_is_safe_url", lambda url: "192.168" not in url, + ) + monkeypatch.setattr( + browser_tool, + "_run_browser_command", + lambda *a, **kw: _make_browser_result(url=self.PRIVATE_FINAL_URL), + ) + + result = json.loads(browser_tool.browser_navigate(self.PUBLIC_URL)) + + assert result["success"] is True + assert result["url"] == self.PRIVATE_FINAL_URL + + def test_allows_redirect_to_public_regardless_of_setting(self, monkeypatch, _common_patches): + """Redirects to public addresses always pass.""" + final = "https://example.com/final" + monkeypatch.setattr(browser_tool, "_allow_private_urls", lambda: False) + monkeypatch.setattr(browser_tool, "_is_safe_url", lambda url: True) + monkeypatch.setattr( + browser_tool, + "_run_browser_command", + lambda *a, **kw: _make_browser_result(url=final), + ) + + result = json.loads(browser_tool.browser_navigate(self.PUBLIC_URL)) + + assert result["success"] is True + assert result["url"] == final diff --git a/tests/tools/test_delegate.py b/tests/tools/test_delegate.py index 1a779f8a0..d86a8c488 100644 --- a/tests/tools/test_delegate.py +++ b/tests/tools/test_delegate.py @@ -593,7 +593,14 @@ class TestDelegationCredentialResolution(unittest.TestCase): "model": "qwen2.5-coder", "base_url": "http://localhost:1234/v1", } - with patch.dict(os.environ, {"OPENROUTER_API_KEY": "env-openrouter-key"}, clear=False): + with patch.dict( + os.environ, + { + "OPENROUTER_API_KEY": "env-openrouter-key", + "OPENAI_API_KEY": "", + }, + clear=False, + ): with self.assertRaises(ValueError) as ctx: _resolve_delegation_credentials(cfg, parent) self.assertIn("OPENAI_API_KEY", str(ctx.exception)) diff --git a/tests/tools/test_transcription.py b/tests/tools/test_transcription.py index 0ce3f2468..5f42272a5 100644 --- a/tests/tools/test_transcription.py +++ b/tests/tools/test_transcription.py @@ -18,6 +18,11 @@ import pytest # --------------------------------------------------------------------------- +@pytest.fixture(autouse=True) +def _clear_openai_env(monkeypatch): + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + + class TestGetProvider: """_get_provider() picks the right backend based on config + availability.""" diff --git a/tools/browser_tool.py b/tools/browser_tool.py index 33a1c8ef6..1861152e3 100644 --- a/tools/browser_tool.py +++ b/tools/browser_tool.py @@ -237,6 +237,8 @@ _PROVIDER_REGISTRY: Dict[str, type] = { _cached_cloud_provider: Optional[CloudBrowserProvider] = None 
_cloud_provider_resolved = False +_allow_private_urls_resolved = False +_cached_allow_private_urls: Optional[bool] = None def _get_cloud_provider() -> Optional[CloudBrowserProvider]: @@ -265,6 +267,31 @@ def _get_cloud_provider() -> Optional[CloudBrowserProvider]: return _cached_cloud_provider +def _allow_private_urls() -> bool: + """Return whether the browser is allowed to navigate to private/internal addresses. + + Reads ``config["browser"]["allow_private_urls"]`` once and caches the result + for the process lifetime. Defaults to ``False`` (SSRF protection active). + """ + global _cached_allow_private_urls, _allow_private_urls_resolved + if _allow_private_urls_resolved: + return _cached_allow_private_urls + + _allow_private_urls_resolved = True + _cached_allow_private_urls = False # safe default + try: + hermes_home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) + config_path = hermes_home / "config.yaml" + if config_path.exists(): + import yaml + with open(config_path) as f: + cfg = yaml.safe_load(f) or {} + _cached_allow_private_urls = bool(cfg.get("browser", {}).get("allow_private_urls")) + except Exception as e: + logger.debug("Could not read allow_private_urls from config: %s", e) + return _cached_allow_private_urls + + def _socket_safe_tmpdir() -> str: """Return a short temp directory path suitable for Unix domain sockets. @@ -1038,8 +1065,10 @@ def browser_navigate(url: str, task_id: Optional[str] = None) -> str: Returns: JSON string with navigation result (includes stealth features info on first nav) """ - # SSRF protection — block private/internal addresses before navigating - if not _is_safe_url(url): + # SSRF protection — block private/internal addresses before navigating. + # Can be opted out via ``browser.allow_private_urls`` in config for local + # development or LAN access use cases. + if not _allow_private_urls() and not _is_safe_url(url): return json.dumps({ "success": False, "error": "Blocked: URL targets a private or internal address", @@ -1081,7 +1110,7 @@ def browser_navigate(url: str, task_id: Optional[str] = None) -> str: # Post-redirect SSRF check — if the browser followed a redirect to a # private/internal address, block the result so the model can't read # internal content via subsequent browser_snapshot calls. - if final_url and final_url != url and not _is_safe_url(final_url): + if not _allow_private_urls() and final_url and final_url != url and not _is_safe_url(final_url): # Navigate away to a blank page to prevent snapshot leaks _run_browser_command(effective_task_id, "open", ["about:blank"], timeout=10) return json.dumps({ diff --git a/tools/skills_hub.py b/tools/skills_hub.py index a824c3e3b..c818261d7 100644 --- a/tools/skills_hub.py +++ b/tools/skills_hub.py @@ -2115,7 +2115,11 @@ class OptionalSkillSource(SkillSource): """ def __init__(self): - self._optional_dir = Path(__file__).parent.parent / "optional-skills" + from hermes_constants import get_optional_skills_dir + + self._optional_dir = get_optional_skills_dir( + Path(__file__).parent.parent / "optional-skills" + ) def source_id(self) -> str: return "official" diff --git a/tools/web_tools.py b/tools/web_tools.py index c8e7fb0f3..c61bc1eb7 100644 --- a/tools/web_tools.py +++ b/tools/web_tools.py @@ -77,20 +77,18 @@ def _get_backend() -> str: if configured in ("parallel", "firecrawl", "tavily", "exa"): return configured - # Fallback for manual / legacy config — use whichever key is present. 
- has_firecrawl = _has_env("FIRECRAWL_API_KEY") or _has_env("FIRECRAWL_API_URL") - has_parallel = _has_env("PARALLEL_API_KEY") - has_tavily = _has_env("TAVILY_API_KEY") - has_exa = _has_env("EXA_API_KEY") - if has_exa and not has_firecrawl and not has_parallel and not has_tavily: - return "exa" - if has_tavily and not has_firecrawl and not has_parallel: - return "tavily" - if has_parallel and not has_firecrawl: - return "parallel" + # Fallback for manual / legacy config — pick highest-priority backend + # that has a key configured. Order: firecrawl > parallel > tavily > exa. + for backend, keys in [ + ("firecrawl", ("FIRECRAWL_API_KEY", "FIRECRAWL_API_URL")), + ("parallel", ("PARALLEL_API_KEY",)), + ("tavily", ("TAVILY_API_KEY",)), + ("exa", ("EXA_API_KEY",)), + ]: + if any(_has_env(k) for k in keys): + return backend - # Default to firecrawl (backward compat, or when both are set) - return "firecrawl" + return "firecrawl" # default (backward compat) # ─── Firecrawl Client ──────────────────────────────────────────────────────── diff --git a/website/docs/developer-guide/adding-providers.md b/website/docs/developer-guide/adding-providers.md index 9547e78d0..a0c9f9122 100644 --- a/website/docs/developer-guide/adding-providers.md +++ b/website/docs/developer-guide/adding-providers.md @@ -28,7 +28,7 @@ A built-in provider has to line up across a few layers: - `api_key` - `source` 3. `run_agent.py` uses `api_mode` to decide how requests are built and sent. -4. `hermes_cli/models.py`, `hermes_cli/main.py`, and `hermes_cli/setup.py` make the provider show up in the CLI. +4. `hermes_cli/models.py` and `hermes_cli/main.py` make the provider show up in the CLI. (`hermes_cli/setup.py` delegates to `main.py` automatically — no changes needed there.) 5. `agent/auxiliary_client.py` and `agent/model_metadata.py` keep side tasks and token budgeting working. The important abstraction is `api_mode`. @@ -78,11 +78,14 @@ This path includes everything from Path A plus: 2. `hermes_cli/models.py` 3. `hermes_cli/runtime_provider.py` 4. `hermes_cli/main.py` -5. `hermes_cli/setup.py` -6. `agent/auxiliary_client.py` -7. `agent/model_metadata.py` -8. tests -9. user-facing docs under `website/docs/` +5. `agent/auxiliary_client.py` +6. `agent/model_metadata.py` +7. tests +8. user-facing docs under `website/docs/` + +:::tip +`hermes_cli/setup.py` does **not** need changes. The setup wizard delegates provider/model selection to `select_provider_and_model()` in `main.py` — any provider added there is automatically available in `hermes setup`. +::: ### Additional for native / non-OpenAI providers @@ -185,29 +188,22 @@ If the provider is OpenAI-compatible, `api_mode` should usually stay `chat_compl Be careful with API-key precedence. Hermes already contains logic to avoid leaking an OpenRouter key to unrelated endpoints. A new provider should be equally explicit about which key goes to which base URL. -## Step 5: Wire the CLI in `hermes_cli/main.py` and `hermes_cli/setup.py` +## Step 5: Wire the CLI in `hermes_cli/main.py` -A provider is not discoverable until it shows up in the interactive flows. +A provider is not discoverable until it shows up in the interactive `hermes model` flow. 
-Update: +Update these in `hermes_cli/main.py`: -### `hermes_cli/main.py` - -- `provider_labels` -- provider dispatch inside the `model` command +- `provider_labels` dict +- `providers` list in `select_provider_and_model()` +- provider dispatch (`if selected_provider == ...`) - `--provider` argument choices - login/logout choices if the provider supports those flows - a `_model_flow_()` function, or reuse `_model_flow_api_key_provider()` if it fits -### `hermes_cli/setup.py` - -- `provider_choices` -- auth branch for the provider -- model-selection branch -- any provider-specific explanatory text -- any place where a provider should be excluded from OpenRouter-only prompts or routing settings - -If you only update one of these files, `hermes model` and `hermes setup` will drift. +:::tip +`hermes_cli/setup.py` does not need changes — it calls `select_provider_and_model()` from `main.py`, so your new provider appears in both `hermes model` and `hermes setup` automatically. +::: ## Step 6: Keep auxiliary calls working @@ -353,8 +349,7 @@ Use this if the provider is standard chat completions. - [ ] aliases added in `hermes_cli/auth.py` and `hermes_cli/models.py` - [ ] model catalog added in `hermes_cli/models.py` - [ ] runtime branch added in `hermes_cli/runtime_provider.py` -- [ ] CLI wiring added in `hermes_cli/main.py` -- [ ] setup wiring added in `hermes_cli/setup.py` +- [ ] CLI wiring added in `hermes_cli/main.py` (setup.py inherits automatically) - [ ] aux model added in `agent/auxiliary_client.py` - [ ] context lengths added in `agent/model_metadata.py` - [ ] runtime / CLI tests updated @@ -412,7 +407,7 @@ If you are hunting for all the places a provider touches, search these symbols: - `_PROVIDER_MODELS` - `resolve_runtime_provider` - `_model_flow_` -- `provider_choices` +- `select_provider_and_model` - `api_mode` - `_API_KEY_PROVIDER_AUX_MODELS` - `self.client.` diff --git a/website/docs/developer-guide/context-compression-and-caching.md b/website/docs/developer-guide/context-compression-and-caching.md index 92bf718cd..65c0911f4 100644 --- a/website/docs/developer-guide/context-compression-and-caching.md +++ b/website/docs/developer-guide/context-compression-and-caching.md @@ -1,72 +1,321 @@ ---- -sidebar_position: 6 -title: "Context Compression & Prompt Caching" -description: "How Hermes compresses long conversations and applies provider-side prompt caching" ---- +# Context Compression and Caching -# Context Compression & Prompt Caching +Hermes Agent uses a dual compression system and Anthropic prompt caching to +manage context window usage efficiently across long conversations. -Hermes manages long conversations with two complementary mechanisms: +Source files: `agent/context_compressor.py`, `agent/prompt_caching.py`, +`gateway/run.py` (session hygiene), `run_agent.py` (lines 1146-1204) -- prompt caching -- context compression -Primary files: +## Dual Compression System -- `agent/prompt_caching.py` -- `agent/context_compressor.py` -- `run_agent.py` +Hermes has two separate compression layers that operate independently: -## Prompt caching +``` + ┌──────────────────────────┐ + Incoming message │ Gateway Session Hygiene │ Fires at 85% of context + ─────────────────► │ (pre-agent, rough est.) 
│ Safety net for large sessions + └─────────────┬────────────┘ + │ + ▼ + ┌──────────────────────────┐ + │ Agent ContextCompressor │ Fires at 50% of context (default) + │ (in-loop, real tokens) │ Normal context management + └──────────────────────────┘ +``` -For Anthropic/native and Claude-via-OpenRouter flows, Hermes applies Anthropic-style cache markers. +### 1. Gateway Session Hygiene (85% threshold) -Current strategy: +Located in `gateway/run.py` (around line 2220). This is a **safety net** that +runs before the agent processes a message. It prevents API failures when sessions +grow too large between turns (e.g., overnight accumulation in Telegram/Discord). -- cache the system prompt -- cache the last 3 non-system messages -- default TTL is 5 minutes unless explicitly extended +- **Threshold**: Fixed at 85% of model context length +- **Token source**: Prefers actual API-reported tokens from last turn; falls back + to rough character-based estimate (`estimate_messages_tokens_rough`) +- **Fires**: Only when `len(history) >= 4` and compression is enabled +- **Purpose**: Catch sessions that escaped the agent's own compressor -This is implemented in `agent/prompt_caching.py`. +The gateway hygiene threshold is intentionally higher than the agent's compressor. +Setting it at 50% (same as the agent) caused premature compression on every turn +in long gateway sessions. -## Why prompt stability matters +### 2. Agent ContextCompressor (50% threshold, configurable) -Prompt caching only helps when the stable prefix remains stable. That is why Hermes avoids rebuilding or mutating the core system prompt mid-session unless it has to. +Located in `agent/context_compressor.py`. This is the **primary compression +system** that runs inside the agent's tool loop with access to accurate, +API-reported token counts. -## Compression trigger -Hermes can compress context when conversations become large. Configuration defaults live in `config.yaml`, and the compressor also has runtime checks based on actual prompt token counts. +## Configuration -## Compression algorithm +All compression settings are read from `config.yaml` under the `compression` key: -The compressor protects: +```yaml +compression: + enabled: true # Enable/disable compression (default: true) + threshold: 0.50 # Fraction of context window (default: 0.50 = 50%) + target_ratio: 0.20 # How much of threshold to keep as tail (default: 0.20) + protect_last_n: 20 # Minimum protected tail messages (default: 20) + summary_model: null # Override model for summaries (default: uses auxiliary) +``` -- the first N turns -- the last N turns +### Parameter Details -and summarizes the middle section. +| Parameter | Default | Range | Description | +|-----------|---------|-------|-------------| +| `threshold` | `0.50` | 0.0-1.0 | Compression triggers when prompt tokens ≥ `threshold × context_length` | +| `target_ratio` | `0.20` | 0.10-0.80 | Controls tail protection token budget: `threshold_tokens × target_ratio` | +| `protect_last_n` | `20` | ≥1 | Minimum number of recent messages always preserved | +| `protect_first_n` | `3` | (hardcoded) | System prompt + first exchange always preserved | -It also cleans up structural issues such as orphaned tool-call/result pairs so the API never receives invalid conversation structure after compression. 
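+
+A minimal sketch of how these parameters combine (helper names are
+illustrative; the real logic lives in `agent/context_compressor.py`):
+
+```python
+def should_compress(prompt_tokens: int, context_length: int,
+                    threshold: float = 0.50) -> bool:
+    # Compression triggers once the prompt crosses threshold x context.
+    return prompt_tokens >= threshold * context_length
+
+
+def tail_token_budget(context_length: int, threshold: float = 0.50,
+                      target_ratio: float = 0.20) -> int:
+    # Protected-tail budget: threshold_tokens x target_ratio.
+    return int(context_length * threshold * target_ratio)
+```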
+### Computed Values (for a 200K context model at defaults) -## Pre-compression memory flush +``` +context_length = 200,000 +threshold_tokens = 200,000 × 0.50 = 100,000 +tail_token_budget = 100,000 × 0.20 = 20,000 +max_summary_tokens = min(200,000 × 0.05, 12,000) = 10,000 +``` -Before compression, Hermes can give the model one last chance to persist memory so facts are not lost when middle turns are summarized away. -## Session lineage after compression +## Compression Algorithm -Compression can split the session into a new session ID while preserving parent lineage in the state DB. +The `ContextCompressor.compress()` method follows a 4-phase algorithm: -This lets Hermes continue operating with a smaller active context while retaining a searchable ancestry chain. +### Phase 1: Prune Old Tool Results (cheap, no LLM call) -## Re-injected state after compression +Old tool results (>200 chars) outside the protected tail are replaced with: +``` +[Old tool output cleared to save context space] +``` -After compression, Hermes may re-inject compact operational state such as: +This is a cheap pre-pass that saves significant tokens from verbose tool +outputs (file contents, terminal output, search results). -- todo snapshot -- prior-read-files summary +### Phase 2: Determine Boundaries -## Related docs +``` +┌─────────────────────────────────────────────────────────────┐ +│ Message list │ +│ │ +│ [0..2] ← protect_first_n (system + first exchange) │ +│ [3..N] ← middle turns → SUMMARIZED │ +│ [N..end] ← tail (by token budget OR protect_last_n) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` -- [Prompt Assembly](./prompt-assembly.md) -- [Session Storage](./session-storage.md) -- [Agent Loop Internals](./agent-loop.md) +Tail protection is **token-budget based**: walks backward from the end, +accumulating tokens until the budget is exhausted. Falls back to the fixed +`protect_last_n` count if the budget would protect fewer messages. + +Boundaries are aligned to avoid splitting tool_call/tool_result groups. +The `_align_boundary_backward()` method walks past consecutive tool results +to find the parent assistant message, keeping groups intact. + +### Phase 3: Generate Structured Summary + +The middle turns are summarized using the auxiliary LLM with a structured +template: + +``` +## Goal +[What the user is trying to accomplish] + +## Constraints & Preferences +[User preferences, coding style, constraints, important decisions] + +## Progress +### Done +[Completed work — specific file paths, commands run, results] +### In Progress +[Work currently underway] +### Blocked +[Any blockers or issues encountered] + +## Key Decisions +[Important technical decisions and why] + +## Relevant Files +[Files read, modified, or created — with brief note on each] + +## Next Steps +[What needs to happen next] + +## Critical Context +[Specific values, error messages, configuration details] +``` + +Summary budget scales with the amount of content being compressed: +- Formula: `content_tokens × 0.20` (the `_SUMMARY_RATIO` constant) +- Minimum: 2,000 tokens +- Maximum: `min(context_length × 0.05, 12,000)` tokens + +### Phase 4: Assemble Compressed Messages + +The compressed message list is: +1. Head messages (with a note appended to system prompt on first compression) +2. Summary message (role chosen to avoid consecutive same-role violations) +3. 
Tail messages (unmodified) + +Orphaned tool_call/tool_result pairs are cleaned up by `_sanitize_tool_pairs()`: +- Tool results referencing removed calls → removed +- Tool calls whose results were removed → stub result injected + +### Iterative Re-compression + +On subsequent compressions, the previous summary is passed to the LLM with +instructions to **update** it rather than summarize from scratch. This preserves +information across multiple compactions — items move from "In Progress" to "Done", +new progress is added, and obsolete information is removed. + +The `_previous_summary` field on the compressor instance stores the last summary +text for this purpose. + + +## Before/After Example + +### Before Compression (45 messages, ~95K tokens) + +``` +[0] system: "You are a helpful assistant..." (system prompt) +[1] user: "Help me set up a FastAPI project" +[2] assistant: terminal: mkdir project +[3] tool: "directory created" +[4] assistant: write_file: main.py +[5] tool: "file written (2.3KB)" + ... 30 more turns of file editing, testing, debugging ... +[38] assistant: terminal: pytest +[39] tool: "8 passed, 2 failed\n..." (5KB output) +[40] user: "Fix the failing tests" +[41] assistant: read_file: tests/test_api.py +[42] tool: "import pytest\n..." (3KB) +[43] assistant: "I see the issue with the test fixtures..." +[44] user: "Great, also add error handling" +``` + +### After Compression (25 messages, ~45K tokens) + +``` +[0] system: "You are a helpful assistant... + [Note: Some earlier conversation turns have been compacted...]" +[1] user: "Help me set up a FastAPI project" +[2] assistant: "[CONTEXT COMPACTION] Earlier turns were compacted... + + ## Goal + Set up a FastAPI project with tests and error handling + + ## Progress + ### Done + - Created project structure: main.py, tests/, requirements.txt + - Implemented 5 API endpoints in main.py + - Wrote 10 test cases in tests/test_api.py + - 8/10 tests passing + + ### In Progress + - Fixing 2 failing tests (test_create_user, test_delete_user) + + ## Relevant Files + - main.py — FastAPI app with 5 endpoints + - tests/test_api.py — 10 test cases + - requirements.txt — fastapi, pytest, httpx + + ## Next Steps + - Fix failing test fixtures + - Add error handling" +[3] user: "Fix the failing tests" +[4] assistant: read_file: tests/test_api.py +[5] tool: "import pytest\n..." +[6] assistant: "I see the issue with the test fixtures..." +[7] user: "Great, also add error handling" +``` + + +## Prompt Caching (Anthropic) + +Source: `agent/prompt_caching.py` + +Reduces input token costs by ~75% on multi-turn conversations by caching the +conversation prefix. Uses Anthropic's `cache_control` breakpoints. + +### Strategy: system_and_3 + +Anthropic allows a maximum of 4 `cache_control` breakpoints per request. 
Hermes +uses the "system_and_3" strategy: + +``` +Breakpoint 1: System prompt (stable across all turns) +Breakpoint 2: 3rd-to-last non-system message ─┐ +Breakpoint 3: 2nd-to-last non-system message ├─ Rolling window +Breakpoint 4: Last non-system message ─┘ +``` + +### How It Works + +`apply_anthropic_cache_control()` deep-copies the messages and injects +`cache_control` markers: + +```python +# Cache marker format +marker = {"type": "ephemeral"} +# Or for 1-hour TTL: +marker = {"type": "ephemeral", "ttl": "1h"} +``` + +The marker is applied differently based on content type: + +| Content Type | Where Marker Goes | +|-------------|-------------------| +| String content | Converted to `[{"type": "text", "text": ..., "cache_control": ...}]` | +| List content | Added to the last element's dict | +| None/empty | Added as `msg["cache_control"]` | +| Tool messages | Added as `msg["cache_control"]` (native Anthropic only) | + +### Cache-Aware Design Patterns + +1. **Stable system prompt**: The system prompt is breakpoint 1 and cached across + all turns. Avoid mutating it mid-conversation (compression appends a note + only on the first compaction). + +2. **Message ordering matters**: Cache hits require prefix matching. Adding or + removing messages in the middle invalidates the cache for everything after. + +3. **Compression cache interaction**: After compression, the cache is invalidated + for the compressed region but the system prompt cache survives. The rolling + 3-message window re-establishes caching within 1-2 turns. + +4. **TTL selection**: Default is `5m` (5 minutes). Use `1h` for long-running + sessions where the user takes breaks between turns. + +### Enabling Prompt Caching + +Prompt caching is automatically enabled when: +- The model is an Anthropic Claude model (detected by model name) +- The provider supports `cache_control` (native Anthropic API or OpenRouter) + +```yaml +# config.yaml — TTL is configurable +model: + cache_ttl: "5m" # "5m" or "1h" +``` + +The CLI shows caching status at startup: +``` +💾 Prompt caching: ENABLED (Claude via OpenRouter, 5m TTL) +``` + + +## Context Pressure Warnings + +The agent emits context pressure warnings at 85% of the compression threshold +(not 85% of context — 85% of the threshold which is itself 50% of context): + +``` +⚠️ Context is 85% to compaction threshold (42,500/50,000 tokens) +``` + +After compression, if usage drops below 85% of threshold, the warning state +is cleared. If compression fails to reduce below the warning level (the +conversation is too dense), the warning persists but compression won't +re-trigger until the threshold is exceeded again. diff --git a/website/docs/developer-guide/prompt-assembly.md b/website/docs/developer-guide/prompt-assembly.md index 9fdb59256..858ac38ec 100644 --- a/website/docs/developer-guide/prompt-assembly.md +++ b/website/docs/developer-guide/prompt-assembly.md @@ -41,6 +41,163 @@ The cached system prompt is assembled in roughly this order: When `skip_context_files` is set (e.g., subagent delegation), SOUL.md is not loaded and the hardcoded `DEFAULT_AGENT_IDENTITY` is used instead. +### Concrete example: assembled system prompt + +Here is a simplified view of what the final system prompt looks like when all layers are present (comments show the source of each section): + +``` +# Layer 1: Agent Identity (from ~/.hermes/SOUL.md) +You are Hermes, an AI assistant created by Nous Research. +You are an expert software engineer and researcher. +You value correctness, clarity, and efficiency. +... 
+ +# Layer 2: Tool-aware behavior guidance +You have persistent memory across sessions. Save durable facts using +the memory tool: user preferences, environment details, tool quirks, +and stable conventions. Memory is injected into every turn, so keep +it compact and focused on facts that will still matter later. +... +When the user references something from a past conversation or you +suspect relevant cross-session context exists, use session_search +to recall it before asking them to repeat themselves. + +# Tool-use enforcement (for GPT/Codex models only) +You MUST use your tools to take action — do not describe what you +would do or plan to do without actually doing it. +... + +# Layer 3: Honcho static block (when active) +[Honcho personality/context data] + +# Layer 4: Optional system message (from config or API) +[User-configured system message override] + +# Layer 5: Frozen MEMORY snapshot +## Persistent Memory +- User prefers Python 3.12, uses pyproject.toml +- Default editor is nvim +- Working on project "atlas" in ~/code/atlas +- Timezone: US/Pacific + +# Layer 6: Frozen USER profile snapshot +## User Profile +- Name: Alice +- GitHub: alice-dev + +# Layer 7: Skills index +## Skills (mandatory) +Before replying, scan the skills below. If one clearly matches +your task, load it with skill_view(name) and follow its instructions. +... + + software-development: + - code-review: Structured code review workflow + - test-driven-development: TDD methodology + research: + - arxiv: Search and summarize arXiv papers + + +# Layer 8: Context files (from project directory) +# Project Context +The following project context files have been loaded and should be followed: + +## AGENTS.md +This is the atlas project. Use pytest for testing. The main +entry point is src/atlas/main.py. Always run `make lint` before +committing. + +# Layer 9: Timestamp + session +Current time: 2026-03-30T14:30:00-07:00 +Session: abc123 + +# Layer 10: Platform hint +You are a CLI AI Agent. Try not to use markdown but simple text +renderable inside a terminal. +``` + +## How SOUL.md appears in the prompt + +`SOUL.md` lives at `~/.hermes/SOUL.md` and serves as the agent's identity — the very first section of the system prompt. The loading logic in `prompt_builder.py` works as follows: + +```python +# From agent/prompt_builder.py (simplified) +def load_soul_md() -> Optional[str]: + soul_path = get_hermes_home() / "SOUL.md" + if not soul_path.exists(): + return None + content = soul_path.read_text(encoding="utf-8").strip() + content = _scan_context_content(content, "SOUL.md") # Security scan + content = _truncate_content(content, "SOUL.md") # Cap at 20k chars + return content +``` + +When `load_soul_md()` returns content, it replaces the hardcoded `DEFAULT_AGENT_IDENTITY`. The `build_context_files_prompt()` function is then called with `skip_soul=True` to prevent SOUL.md from appearing twice (once as identity, once as a context file). + +If `SOUL.md` doesn't exist, the system falls back to: + +``` +You are Hermes Agent, an intelligent AI assistant created by Nous Research. +You are helpful, knowledgeable, and direct. You assist users with a wide +range of tasks including answering questions, writing and editing code, +analyzing information, creative work, and executing actions via your tools. +You communicate clearly, admit uncertainty when appropriate, and prioritize +being genuinely useful over being verbose unless otherwise directed below. +Be targeted and efficient in your exploration and investigations. 
+``` + +## How context files are injected + +`build_context_files_prompt()` uses a **priority system** — only one project context type is loaded (first match wins): + +```python +# From agent/prompt_builder.py (simplified) +def build_context_files_prompt(cwd=None, skip_soul=False): + cwd_path = Path(cwd).resolve() + + # Priority: first match wins — only ONE project context loaded + project_context = ( + _load_hermes_md(cwd_path) # 1. .hermes.md / HERMES.md (walks to git root) + or _load_agents_md(cwd_path) # 2. AGENTS.md (cwd only) + or _load_claude_md(cwd_path) # 3. CLAUDE.md (cwd only) + or _load_cursorrules(cwd_path) # 4. .cursorrules / .cursor/rules/*.mdc + ) + + sections = [] + if project_context: + sections.append(project_context) + + # SOUL.md from HERMES_HOME (independent of project context) + if not skip_soul: + soul_content = load_soul_md() + if soul_content: + sections.append(soul_content) + + if not sections: + return "" + + return ( + "# Project Context\n\n" + "The following project context files have been loaded " + "and should be followed:\n\n" + + "\n".join(sections) + ) +``` + +### Context file discovery details + +| Priority | Files | Search scope | Notes | +|----------|-------|-------------|-------| +| 1 | `.hermes.md`, `HERMES.md` | CWD up to git root | Hermes-native project config | +| 2 | `AGENTS.md` | CWD only | Common agent instruction file | +| 3 | `CLAUDE.md` | CWD only | Claude Code compatibility | +| 4 | `.cursorrules`, `.cursor/rules/*.mdc` | CWD only | Cursor compatibility | + +All context files are: +- **Security scanned** — checked for prompt injection patterns (invisible unicode, "ignore previous instructions", credential exfiltration attempts) +- **Truncated** — capped at 20,000 characters using 70/20 head/tail ratio with a truncation marker +- **YAML frontmatter stripped** — `.hermes.md` frontmatter is removed (reserved for future config overrides) + ## API-call-time-only layers These are intentionally *not* persisted as part of the cached system prompt: diff --git a/website/docs/developer-guide/session-storage.md b/website/docs/developer-guide/session-storage.md index 103a72b5d..c21401508 100644 --- a/website/docs/developer-guide/session-storage.md +++ b/website/docs/developer-guide/session-storage.md @@ -1,66 +1,388 @@ ---- -sidebar_position: 8 -title: "Session Storage" -description: "How Hermes stores sessions in SQLite, maintains lineage, and exposes recall/search" ---- - # Session Storage -Hermes uses a SQLite-backed session store as the main source of truth for historical conversation state. +Hermes Agent uses a SQLite database (`~/.hermes/state.db`) to persist session +metadata, full message history, and model configuration across CLI and gateway +sessions. This replaces the earlier per-session JSONL file approach. 
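+
+Because it is plain SQLite, the store can be inspected directly. A read-only
+sketch (column names come from the schema below):
+
+```python
+import sqlite3
+from pathlib import Path
+
+db = Path.home() / ".hermes" / "state.db"
+# mode=ro avoids taking write locks against live hermes processes
+con = sqlite3.connect(f"file:{db}?mode=ro", uri=True)
+for row in con.execute(
+    "SELECT id, source, title FROM sessions ORDER BY started_at DESC LIMIT 5"
+):
+    print(row)
+con.close()
+```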
-Primary files: +Source file: `hermes_state.py` -- `hermes_state.py` -- `gateway/session.py` -- `tools/session_search_tool.py` -## Main database +## Architecture Overview -The primary store lives at: - -```text -~/.hermes/state.db +``` +~/.hermes/state.db (SQLite, WAL mode) +├── sessions — Session metadata, token counts, billing +├── messages — Full message history per session +├── messages_fts — FTS5 virtual table for full-text search +└── schema_version — Single-row table tracking migration state ``` -It contains: +Key design decisions: +- **WAL mode** for concurrent readers + one writer (gateway multi-platform) +- **FTS5 virtual table** for fast text search across all session messages +- **Session lineage** via `parent_session_id` chains (compression-triggered splits) +- **Source tagging** (`cli`, `telegram`, `discord`, etc.) for platform filtering +- Batch runner and RL trajectories are NOT stored here (separate systems) -- sessions -- messages -- metadata such as token counts and titles -- lineage relationships -- full-text search indexes -## What is stored per session +## SQLite Schema -Examples of important session metadata: +### Sessions Table -- session ID -- source/platform -- title -- created/updated timestamps -- token counts -- tool call counts -- stored system prompt snapshot -- parent session ID after compression splits +```sql +CREATE TABLE IF NOT EXISTS sessions ( + id TEXT PRIMARY KEY, + source TEXT NOT NULL, + user_id TEXT, + model TEXT, + model_config TEXT, + system_prompt TEXT, + parent_session_id TEXT, + started_at REAL NOT NULL, + ended_at REAL, + end_reason TEXT, + message_count INTEGER DEFAULT 0, + tool_call_count INTEGER DEFAULT 0, + input_tokens INTEGER DEFAULT 0, + output_tokens INTEGER DEFAULT 0, + cache_read_tokens INTEGER DEFAULT 0, + cache_write_tokens INTEGER DEFAULT 0, + reasoning_tokens INTEGER DEFAULT 0, + billing_provider TEXT, + billing_base_url TEXT, + billing_mode TEXT, + estimated_cost_usd REAL, + actual_cost_usd REAL, + cost_status TEXT, + cost_source TEXT, + pricing_version TEXT, + title TEXT, + FOREIGN KEY (parent_session_id) REFERENCES sessions(id) +); -## Lineage +CREATE INDEX IF NOT EXISTS idx_sessions_source ON sessions(source); +CREATE INDEX IF NOT EXISTS idx_sessions_parent ON sessions(parent_session_id); +CREATE INDEX IF NOT EXISTS idx_sessions_started ON sessions(started_at DESC); +CREATE UNIQUE INDEX IF NOT EXISTS idx_sessions_title_unique + ON sessions(title) WHERE title IS NOT NULL; +``` -When Hermes compresses a conversation, it can continue in a new session ID while preserving ancestry via `parent_session_id`. +### Messages Table -This means resuming/searching can follow session families instead of treating each compressed shard as unrelated. 
+```sql +CREATE TABLE IF NOT EXISTS messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(id), + role TEXT NOT NULL, + content TEXT, + tool_call_id TEXT, + tool_calls TEXT, + tool_name TEXT, + timestamp REAL NOT NULL, + token_count INTEGER, + finish_reason TEXT, + reasoning TEXT, + reasoning_details TEXT, + codex_reasoning_items TEXT +); -## Gateway vs CLI persistence +CREATE INDEX IF NOT EXISTS idx_messages_session ON messages(session_id, timestamp); +``` -- CLI uses the state DB directly for resume/history/search -- gateway keeps active-session mappings and may also maintain additional platform transcript/state files -- some legacy JSON/JSONL artifacts still exist for compatibility, but SQLite is the main historical store +Notes: +- `tool_calls` is stored as a JSON string (serialized list of tool call objects) +- `reasoning_details` and `codex_reasoning_items` are stored as JSON strings +- `reasoning` stores the raw reasoning text for providers that expose it +- Timestamps are Unix epoch floats (`time.time()`) -## Session search +### FTS5 Full-Text Search -The `session_search` tool uses the session DB's search features to retrieve and summarize relevant past work. +```sql +CREATE VIRTUAL TABLE IF NOT EXISTS messages_fts USING fts5( + content, + content=messages, + content_rowid=id +); +``` -## Related docs +The FTS5 table is kept in sync via three triggers that fire on INSERT, UPDATE, +and DELETE of the `messages` table: -- [Gateway Internals](./gateway-internals.md) -- [Prompt Assembly](./prompt-assembly.md) -- [Context Compression & Prompt Caching](./context-compression-and-caching.md) +```sql +CREATE TRIGGER IF NOT EXISTS messages_fts_insert AFTER INSERT ON messages BEGIN + INSERT INTO messages_fts(rowid, content) VALUES (new.id, new.content); +END; + +CREATE TRIGGER IF NOT EXISTS messages_fts_delete AFTER DELETE ON messages BEGIN + INSERT INTO messages_fts(messages_fts, rowid, content) + VALUES('delete', old.id, old.content); +END; + +CREATE TRIGGER IF NOT EXISTS messages_fts_update AFTER UPDATE ON messages BEGIN + INSERT INTO messages_fts(messages_fts, rowid, content) + VALUES('delete', old.id, old.content); + INSERT INTO messages_fts(rowid, content) VALUES (new.id, new.content); +END; +``` + + +## Schema Version and Migrations + +Current schema version: **6** + +The `schema_version` table stores a single integer. On initialization, +`_init_schema()` checks the current version and applies migrations sequentially: + +| Version | Change | +|---------|--------| +| 1 | Initial schema (sessions, messages, FTS5) | +| 2 | Add `finish_reason` column to messages | +| 3 | Add `title` column to sessions | +| 4 | Add unique index on `title` (NULLs allowed, non-NULL must be unique) | +| 5 | Add billing columns: `cache_read_tokens`, `cache_write_tokens`, `reasoning_tokens`, `billing_provider`, `billing_base_url`, `billing_mode`, `estimated_cost_usd`, `actual_cost_usd`, `cost_status`, `cost_source`, `pricing_version` | +| 6 | Add reasoning columns to messages: `reasoning`, `reasoning_details`, `codex_reasoning_items` | + +Each migration uses `ALTER TABLE ADD COLUMN` wrapped in try/except to handle +the column-already-exists case (idempotent). The version number is bumped after +each successful migration block. + + +## Write Contention Handling + +Multiple hermes processes (gateway + CLI sessions + worktree agents) share one +`state.db`. 
The `SessionDB` class handles write contention with: + +- **Short SQLite timeout** (1 second) instead of the default 30s +- **Application-level retry** with random jitter (20-150ms, up to 15 retries) +- **BEGIN IMMEDIATE** transactions to surface lock contention at transaction start +- **Periodic WAL checkpoints** every 50 successful writes (PASSIVE mode) + +This avoids the "convoy effect" where SQLite's deterministic internal backoff +causes all competing writers to retry at the same intervals. + +``` +_WRITE_MAX_RETRIES = 15 +_WRITE_RETRY_MIN_S = 0.020 # 20ms +_WRITE_RETRY_MAX_S = 0.150 # 150ms +_CHECKPOINT_EVERY_N_WRITES = 50 +``` + + +## Common Operations + +### Initialize + +```python +from hermes_state import SessionDB + +db = SessionDB() # Default: ~/.hermes/state.db +db = SessionDB(db_path=Path("/tmp/test.db")) # Custom path +``` + +### Create and Manage Sessions + +```python +# Create a new session +db.create_session( + session_id="sess_abc123", + source="cli", + model="anthropic/claude-sonnet-4.6", + user_id="user_1", + parent_session_id=None, # or previous session ID for lineage +) + +# End a session +db.end_session("sess_abc123", end_reason="user_exit") + +# Reopen a session (clear ended_at/end_reason) +db.reopen_session("sess_abc123") +``` + +### Store Messages + +```python +msg_id = db.append_message( + session_id="sess_abc123", + role="assistant", + content="Here's the answer...", + tool_calls=[{"id": "call_1", "function": {"name": "terminal", "arguments": "{}"}}], + token_count=150, + finish_reason="stop", + reasoning="Let me think about this...", +) +``` + +### Retrieve Messages + +```python +# Raw messages with all metadata +messages = db.get_messages("sess_abc123") + +# OpenAI conversation format (for API replay) +conversation = db.get_messages_as_conversation("sess_abc123") +# Returns: [{"role": "user", "content": "..."}, {"role": "assistant", ...}] +``` + +### Session Titles + +```python +# Set a title (must be unique among non-NULL titles) +db.set_session_title("sess_abc123", "Fix Docker Build") + +# Resolve by title (returns most recent in lineage) +session_id = db.resolve_session_by_title("Fix Docker Build") + +# Auto-generate next title in lineage +next_title = db.get_next_title_in_lineage("Fix Docker Build") +# Returns: "Fix Docker Build #2" +``` + + +## Full-Text Search + +The `search_messages()` method supports FTS5 query syntax with automatic +sanitization of user input. 
+ +### Basic Search + +```python +results = db.search_messages("docker deployment") +``` + +### FTS5 Query Syntax + +| Syntax | Example | Meaning | +|--------|---------|---------| +| Keywords | `docker deployment` | Both terms (implicit AND) | +| Quoted phrase | `"exact phrase"` | Exact phrase match | +| Boolean OR | `docker OR kubernetes` | Either term | +| Boolean NOT | `python NOT java` | Exclude term | +| Prefix | `deploy*` | Prefix match | + +### Filtered Search + +```python +# Search only CLI sessions +results = db.search_messages("error", source_filter=["cli"]) + +# Exclude gateway sessions +results = db.search_messages("bug", exclude_sources=["telegram", "discord"]) + +# Search only user messages +results = db.search_messages("help", role_filter=["user"]) +``` + +### Search Results Format + +Each result includes: +- `id`, `session_id`, `role`, `timestamp` +- `snippet` — FTS5-generated snippet with `>>>match<<<` markers +- `context` — 1 message before and after the match (content truncated to 200 chars) +- `source`, `model`, `session_started` — from the parent session + +The `_sanitize_fts5_query()` method handles edge cases: +- Strips unmatched quotes and special characters +- Wraps hyphenated terms in quotes (`chat-send` → `"chat-send"`) +- Removes dangling boolean operators (`hello AND` → `hello`) + + +## Session Lineage + +Sessions can form chains via `parent_session_id`. This happens when context +compression triggers a session split in the gateway. + +### Query: Find Session Lineage + +```sql +-- Find all ancestors of a session +WITH RECURSIVE lineage AS ( + SELECT * FROM sessions WHERE id = ? + UNION ALL + SELECT s.* FROM sessions s + JOIN lineage l ON s.id = l.parent_session_id +) +SELECT id, title, started_at, parent_session_id FROM lineage; + +-- Find all descendants of a session +WITH RECURSIVE descendants AS ( + SELECT * FROM sessions WHERE id = ? 
+ UNION ALL + SELECT s.* FROM sessions s + JOIN descendants d ON s.parent_session_id = d.id +) +SELECT id, title, started_at FROM descendants; +``` + +### Query: Recent Sessions with Preview + +```sql +SELECT s.*, + COALESCE( + (SELECT SUBSTR(m.content, 1, 63) + FROM messages m + WHERE m.session_id = s.id AND m.role = 'user' AND m.content IS NOT NULL + ORDER BY m.timestamp, m.id LIMIT 1), + '' + ) AS preview, + COALESCE( + (SELECT MAX(m2.timestamp) FROM messages m2 WHERE m2.session_id = s.id), + s.started_at + ) AS last_active +FROM sessions s +ORDER BY s.started_at DESC +LIMIT 20; +``` + +### Query: Token Usage Statistics + +```sql +-- Total tokens by model +SELECT model, + COUNT(*) as session_count, + SUM(input_tokens) as total_input, + SUM(output_tokens) as total_output, + SUM(estimated_cost_usd) as total_cost +FROM sessions +WHERE model IS NOT NULL +GROUP BY model +ORDER BY total_cost DESC; + +-- Sessions with highest token usage +SELECT id, title, model, input_tokens + output_tokens AS total_tokens, + estimated_cost_usd +FROM sessions +ORDER BY total_tokens DESC +LIMIT 10; +``` + + +## Export and Cleanup + +```python +# Export a single session with messages +data = db.export_session("sess_abc123") + +# Export all sessions (with messages) as list of dicts +all_data = db.export_all(source="cli") + +# Delete old sessions (only ended sessions) +deleted_count = db.prune_sessions(older_than_days=90) +deleted_count = db.prune_sessions(older_than_days=30, source="telegram") + +# Clear messages but keep the session record +db.clear_messages("sess_abc123") + +# Delete session and all messages +db.delete_session("sess_abc123") +``` + + +## Database Location + +Default path: `~/.hermes/state.db` + +This is derived from `hermes_constants.get_hermes_home()` which resolves to +`~/.hermes/` by default, or the value of `HERMES_HOME` environment variable. + +The database file, WAL file (`state.db-wal`), and shared-memory file +(`state.db-shm`) are all created in the same directory. diff --git a/website/docs/developer-guide/tools-runtime.md b/website/docs/developer-guide/tools-runtime.md index 4cb4e0d1e..f6fbc86de 100644 --- a/website/docs/developer-guide/tools-runtime.md +++ b/website/docs/developer-guide/tools-runtime.md @@ -22,6 +22,89 @@ Each tool module calls `registry.register(...)` at import time. `model_tools.py` is responsible for importing/discovering tool modules and building the schema list used by the model. +### How `registry.register()` works + +Every tool file in `tools/` calls `registry.register()` at module level to declare itself. The function signature is: + +```python +registry.register( + name="terminal", # Unique tool name (used in API schemas) + toolset="terminal", # Toolset this tool belongs to + schema={...}, # OpenAI function-calling schema (description, parameters) + handler=handle_terminal, # The function that executes when the tool is called + check_fn=check_terminal, # Optional: returns True/False for availability + requires_env=["SOME_VAR"], # Optional: env vars needed (for UI display) + is_async=False, # Whether the handler is an async coroutine + description="Run commands", # Human-readable description + emoji="💻", # Emoji for spinner/progress display +) +``` + +Each call creates a `ToolEntry` stored in the singleton `ToolRegistry._tools` dict keyed by tool name. If a name collision occurs across toolsets, a warning is logged and the later registration wins. 
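+
+As a concrete illustration, a self-contained tool module might look like the
+sketch below. The tool itself is hypothetical, and the `tools.registry` import
+path and the handler signature are assumptions for illustration; only the
+`registry.register()` call mirrors the signature shown above.
+
+```python
+# tools/uptime_tool.py (hypothetical example module)
+import shutil
+import subprocess
+
+from tools.registry import registry  # assumed import path for the shared registry
+
+
+def handle_uptime(args: dict) -> str:
+    """Run `uptime` and return its output as the tool result string."""
+    # Handler signature assumed: receives the parsed arguments dict
+    result = subprocess.run(["uptime"], capture_output=True, text=True)
+    return result.stdout.strip()
+
+
+registry.register(
+    name="uptime",
+    toolset="system",
+    schema={
+        "name": "uptime",
+        "description": "Report system load averages",
+        "parameters": {"type": "object", "properties": {}},
+    },
+    handler=handle_uptime,
+    # Only advertise the tool when the binary actually exists
+    check_fn=lambda: shutil.which("uptime") is not None,
+    is_async=False,
+    description="Report system load averages",
+    emoji="⏱️",
+)
+```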
+ +### Discovery: `_discover_tools()` + +When `model_tools.py` is imported, it calls `_discover_tools()` which imports every tool module in order: + +```python +_modules = [ + "tools.web_tools", + "tools.terminal_tool", + "tools.file_tools", + "tools.vision_tools", + "tools.mixture_of_agents_tool", + "tools.image_generation_tool", + "tools.skills_tool", + "tools.browser_tool", + "tools.cronjob_tools", + "tools.rl_training_tool", + "tools.tts_tool", + "tools.todo_tool", + "tools.memory_tool", + "tools.session_search_tool", + "tools.clarify_tool", + "tools.code_execution_tool", + "tools.delegate_tool", + "tools.process_registry", + "tools.send_message_tool", + "tools.honcho_tools", + "tools.homeassistant_tool", +] +``` + +Each import triggers the module's `registry.register()` calls. Errors in optional tools (e.g., missing `fal_client` for image generation) are caught and logged — they don't prevent other tools from loading. + +After core tool discovery, MCP tools and plugin tools are also discovered: + +1. **MCP tools** — `tools.mcp_tool.discover_mcp_tools()` reads MCP server config and registers tools from external servers. +2. **Plugin tools** — `hermes_cli.plugins.discover_plugins()` loads user/project/pip plugins that may register additional tools. + +## Tool availability checking (`check_fn`) + +Each tool can optionally provide a `check_fn` — a callable that returns `True` when the tool is available and `False` otherwise. Typical checks include: + +- **API key present** — e.g., `lambda: bool(os.environ.get("SERP_API_KEY"))` for web search +- **Service running** — e.g., checking if the Honcho server is configured +- **Binary installed** — e.g., verifying `playwright` is available for browser tools + +When `registry.get_definitions()` builds the schema list for the model, it runs each tool's `check_fn()`: + +```python +# Simplified from registry.py +if entry.check_fn: + try: + available = bool(entry.check_fn()) + except Exception: + available = False # Exceptions = unavailable + if not available: + continue # Skip this tool entirely +``` + +Key behaviors: +- Check results are **cached per-call** — if multiple tools share the same `check_fn`, it only runs once. +- Exceptions in `check_fn()` are treated as "unavailable" (fail-safe). +- The `is_toolset_available()` method checks whether a toolset's `check_fn` passes, used for UI display and toolset resolution. + ## Toolset resolution Toolsets are named bundles of tools. Hermes resolves them through: @@ -31,10 +114,108 @@ Toolsets are named bundles of tools. Hermes resolves them through: - dynamic MCP toolsets - curated special-purpose sets like `hermes-acp` +### How `get_tool_definitions()` filters tools + +The main entry point is `model_tools.get_tool_definitions(enabled_toolsets, disabled_toolsets, quiet_mode)`: + +1. **If `enabled_toolsets` is provided** — only tools from those toolsets are included. Each toolset name is resolved via `resolve_toolset()` which expands composite toolsets into individual tool names. + +2. **If `disabled_toolsets` is provided** — start with ALL toolsets, then subtract the disabled ones. + +3. **If neither** — include all known toolsets. + +4. **Registry filtering** — the resolved tool name set is passed to `registry.get_definitions()`, which applies `check_fn` filtering and returns OpenAI-format schemas. + +5. 
**Dynamic schema patching** — after filtering, `execute_code` and `browser_navigate` schemas are dynamically adjusted to only reference tools that actually passed filtering (prevents model hallucination of unavailable tools). + +### Legacy toolset names + +Old toolset names with `_tools` suffixes (e.g., `web_tools`, `terminal_tools`) are mapped to their modern tool names via `_LEGACY_TOOLSET_MAP` for backward compatibility. + ## Dispatch At runtime, tools are dispatched through the central registry, with agent-loop exceptions for some agent-level tools such as memory/todo/session-search handling. +### Dispatch flow: model tool_call → handler execution + +When the model returns a `tool_call`, the flow is: + +``` +Model response with tool_call + ↓ +run_agent.py agent loop + ↓ +model_tools.handle_function_call(name, args, task_id, user_task) + ↓ +[Agent-loop tools?] → handled directly by agent loop (todo, memory, session_search, delegate_task) + ↓ +[Plugin pre-hook] → invoke_hook("pre_tool_call", ...) + ↓ +registry.dispatch(name, args, **kwargs) + ↓ +Look up ToolEntry by name + ↓ +[Async handler?] → bridge via _run_async() +[Sync handler?] → call directly + ↓ +Return result string (or JSON error) + ↓ +[Plugin post-hook] → invoke_hook("post_tool_call", ...) +``` + +### Error wrapping + +All tool execution is wrapped in error handling at two levels: + +1. **`registry.dispatch()`** — catches any exception from the handler and returns `{"error": "Tool execution failed: ExceptionType: message"}` as JSON. + +2. **`handle_function_call()`** — wraps the entire dispatch in a secondary try/except that returns `{"error": "Error executing tool_name: message"}`. + +This ensures the model always receives a well-formed JSON string, never an unhandled exception. + +### Agent-loop tools + +Four tools are intercepted before registry dispatch because they need agent-level state (TodoStore, MemoryStore, etc.): + +- `todo` — planning/task tracking +- `memory` — persistent memory writes +- `session_search` — cross-session recall +- `delegate_task` — spawns subagent sessions + +These tools' schemas are still registered in the registry (for `get_tool_definitions`), but their handlers return a stub error if dispatch somehow reaches them directly. + +### Async bridging + +When a tool handler is async, `_run_async()` bridges it to the sync dispatch path: + +- **CLI path (no running loop)** — uses a persistent event loop to keep cached async clients alive +- **Gateway path (running loop)** — spins up a disposable thread with `asyncio.run()` +- **Worker threads (parallel tools)** — uses per-thread persistent loops stored in thread-local storage + +## The DANGEROUS_PATTERNS approval flow + +The terminal tool integrates a dangerous-command approval system defined in `tools/approval.py`: + +1. **Pattern detection** — `DANGEROUS_PATTERNS` is a list of `(regex, description)` tuples covering destructive operations: + - Recursive deletes (`rm -rf`) + - Filesystem formatting (`mkfs`, `dd`) + - SQL destructive operations (`DROP TABLE`, `DELETE FROM` without `WHERE`) + - System config overwrites (`> /etc/`) + - Service manipulation (`systemctl stop`) + - Remote code execution (`curl | sh`) + - Fork bombs, process kills, etc. + +2. **Detection** — before executing any terminal command, `detect_dangerous_command(command)` checks against all patterns. + +3. 
**Approval prompt** — if a match is found: + - **CLI mode** — an interactive prompt asks the user to approve, deny, or allow permanently + - **Gateway mode** — an async approval callback sends the request to the messaging platform + - **Smart approval** — optionally, an auxiliary LLM can auto-approve low-risk commands that match patterns (e.g., `rm -rf node_modules/` is safe but matches "recursive delete") + +4. **Session state** — approvals are tracked per-session. Once you approve "recursive delete" for a session, subsequent `rm -rf` commands don't re-prompt. + +5. **Permanent allowlist** — the "allow permanently" option writes the pattern to `config.yaml`'s `command_allowlist`, persisting across sessions. + ## Terminal/runtime environments The terminal system supports multiple backends: diff --git a/website/docs/developer-guide/trajectory-format.md b/website/docs/developer-guide/trajectory-format.md index 0232846ca..f36244ed2 100644 --- a/website/docs/developer-guide/trajectory-format.md +++ b/website/docs/developer-guide/trajectory-format.md @@ -1,56 +1,233 @@ ---- -sidebar_position: 10 -title: "Trajectories & Training Format" -description: "How Hermes saves trajectories, normalizes tool calls, and produces training-friendly outputs" ---- +# Trajectory Format -# Trajectories & Training Format +Hermes Agent saves conversation trajectories in ShareGPT-compatible JSONL format +for use as training data, debugging artifacts, and reinforcement learning datasets. -Hermes can save conversation trajectories for training, evaluation, and batch data generation workflows. +Source files: `agent/trajectory.py`, `run_agent.py` (lines 1788-1975), `batch_runner.py` -Primary files: -- `agent/trajectory.py` -- `run_agent.py` -- `batch_runner.py` -- `trajectory_compressor.py` +## File Naming Convention -## What trajectories are for +Trajectories are written to files in the current working directory: -Trajectory outputs are used for: +| File | When | +|------|------| +| `trajectory_samples.jsonl` | Conversations that completed successfully (`completed=True`) | +| `failed_trajectories.jsonl` | Conversations that failed or were interrupted (`completed=False`) | -- SFT data generation -- debugging agent behavior -- benchmark/evaluation artifact capture -- post-processing and compression pipelines +The batch runner (`batch_runner.py`) writes to a custom output file per batch +(e.g., `batch_001_output.jsonl`) with additional metadata fields. -## Normalization strategy +You can override the filename via the `filename` parameter in `save_trajectory()`. -Hermes converts live conversation structure into a training-friendly format. -Important behaviors include: +## JSONL Entry Format -- representing reasoning in explicit markup -- converting tool calls into structured XML-like regions for dataset compatibility -- grouping tool outputs appropriately -- separating successful and failed trajectories +Each line in the file is a self-contained JSON object. There are two variants: -## Persistence boundaries +### CLI/Interactive Format (from `_save_trajectory`) -Trajectory files do **not** blindly mirror all runtime prompt state. +```json +{ + "conversations": [ ... ], + "timestamp": "2026-03-30T14:22:31.456789", + "model": "anthropic/claude-sonnet-4.6", + "completed": true +} +``` -Some prompt-time-only layers are intentionally excluded from persisted trajectory content so datasets are cleaner and less environment-specific. 
+
-## Batch runner
 
+```json
+{
+  "prompt_index": 42,
+  "conversations": [ ... ],
+  "metadata": { "prompt_source": "gsm8k", "difficulty": "hard" },
+  "completed": true,
+  "partial": false,
+  "api_calls": 7,
+  "toolsets_used": ["code_tools", "file_tools"],
+  "tool_stats": {
+    "terminal": {"count": 3, "success": 3, "failure": 0},
+    "read_file": {"count": 2, "success": 2, "failure": 0},
+    "write_file": {"count": 0, "success": 0, "failure": 0}
+  },
+  "tool_error_counts": {
+    "terminal": 0,
+    "read_file": 0,
+    "write_file": 0
+  }
+}
+```
 
-`batch_runner.py` emits richer metadata than single-session trajectory saving, including:
 
+The `tool_stats` and `tool_error_counts` dictionaries are normalized to include
+ALL possible tools (from `model_tools.TOOL_TO_TOOLSET_MAP`) with zero defaults,
+ensuring consistent schema across entries for HuggingFace dataset loading.
 
-- model/provider metadata
-- toolset info
-- partial/failure markers
-- tool statistics
 
-## Related docs
 
+## Conversations Array (ShareGPT Format)
 
-- [Environments, Benchmarks & Data Generation](./environments.md)
-- [Agent Loop Internals](./agent-loop.md)
 
+The `conversations` array uses ShareGPT role conventions:
+
+| API Role | ShareGPT `from` |
+|----------|-----------------|
+| system | `"system"` |
+| user | `"human"` |
+| assistant | `"gpt"` |
+| tool | `"tool"` |
+
+### Complete Example
+
+```json
+{
+  "conversations": [
+    {
+      "from": "system",
+      "value": "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. If available tools are not relevant in assisting with user query, just respond in natural conversational language. Don't make assumptions about what values to plug into functions. After calling & executing the functions, you will be provided with function results within <tool_response></tool_response> XML tags. Here are the available tools:\n<tools>\n[{\"name\": \"terminal\", \"description\": \"Execute shell commands\", \"parameters\": {\"type\": \"object\", \"properties\": {\"command\": {\"type\": \"string\"}}}, \"required\": null}]\n</tools>\n\nFor each function call return a JSON object, with the following pydantic model json schema for each:\n{'title': 'FunctionCall', 'type': 'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}, 'arguments': {'title': 'Arguments', 'type': 'object'}}, 'required': ['name', 'arguments']}\nEach function call should be enclosed within <tool_call></tool_call> XML tags.\nExample:\n<tool_call>\n{'name': <function-name>,'arguments': <args-dict>}\n</tool_call>\n"
+    },
+    {
+      "from": "human",
+      "value": "What Python version is installed?"
+    },
+    {
+      "from": "gpt",
+      "value": "<think>\nThe user wants to know the Python version. I should run python3 --version.\n</think>\n\n<tool_call>\n{\"name\": \"terminal\", \"arguments\": {\"command\": \"python3 --version\"}}\n</tool_call>"
+    },
+    {
+      "from": "tool",
+      "value": "<tool_response>\n{\"tool_call_id\": \"call_abc123\", \"name\": \"terminal\", \"content\": \"Python 3.11.6\"}\n</tool_response>"
+    },
+    {
+      "from": "gpt",
+      "value": "<think>\nGot the version. I can now answer the user.\n</think>\n\nPython 3.11.6 is installed on this system."
+    }
+  ],
+  "timestamp": "2026-03-30T14:22:31.456789",
+  "model": "anthropic/claude-sonnet-4.6",
+  "completed": true
+}
+```
+
+
+## Normalization Rules
+
+### Reasoning Content Markup
+
+The trajectory converter normalizes ALL reasoning into `<think>` tags, regardless
+of how the model originally produced it:
+
+1. **Native thinking tokens** (`msg["reasoning"]` field from providers like
+   Anthropic, OpenAI o-series): Wrapped as `<think>\n{reasoning}\n</think>\n\n`
+   and prepended before the content.
+
+2. **REASONING_SCRATCHPAD XML** (when native thinking is disabled and the model
+   reasons via system-prompt-instructed XML): `<REASONING_SCRATCHPAD>` tags are
+   converted to `<think>` via `convert_scratchpad_to_think()`.
+
+3. **Empty think blocks**: Every `gpt` turn is guaranteed to have a `<think>`
+   block. If no reasoning was produced, an empty block is inserted:
+   `<think>\n</think>\n\n` — this ensures consistent format for training data.
+
+### Tool Call Normalization
+
+Tool calls from the API format (with `tool_call_id`, function name, arguments as
+JSON string) are converted to XML-wrapped JSON:
+
+```
+<tool_call>
+{"name": "terminal", "arguments": {"command": "ls -la"}}
+</tool_call>
+```
+
+- Arguments are parsed from JSON strings back to objects (not double-encoded)
+- If JSON parsing fails (shouldn't happen — validated during conversation),
+  an empty `{}` is used with a warning logged
+- Multiple tool calls in one assistant turn produce multiple `<tool_call>` blocks
+  in a single `gpt` message
+
+### Tool Response Normalization
+
+All tool results following an assistant message are grouped into a single `tool`
+turn with XML-wrapped JSON responses:
+
+```
+<tool_response>
+{"tool_call_id": "call_abc123", "name": "terminal", "content": "output here"}
+</tool_response>
+```
+
+- If tool content looks like JSON (starts with `{` or `[`), it's parsed so the
+  content field contains a JSON object/array rather than a string
+- Multiple tool results are joined with newlines in one message
+- The tool name is matched by position against the parent assistant's `tool_calls`
+  array
+
+### System Message
+
+The system message is generated at save time (not taken from the conversation).
+It follows the Hermes function-calling prompt template with:
+
+- Preamble explaining the function-calling protocol
+- `<tools>` XML block containing the JSON tool definitions
+- Schema reference for `FunctionCall` objects
+- `<tool_call>` example
+
+Tool definitions include `name`, `description`, `parameters`, and `required`
+(set to `null` to match the canonical format).
+
+
+## Loading Trajectories
+
+Trajectories are standard JSONL — load with any JSON-lines reader:
+
+```python
+import json
+
+def load_trajectories(path: str):
+    """Load trajectory entries from a JSONL file."""
+    entries = []
+    with open(path, "r", encoding="utf-8") as f:
+        for line in f:
+            line = line.strip()
+            if line:
+                entries.append(json.loads(line))
+    return entries
+
+# Filter to successful completions only
+successful = [e for e in load_trajectories("trajectory_samples.jsonl")
+              if e.get("completed")]
+
+# Extract just the conversations for training
+training_data = [e["conversations"] for e in successful]
+```
+
+### Loading for HuggingFace Datasets
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("json", data_files="trajectory_samples.jsonl")
+```
+
+The normalized `tool_stats` schema ensures all entries have the same columns,
+preventing Arrow schema mismatch errors during dataset loading.
+
+
+## Controlling Trajectory Saving
+
+In the CLI, trajectory saving is controlled by:
+
+```yaml
+# config.yaml
+agent:
+  save_trajectories: true  # default: false
+```
+
+Or via the `--save-trajectories` flag. When the agent initializes with
+`save_trajectories=True`, the `_save_trajectory()` method is called at the end
+of each conversation turn.
+
+The batch runner always saves trajectories (that's its primary purpose).
+ +Samples with zero reasoning across all turns are automatically discarded by the +batch runner to avoid polluting training data with non-reasoning examples. diff --git a/website/docs/getting-started/quickstart.md b/website/docs/getting-started/quickstart.md index 27cee7084..7ed83e819 100644 --- a/website/docs/getting-started/quickstart.md +++ b/website/docs/getting-started/quickstart.md @@ -54,11 +54,14 @@ hermes setup # Or configure everything at once | **Kilo Code** | KiloCode-hosted models | Set `KILOCODE_API_KEY` | | **OpenCode Zen** | Pay-as-you-go access to curated models | Set `OPENCODE_ZEN_API_KEY` | | **OpenCode Go** | $10/month subscription for open models | Set `OPENCODE_GO_API_KEY` | +| **DeepSeek** | Direct DeepSeek API access | Set `DEEPSEEK_API_KEY` | +| **GitHub Copilot** | GitHub Copilot subscription (GPT-5.x, Claude, Gemini, etc.) | OAuth via `hermes model`, or `COPILOT_GITHUB_TOKEN` / `GH_TOKEN` | +| **GitHub Copilot ACP** | Copilot ACP agent backend (spawns local `copilot` CLI) | `hermes model` (requires `copilot` CLI + `copilot login`) | | **Vercel AI Gateway** | Vercel AI Gateway routing | Set `AI_GATEWAY_API_KEY` | | **Custom Endpoint** | VLLM, SGLang, Ollama, or any OpenAI-compatible API | Set base URL + API key | :::tip -You can switch providers at any time with `hermes model` — no code changes, no lock-in. When configuring a custom endpoint, Hermes will prompt for the context window size and auto-detect it when possible. See [Context Length Detection](../user-guide/configuration.md#context-length-detection) for details. +You can switch providers at any time with `hermes model` — no code changes, no lock-in. When configuring a custom endpoint, Hermes will prompt for the context window size and auto-detect it when possible. See [Context Length Detection](../integrations/providers.md#context-length-detection) for details. ::: ## 3. Start Chatting diff --git a/website/docs/getting-started/updating.md b/website/docs/getting-started/updating.md index a44c7706a..04abcc40e 100644 --- a/website/docs/getting-started/updating.md +++ b/website/docs/getting-started/updating.md @@ -20,6 +20,43 @@ This pulls the latest code, updates dependencies, and prompts you to configure a `hermes update` automatically detects new configuration options and prompts you to add them. If you skipped that prompt, you can manually run `hermes config check` to see missing options, then `hermes config migrate` to interactively add them. ::: +### What happens during an update + +When you run `hermes update`, the following steps occur: + +1. **Git pull** — pulls the latest code from the `main` branch and updates submodules +2. **Dependency install** — runs `uv pip install -e ".[all]"` to pick up new or changed dependencies +3. **Config migration** — detects new config options added since your version and prompts you to set them +4. **Gateway auto-restart** — if the gateway service is running (systemd on Linux, launchd on macOS), it is **automatically restarted** after the update completes so the new code takes effect immediately + +Expected output looks like: + +``` +$ hermes update +Updating Hermes Agent... +📥 Pulling latest code... +Already up to date. (or: Updating abc1234..def5678) +📦 Updating dependencies... +✅ Dependencies updated +🔍 Checking for new config options... +✅ Config is up to date (or: Found 2 new options — running migration...) +🔄 Restarting gateway service... +✅ Gateway restarted +✅ Hermes Agent updated successfully! 
+```
+
+### Checking your current version
+
+```bash
+hermes version
+```
+
+Compare against the latest release at the [GitHub releases page](https://github.com/NousResearch/hermes-agent/releases) or check for available updates:
+
+```bash
+hermes update --check
+```
+
 ### Updating from Messaging Platforms
 
 You can also update directly from Telegram, Discord, Slack, or WhatsApp by sending:
@@ -28,7 +65,7 @@ You can also update directly from Telegram, Discord, Slack, or WhatsApp by sendi
 /update
 ```
 
-This pulls the latest code, updates dependencies, and restarts the gateway.
+This pulls the latest code, updates dependencies, and restarts the gateway. The bot will briefly go offline during the restart (typically 5–15 seconds) and then resume.
 
 ### Manual Update
 
@@ -51,6 +88,57 @@ hermes config check
 hermes config migrate  # Interactively add any missing options
 ```
 
+### Rollback instructions
+
+If an update introduces a problem, you can roll back to a previous version:
+
+```bash
+cd /path/to/hermes-agent
+
+# List recent versions
+git log --oneline -10
+
+# Roll back to a specific commit
+git checkout <commit-hash>
+git submodule update --init --recursive
+uv pip install -e ".[all]"
+
+# Restart the gateway if running
+hermes gateway restart
+```
+
+To roll back to a specific release tag:
+
+```bash
+git checkout v0.6.0
+git submodule update --init --recursive
+uv pip install -e ".[all]"
+```
+
+:::warning
+Rolling back may cause config incompatibilities if new options were added. Run `hermes config check` after rolling back and remove any unrecognized options from `config.yaml` if you encounter errors.
+:::
+
+### Note for Nix users
+
+If you installed via Nix flake, updates are managed through the Nix package manager:
+
+```bash
+# Update the flake input
+nix flake update hermes-agent
+
+# Or rebuild with the latest
+nix profile upgrade hermes-agent
+```
+
+Nix installations are immutable — rollback is handled by Nix's generation system:
+
+```bash
+nix profile rollback
+```
+
+See [Nix Setup](./nix-setup.md) for more details.
+
 ---
 
 ## Uninstalling
diff --git a/website/docs/guides/build-a-hermes-plugin.md b/website/docs/guides/build-a-hermes-plugin.md
index abe1e3424..b3f6df959 100644
--- a/website/docs/guides/build-a-hermes-plugin.md
+++ b/website/docs/guides/build-a-hermes-plugin.md
@@ -1,5 +1,8 @@
 ---
-sidebar_position: 10
+sidebar_position: 8
+sidebar_label: "Build a Plugin"
+title: "Build a Hermes Plugin"
+description: "Step-by-step guide to building a complete Hermes plugin with tools, hooks, data files, and skills"
 ---
 
 # Build a Hermes Plugin
diff --git a/website/docs/integrations/index.md b/website/docs/integrations/index.md
new file mode 100644
index 000000000..cbd771072
--- /dev/null
+++ b/website/docs/integrations/index.md
@@ -0,0 +1,82 @@
+---
+title: "Integrations"
+sidebar_label: "Overview"
+sidebar_position: 0
+---
+
+# Integrations
+
+Hermes Agent connects to external systems for AI inference, tool servers, IDE workflows, programmatic access, and more. These integrations extend what Hermes can do and where it can run.
+
+## AI Providers & Routing
+
+Hermes supports multiple AI inference providers out of the box. Use `hermes model` to configure interactively, or set them in `config.yaml`.
+
+- **[AI Providers](/docs/integrations/providers)** — OpenRouter, Anthropic, OpenAI, Google, and any OpenAI-compatible endpoint. Hermes auto-detects capabilities like vision, streaming, and tool use per provider.
+- **[Provider Routing](/docs/user-guide/features/provider-routing)** — Fine-grained control over which underlying providers handle your OpenRouter requests. Optimize for cost, speed, or quality with sorting, whitelists, blacklists, and explicit priority ordering. +- **[Fallback Providers](/docs/user-guide/features/fallback-providers)** — Automatic failover to backup LLM providers when your primary model encounters errors. Includes primary model fallback and independent auxiliary task fallback for vision, compression, and web extraction. + +## Tool Servers (MCP) + +- **[MCP Servers](/docs/user-guide/features/mcp)** — Connect Hermes to external tool servers via Model Context Protocol. Access tools from GitHub, databases, file systems, browser stacks, internal APIs, and more without writing native Hermes tools. Supports both stdio and SSE transports, per-server tool filtering, and capability-aware resource/prompt registration. + +## Web Search Backends + +The `web_search`, `web_extract`, and `web_crawl` tools support four backend providers, configured via `config.yaml` or `hermes tools`: + +| Backend | Env Var | Search | Extract | Crawl | +|---------|---------|--------|---------|-------| +| **Firecrawl** (default) | `FIRECRAWL_API_KEY` | ✔ | ✔ | ✔ | +| **Parallel** | `PARALLEL_API_KEY` | ✔ | ✔ | — | +| **Tavily** | `TAVILY_API_KEY` | ✔ | ✔ | ✔ | +| **Exa** | `EXA_API_KEY` | ✔ | ✔ | — | + +Quick setup example: + +```yaml +web: + backend: firecrawl # firecrawl | parallel | tavily | exa +``` + +If `web.backend` is not set, the backend is auto-detected from whichever API key is available. Self-hosted Firecrawl is also supported via `FIRECRAWL_API_URL`. + +## Browser Automation + +Hermes includes full browser automation with multiple backend options for navigating websites, filling forms, and extracting information: + +- **Browserbase** — Managed cloud browsers with anti-bot tooling, CAPTCHA solving, and residential proxies +- **Browser Use** — Alternative cloud browser provider +- **Local Chrome via CDP** — Connect to your running Chrome instance using `/browser connect` +- **Local Chromium** — Headless local browser via the `agent-browser` CLI + +See [Browser Automation](/docs/user-guide/features/browser) for setup and usage. + +## Voice & TTS Providers + +Text-to-speech and speech-to-text across all messaging platforms: + +| Provider | Quality | Cost | API Key | +|----------|---------|------|---------| +| **Edge TTS** (default) | Good | Free | None needed | +| **ElevenLabs** | Excellent | Paid | `ELEVENLABS_API_KEY` | +| **OpenAI TTS** | Good | Paid | `VOICE_TOOLS_OPENAI_KEY` | +| **NeuTTS** | Good | Free | None needed | + +Speech-to-text uses Whisper for voice message transcription on Telegram, Discord, and WhatsApp. See [Voice & TTS](/docs/user-guide/features/tts) and [Voice Mode](/docs/user-guide/features/voice-mode) for details. + +## IDE & Editor Integration + +- **[IDE Integration (ACP)](/docs/user-guide/features/acp)** — Use Hermes Agent inside ACP-compatible editors such as VS Code, Zed, and JetBrains. Hermes runs as an ACP server, rendering chat messages, tool activity, file diffs, and terminal commands inside your editor. + +## Programmatic Access + +- **[API Server](/docs/user-guide/features/api-server)** — Expose Hermes as an OpenAI-compatible HTTP endpoint. Any frontend that speaks the OpenAI format — Open WebUI, LobeChat, LibreChat, NextChat, ChatBox — can connect and use Hermes as a backend with its full toolset. 
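+
+Any OpenAI SDK can talk to that endpoint. A minimal sketch with the official Python client (the base URL, port, and model name here are placeholders; substitute whatever your API server is configured with):
+
+```python
+from openai import OpenAI
+
+# Placeholder base URL and model name: substitute your server's values
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
+
+resp = client.chat.completions.create(
+    model="hermes",
+    messages=[{"role": "user", "content": "What can you do?"}],
+)
+print(resp.choices[0].message.content)
+```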
+ +## Memory & Personalization + +- **[Honcho Memory](/docs/user-guide/features/honcho)** — AI-native persistent memory for cross-session user modeling and personalization. Honcho adds deep user modeling via dialectic reasoning on top of Hermes's built-in memory system. + +## Training & Evaluation + +- **[RL Training](/docs/user-guide/features/rl-training)** — Generate trajectory data from agent sessions for reinforcement learning and model fine-tuning. +- **[Batch Processing](/docs/user-guide/features/batch-processing)** — Run the agent across hundreds of prompts in parallel, generating structured ShareGPT-format trajectory data for training data generation or evaluation. diff --git a/website/docs/integrations/providers.md b/website/docs/integrations/providers.md new file mode 100644 index 000000000..ab4c8f354 --- /dev/null +++ b/website/docs/integrations/providers.md @@ -0,0 +1,643 @@ +--- +title: "AI Providers" +sidebar_label: "AI Providers" +sidebar_position: 1 +--- + +# AI Providers + +This page covers setting up inference providers for Hermes Agent — from cloud APIs like OpenRouter and Anthropic, to self-hosted endpoints like Ollama and vLLM, to advanced routing and fallback configurations. You need at least one provider configured to use Hermes. + +## Inference Providers + +You need at least one way to connect to an LLM. Use `hermes model` to switch providers and models interactively, or configure directly: + +| Provider | Setup | +|----------|-------| +| **Nous Portal** | `hermes model` (OAuth, subscription-based) | +| **OpenAI Codex** | `hermes model` (ChatGPT OAuth, uses Codex models) | +| **GitHub Copilot** | `hermes model` (OAuth device code flow, `COPILOT_GITHUB_TOKEN`, `GH_TOKEN`, or `gh auth token`) | +| **GitHub Copilot ACP** | `hermes model` (spawns local `copilot --acp --stdio`) | +| **Anthropic** | `hermes model` (Claude Pro/Max via Claude Code auth, Anthropic API key, or manual setup-token) | +| **OpenRouter** | `OPENROUTER_API_KEY` in `~/.hermes/.env` | +| **AI Gateway** | `AI_GATEWAY_API_KEY` in `~/.hermes/.env` (provider: `ai-gateway`) | +| **z.ai / GLM** | `GLM_API_KEY` in `~/.hermes/.env` (provider: `zai`) | +| **Kimi / Moonshot** | `KIMI_API_KEY` in `~/.hermes/.env` (provider: `kimi-coding`) | +| **MiniMax** | `MINIMAX_API_KEY` in `~/.hermes/.env` (provider: `minimax`) | +| **MiniMax China** | `MINIMAX_CN_API_KEY` in `~/.hermes/.env` (provider: `minimax-cn`) | +| **Alibaba Cloud** | `DASHSCOPE_API_KEY` in `~/.hermes/.env` (provider: `alibaba`, aliases: `dashscope`, `qwen`) | +| **Kilo Code** | `KILOCODE_API_KEY` in `~/.hermes/.env` (provider: `kilocode`) | +| **OpenCode Zen** | `OPENCODE_ZEN_API_KEY` in `~/.hermes/.env` (provider: `opencode-zen`) | +| **OpenCode Go** | `OPENCODE_GO_API_KEY` in `~/.hermes/.env` (provider: `opencode-go`) | +| **DeepSeek** | `DEEPSEEK_API_KEY` in `~/.hermes/.env` (provider: `deepseek`) | +| **Hugging Face** | `HF_TOKEN` in `~/.hermes/.env` (provider: `huggingface`, aliases: `hf`) | +| **Custom Endpoint** | `hermes model` (saved in `config.yaml`) or `OPENAI_BASE_URL` + `OPENAI_API_KEY` in `~/.hermes/.env` | + +:::tip Model key alias +In the `model:` config section, you can use either `default:` or `model:` as the key name for your model ID. Both `model: { default: my-model }` and `model: { model: my-model }` work identically. +::: + +:::info Codex Note +The OpenAI Codex provider authenticates via device code (open a URL, enter a code). 
Hermes stores the resulting credentials in its own auth store under `~/.hermes/auth.json` and can import existing Codex CLI credentials from `~/.codex/auth.json` when present. No Codex CLI installation is required. +::: + +:::warning +Even when using Nous Portal, Codex, or a custom endpoint, some tools (vision, web summarization, MoA) use a separate "auxiliary" model — by default Gemini Flash via OpenRouter. An `OPENROUTER_API_KEY` enables these tools automatically. You can also configure which model and provider these tools use — see [Auxiliary Models](/docs/user-guide/configuration#auxiliary-models). +::: + +### Anthropic (Native) + +Use Claude models directly through the Anthropic API — no OpenRouter proxy needed. Supports three auth methods: + +```bash +# With an API key (pay-per-token) +export ANTHROPIC_API_KEY=*** +hermes chat --provider anthropic --model claude-sonnet-4-6 + +# Preferred: authenticate through `hermes model` +# Hermes will use Claude Code's credential store directly when available +hermes model + +# Manual override with a setup-token (fallback / legacy) +export ANTHROPIC_TOKEN=*** # setup-token or manual OAuth token +hermes chat --provider anthropic + +# Auto-detect Claude Code credentials (if you already use Claude Code) +hermes chat --provider anthropic # reads Claude Code credential files automatically +``` + +When you choose Anthropic OAuth through `hermes model`, Hermes prefers Claude Code's own credential store over copying the token into `~/.hermes/.env`. That keeps refreshable Claude credentials refreshable. + +Or set it permanently: +```yaml +model: + provider: "anthropic" + default: "claude-sonnet-4-6" +``` + +:::tip Aliases +`--provider claude` and `--provider claude-code` also work as shorthand for `--provider anthropic`. +::: + +### GitHub Copilot + +Hermes supports GitHub Copilot as a first-class provider with two modes: + +**`copilot` — Direct Copilot API** (recommended). Uses your GitHub Copilot subscription to access GPT-5.x, Claude, Gemini, and other models through the Copilot API. + +```bash +hermes chat --provider copilot --model gpt-5.4 +``` + +**Authentication options** (checked in this order): + +1. `COPILOT_GITHUB_TOKEN` environment variable +2. `GH_TOKEN` environment variable +3. `GITHUB_TOKEN` environment variable +4. `gh auth token` CLI fallback + +If no token is found, `hermes model` offers an **OAuth device code login** — the same flow used by the Copilot CLI and opencode. + +:::warning Token types +The Copilot API does **not** support classic Personal Access Tokens (`ghp_*`). Supported token types: + +| Type | Prefix | How to get | +|------|--------|------------| +| OAuth token | `gho_` | `hermes model` → GitHub Copilot → Login with GitHub | +| Fine-grained PAT | `github_pat_` | GitHub Settings → Developer settings → Fine-grained tokens (needs **Copilot Requests** permission) | +| GitHub App token | `ghu_` | Via GitHub App installation | + +If your `gh auth token` returns a `ghp_*` token, use `hermes model` to authenticate via OAuth instead. +::: + +**API routing**: GPT-5+ models (except `gpt-5-mini`) automatically use the Responses API. All other models (GPT-4o, Claude, Gemini, etc.) use Chat Completions. Models are auto-detected from the live Copilot catalog. + +**`copilot-acp` — Copilot ACP agent backend**. 
Spawns the local Copilot CLI as a subprocess: + +```bash +hermes chat --provider copilot-acp --model copilot-acp +# Requires the GitHub Copilot CLI in PATH and an existing `copilot login` session +``` + +**Permanent config:** +```yaml +model: + provider: "copilot" + default: "gpt-5.4" +``` + +| Environment variable | Description | +|---------------------|-------------| +| `COPILOT_GITHUB_TOKEN` | GitHub token for Copilot API (first priority) | +| `HERMES_COPILOT_ACP_COMMAND` | Override the Copilot CLI binary path (default: `copilot`) | +| `HERMES_COPILOT_ACP_ARGS` | Override ACP args (default: `--acp --stdio`) | + +### First-Class Chinese AI Providers + +These providers have built-in support with dedicated provider IDs. Set the API key and use `--provider` to select: + +```bash +# z.ai / ZhipuAI GLM +hermes chat --provider zai --model glm-4-plus +# Requires: GLM_API_KEY in ~/.hermes/.env + +# Kimi / Moonshot AI +hermes chat --provider kimi-coding --model moonshot-v1-auto +# Requires: KIMI_API_KEY in ~/.hermes/.env + +# MiniMax (global endpoint) +hermes chat --provider minimax --model MiniMax-M2.7 +# Requires: MINIMAX_API_KEY in ~/.hermes/.env + +# MiniMax (China endpoint) +hermes chat --provider minimax-cn --model MiniMax-M2.7 +# Requires: MINIMAX_CN_API_KEY in ~/.hermes/.env + +# Alibaba Cloud / DashScope (Qwen models) +hermes chat --provider alibaba --model qwen3.5-plus +# Requires: DASHSCOPE_API_KEY in ~/.hermes/.env +``` + +Or set the provider permanently in `config.yaml`: +```yaml +model: + provider: "zai" # or: kimi-coding, minimax, minimax-cn, alibaba + default: "glm-4-plus" +``` + +Base URLs can be overridden with `GLM_BASE_URL`, `KIMI_BASE_URL`, `MINIMAX_BASE_URL`, `MINIMAX_CN_BASE_URL`, or `DASHSCOPE_BASE_URL` environment variables. + +### Hugging Face Inference Providers + +[Hugging Face Inference Providers](https://huggingface.co/docs/inference-providers) routes to 20+ open models through a unified OpenAI-compatible endpoint (`router.huggingface.co/v1`). Requests are automatically routed to the fastest available backend (Groq, Together, SambaNova, etc.) with automatic failover. + +```bash +# Use any available model +hermes chat --provider huggingface --model Qwen/Qwen3-235B-A22B-Thinking-2507 +# Requires: HF_TOKEN in ~/.hermes/.env + +# Short alias +hermes chat --provider hf --model deepseek-ai/DeepSeek-V3.2 +``` + +Or set it permanently in `config.yaml`: +```yaml +model: + provider: "huggingface" + default: "Qwen/Qwen3-235B-A22B-Thinking-2507" +``` + +Get your token at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) — make sure to enable the "Make calls to Inference Providers" permission. Free tier included ($0.10/month credit, no markup on provider rates). + +You can append routing suffixes to model names: `:fastest` (default), `:cheapest`, or `:provider_name` to force a specific backend. + +The base URL can be overridden with `HF_BASE_URL`. + +## Custom & Self-Hosted LLM Providers + +Hermes Agent works with **any OpenAI-compatible API endpoint**. If a server implements `/v1/chat/completions`, you can point Hermes at it. This means you can use local models, GPU inference servers, multi-provider routers, or any third-party API. 
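+
+Before pointing Hermes at a new endpoint, it can save a debugging round trip to smoke-test the server directly. A rough sketch (the URL is a placeholder, and you may need an `Authorization: Bearer ...` header if your server checks keys):
+
+```python
+import requests
+
+BASE_URL = "http://localhost:8000/v1"  # placeholder: your endpoint here
+
+# The server should list at least one model...
+models = requests.get(f"{BASE_URL}/models", timeout=10).json()["data"]
+print("models:", [m["id"] for m in models])
+
+# ...and a minimal chat completion should round-trip
+resp = requests.post(
+    f"{BASE_URL}/chat/completions",
+    json={
+        "model": models[0]["id"],
+        "messages": [{"role": "user", "content": "ping"}],
+    },
+    timeout=60,
+)
+print(resp.json()["choices"][0]["message"]["content"])
+```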
+ +### General Setup + +Three ways to configure a custom endpoint: + +**Interactive setup (recommended):** +```bash +hermes model +# Select "Custom endpoint (self-hosted / VLLM / etc.)" +# Enter: API base URL, API key, Model name +``` + +**Manual config (`config.yaml`):** +```yaml +# In ~/.hermes/config.yaml +model: + default: your-model-name + provider: custom + base_url: http://localhost:8000/v1 + api_key: your-key-or-leave-empty-for-local +``` + +**Environment variables (`.env` file):** +```bash +# Add to ~/.hermes/.env +OPENAI_BASE_URL=http://localhost:8000/v1 +OPENAI_API_KEY=your-key # Any non-empty string for local servers +LLM_MODEL=your-model-name +``` + +All three approaches end up in the same runtime path. `hermes model` persists provider, model, and base URL to `config.yaml` so later sessions keep using that endpoint even if env vars are not set. + +### Switching Models with `/model` + +Once a custom endpoint is configured, you can switch models mid-session: + +``` +/model custom:qwen-2.5 # Switch to a model on your custom endpoint +/model custom # Auto-detect the model from the endpoint +/model openrouter:claude-sonnet-4 # Switch back to a cloud provider +``` + +If you have **named custom providers** configured (see below), use the triple syntax: + +``` +/model custom:local:qwen-2.5 # Use the "local" custom provider with model qwen-2.5 +/model custom:work:llama3 # Use the "work" custom provider with llama3 +``` + +When switching providers, Hermes persists the base URL and provider to config so the change survives restarts. When switching away from a custom endpoint to a built-in provider, the stale base URL is automatically cleared. + +:::tip +`/model custom` (bare, no model name) queries your endpoint's `/models` API and auto-selects the model if exactly one is loaded. Useful for local servers running a single model. +::: + +Everything below follows this same pattern — just change the URL, key, and model name. + +--- + +### Ollama — Local Models, Zero Config + +[Ollama](https://ollama.com/) runs open-weight models locally with one command. Best for: quick local experimentation, privacy-sensitive work, offline use. + +```bash +# Install and run a model +ollama pull llama3.1:70b +ollama serve # Starts on port 11434 + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:11434/v1 +OPENAI_API_KEY=ollama # Any non-empty string +LLM_MODEL=llama3.1:70b +``` + +Ollama's OpenAI-compatible endpoint supports chat completions, streaming, and tool calling (for supported models). No GPU required for smaller models — Ollama handles CPU inference automatically. + +:::tip +List available models with `ollama list`. Pull any model from the [Ollama library](https://ollama.com/library) with `ollama pull `. +::: + +--- + +### vLLM — High-Performance GPU Inference + +[vLLM](https://docs.vllm.ai/) is the standard for production LLM serving. Best for: maximum throughput on GPU hardware, serving large models, continuous batching. + +```bash +# Start vLLM server +pip install vllm +vllm serve meta-llama/Llama-3.1-70B-Instruct \ + --port 8000 \ + --tensor-parallel-size 2 # Multi-GPU + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:8000/v1 +OPENAI_API_KEY=dummy +LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct +``` + +vLLM supports tool calling, structured output, and multi-modal models. Use `--enable-auto-tool-choice` and `--tool-call-parser hermes` for Hermes-format tool calling with NousResearch models. 
+ +--- + +### SGLang — Fast Serving with RadixAttention + +[SGLang](https://github.com/sgl-project/sglang) is an alternative to vLLM with RadixAttention for KV cache reuse. Best for: multi-turn conversations (prefix caching), constrained decoding, structured output. + +```bash +# Start SGLang server +pip install "sglang[all]" +python -m sglang.launch_server \ + --model meta-llama/Llama-3.1-70B-Instruct \ + --port 8000 \ + --tp 2 + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:8000/v1 +OPENAI_API_KEY=dummy +LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct +``` + +--- + +### llama.cpp / llama-server — CPU & Metal Inference + +[llama.cpp](https://github.com/ggml-org/llama.cpp) runs quantized models on CPU, Apple Silicon (Metal), and consumer GPUs. Best for: running models without a datacenter GPU, Mac users, edge deployment. + +```bash +# Build and start llama-server +cmake -B build && cmake --build build --config Release +./build/bin/llama-server \ + -m models/llama-3.1-8b-instruct-Q4_K_M.gguf \ + --port 8080 --host 0.0.0.0 + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:8080/v1 +OPENAI_API_KEY=dummy +LLM_MODEL=llama-3.1-8b-instruct +``` + +:::tip +Download GGUF models from [Hugging Face](https://huggingface.co/models?library=gguf). Q4_K_M quantization offers the best balance of quality vs. memory usage. +::: + +--- + +### LiteLLM Proxy — Multi-Provider Gateway + +[LiteLLM](https://docs.litellm.ai/) is an OpenAI-compatible proxy that unifies 100+ LLM providers behind a single API. Best for: switching between providers without config changes, load balancing, fallback chains, budget controls. + +```bash +# Install and start +pip install "litellm[proxy]" +litellm --model anthropic/claude-sonnet-4 --port 4000 + +# Or with a config file for multiple models: +litellm --config litellm_config.yaml --port 4000 + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:4000/v1 +OPENAI_API_KEY=sk-your-litellm-key +LLM_MODEL=anthropic/claude-sonnet-4 +``` + +Example `litellm_config.yaml` with fallback: +```yaml +model_list: + - model_name: "best" + litellm_params: + model: anthropic/claude-sonnet-4 + api_key: sk-ant-... + - model_name: "best" + litellm_params: + model: openai/gpt-4o + api_key: sk-... +router_settings: + routing_strategy: "latency-based-routing" +``` + +--- + +### ClawRouter — Cost-Optimized Routing + +[ClawRouter](https://github.com/BlockRunAI/ClawRouter) by BlockRunAI is a local routing proxy that auto-selects models based on query complexity. It classifies requests across 14 dimensions and routes to the cheapest model that can handle the task. Payment is via USDC cryptocurrency (no API keys). + +```bash +# Install and start +npx @blockrun/clawrouter # Starts on port 8402 + +# Configure Hermes +OPENAI_BASE_URL=http://localhost:8402/v1 +OPENAI_API_KEY=dummy +LLM_MODEL=blockrun/auto # or: blockrun/eco, blockrun/premium, blockrun/agentic +``` + +Routing profiles: +| Profile | Strategy | Savings | +|---------|----------|---------| +| `blockrun/auto` | Balanced quality/cost | 74-100% | +| `blockrun/eco` | Cheapest possible | 95-100% | +| `blockrun/premium` | Best quality models | 0% | +| `blockrun/free` | Free models only | 100% | +| `blockrun/agentic` | Optimized for tool use | varies | + +:::note +ClawRouter requires a USDC-funded wallet on Base or Solana for payment. All requests route through BlockRun's backend API. Run `npx @blockrun/clawrouter doctor` to check wallet status. +::: + +--- + +### Other Compatible Providers + +Any service with an OpenAI-compatible API works. 
Some popular options:
+
+| Provider | Base URL | Notes |
+|----------|----------|-------|
+| [Together AI](https://together.ai) | `https://api.together.xyz/v1` | Cloud-hosted open models |
+| [Groq](https://groq.com) | `https://api.groq.com/openai/v1` | Ultra-fast inference |
+| [DeepSeek](https://deepseek.com) | `https://api.deepseek.com/v1` | DeepSeek models |
+| [Fireworks AI](https://fireworks.ai) | `https://api.fireworks.ai/inference/v1` | Fast open model hosting |
+| [Cerebras](https://cerebras.ai) | `https://api.cerebras.ai/v1` | Wafer-scale chip inference |
+| [Mistral AI](https://mistral.ai) | `https://api.mistral.ai/v1` | Mistral models |
+| [OpenAI](https://openai.com) | `https://api.openai.com/v1` | Direct OpenAI access |
+| [Azure OpenAI](https://azure.microsoft.com) | `https://YOUR.openai.azure.com/` | Enterprise OpenAI |
+| [LocalAI](https://localai.io) | `http://localhost:8080/v1` | Self-hosted, multi-model |
+| [Jan](https://jan.ai) | `http://localhost:1337/v1` | Desktop app with local models |
+
+```bash
+# Example: Together AI
+OPENAI_BASE_URL=https://api.together.xyz/v1
+OPENAI_API_KEY=your-together-key
+LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct-Turbo
+```
+
+---
+
+### Context Length Detection
+
+Hermes uses a multi-source resolution chain to detect the correct context window for your model and provider:
+
+1. **Config override** — `model.context_length` in config.yaml (highest priority)
+2. **Custom provider per-model** — `custom_providers[].models.<model>.context_length`
+3. **Persistent cache** — previously discovered values (survives restarts)
+4. **Endpoint `/models`** — queries your server's API (local/custom endpoints)
+5. **Anthropic `/v1/models`** — queries Anthropic's API for `max_input_tokens` (API-key users only)
+6. **OpenRouter API** — live model metadata from OpenRouter
+7. **Nous Portal** — suffix-matches Nous model IDs against OpenRouter metadata
+8. **[models.dev](https://models.dev)** — community-maintained registry with provider-specific context lengths for 3800+ models across 100+ providers
+9. **Fallback defaults** — broad model family patterns (128K default)
+
+For most setups this works out of the box. The system is provider-aware — the same model can have different context limits depending on who serves it (e.g., `claude-opus-4.6` is 1M on Anthropic direct but 128K on GitHub Copilot).
+
+To set the context length explicitly, add `context_length` to your model config:
+
+```yaml
+model:
+  default: "qwen3.5:9b"
+  base_url: "http://localhost:8080/v1"
+  context_length: 131072 # tokens
+```
+
+For custom endpoints, you can also set context length per model:
+
+```yaml
+custom_providers:
+  - name: "My Local LLM"
+    base_url: "http://localhost:11434/v1"
+    models:
+      qwen3.5:27b:
+        context_length: 32768
+      deepseek-r1:70b:
+        context_length: 65536
+```
+
+`hermes model` will prompt for context length when configuring a custom endpoint. Leave it blank for auto-detection.
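+
+To see what auto-detection has to work with, you can query the endpoint's `/models` route yourself — a sketch, assuming a local OpenAI-compatible server on port 8000:
+
+```bash
+# Servers such as vLLM report a per-model "max_model_len" field here;
+# if no usable field is present, Hermes falls through to the later
+# sources in the chain above.
+curl -s http://localhost:8000/v1/models
+```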
+ +:::tip When to set this manually +- You're using Ollama with a custom `num_ctx` that's lower than the model's maximum +- You want to limit context below the model's maximum (e.g., 8k on a 128k model to save VRAM) +- You're running behind a proxy that doesn't expose `/v1/models` +::: + +--- + +### Named Custom Providers + +If you work with multiple custom endpoints (e.g., a local dev server and a remote GPU server), you can define them as named custom providers in `config.yaml`: + +```yaml +custom_providers: + - name: local + base_url: http://localhost:8080/v1 + # api_key omitted — Hermes uses "no-key-required" for keyless local servers + - name: work + base_url: https://gpu-server.internal.corp/v1 + api_key: corp-api-key + api_mode: chat_completions # optional, auto-detected from URL + - name: anthropic-proxy + base_url: https://proxy.example.com/anthropic + api_key: proxy-key + api_mode: anthropic_messages # for Anthropic-compatible proxies +``` + +Switch between them mid-session with the triple syntax: + +``` +/model custom:local:qwen-2.5 # Use the "local" endpoint with qwen-2.5 +/model custom:work:llama3-70b # Use the "work" endpoint with llama3-70b +/model custom:anthropic-proxy:claude-sonnet-4 # Use the proxy +``` + +You can also select named custom providers from the interactive `hermes model` menu. + +--- + +### Choosing the Right Setup + +| Use Case | Recommended | +|----------|-------------| +| **Just want it to work** | OpenRouter (default) or Nous Portal | +| **Local models, easy setup** | Ollama | +| **Production GPU serving** | vLLM or SGLang | +| **Mac / no GPU** | Ollama or llama.cpp | +| **Multi-provider routing** | LiteLLM Proxy or OpenRouter | +| **Cost optimization** | ClawRouter or OpenRouter with `sort: "price"` | +| **Maximum privacy** | Ollama, vLLM, or llama.cpp (fully local) | +| **Enterprise / Azure** | Azure OpenAI with custom endpoint | +| **Chinese AI models** | z.ai (GLM), Kimi/Moonshot, or MiniMax (first-class providers) | + +:::tip +You can switch between providers at any time with `hermes model` — no restart required. Your conversation history, memory, and skills carry over regardless of which provider you use. +::: + +## Optional API Keys + +| Feature | Provider | Env Variable | +|---------|----------|--------------| +| Web scraping | [Firecrawl](https://firecrawl.dev/) | `FIRECRAWL_API_KEY`, `FIRECRAWL_API_URL` | +| Browser automation | [Browserbase](https://browserbase.com/) | `BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID` | +| Image generation | [FAL](https://fal.ai/) | `FAL_KEY` | +| Premium TTS voices | [ElevenLabs](https://elevenlabs.io/) | `ELEVENLABS_API_KEY` | +| OpenAI TTS + voice transcription | [OpenAI](https://platform.openai.com/api-keys) | `VOICE_TOOLS_OPENAI_KEY` | +| RL Training | [Tinker](https://tinker-console.thinkingmachines.ai/) + [WandB](https://wandb.ai/) | `TINKER_API_KEY`, `WANDB_API_KEY` | +| Cross-session user modeling | [Honcho](https://honcho.dev/) | `HONCHO_API_KEY` | + +### Self-Hosting Firecrawl + +By default, Hermes uses the [Firecrawl cloud API](https://firecrawl.dev/) for web search and scraping. If you prefer to run Firecrawl locally, you can point Hermes at a self-hosted instance instead. See Firecrawl's [SELF_HOST.md](https://github.com/firecrawl/firecrawl/blob/main/SELF_HOST.md) for complete setup instructions. + +**What you get:** No API key required, no rate limits, no per-page costs, full data sovereignty. 
+ +**What you lose:** The cloud version uses Firecrawl's proprietary "Fire-engine" for advanced anti-bot bypassing (Cloudflare, CAPTCHAs, IP rotation). Self-hosted uses basic fetch + Playwright, so some protected sites may fail. Search uses DuckDuckGo instead of Google. + +**Setup:** + +1. Clone and start the Firecrawl Docker stack (5 containers: API, Playwright, Redis, RabbitMQ, PostgreSQL — requires ~4-8 GB RAM): + ```bash + git clone https://github.com/firecrawl/firecrawl + cd firecrawl + # In .env, set: USE_DB_AUTHENTICATION=false, HOST=0.0.0.0, PORT=3002 + docker compose up -d + ``` + +2. Point Hermes at your instance (no API key needed): + ```bash + hermes config set FIRECRAWL_API_URL http://localhost:3002 + ``` + +You can also set both `FIRECRAWL_API_KEY` and `FIRECRAWL_API_URL` if your self-hosted instance has authentication enabled. + +## OpenRouter Provider Routing + +When using OpenRouter, you can control how requests are routed across providers. Add a `provider_routing` section to `~/.hermes/config.yaml`: + +```yaml +provider_routing: + sort: "throughput" # "price" (default), "throughput", or "latency" + # only: ["anthropic"] # Only use these providers + # ignore: ["deepinfra"] # Skip these providers + # order: ["anthropic", "google"] # Try providers in this order + # require_parameters: true # Only use providers that support all request params + # data_collection: "deny" # Exclude providers that may store/train on data +``` + +**Shortcuts:** Append `:nitro` to any model name for throughput sorting (e.g., `anthropic/claude-sonnet-4:nitro`), or `:floor` for price sorting. + +## Fallback Model + +Configure a backup provider:model that Hermes switches to automatically when your primary model fails (rate limits, server errors, auth failures): + +```yaml +fallback_model: + provider: openrouter # required + model: anthropic/claude-sonnet-4 # required + # base_url: http://localhost:8000/v1 # optional, for custom endpoints + # api_key_env: MY_CUSTOM_KEY # optional, env var name for custom endpoint API key +``` + +When activated, the fallback swaps the model and provider mid-session without losing your conversation. It fires **at most once** per session. + +Supported providers: `openrouter`, `nous`, `openai-codex`, `copilot`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `custom`. + +:::tip +Fallback is configured exclusively through `config.yaml` — there are no environment variables for it. For full details on when it triggers, supported providers, and how it interacts with auxiliary tasks and delegation, see [Fallback Providers](/docs/user-guide/features/fallback-providers). +::: + +## Smart Model Routing + +Optional cheap-vs-strong routing lets Hermes keep your main model for complex work while sending very short/simple turns to a cheaper model. + +```yaml +smart_model_routing: + enabled: true + max_simple_chars: 160 + max_simple_words: 28 + cheap_model: + provider: openrouter + model: google/gemini-2.5-flash + # base_url: http://localhost:8000/v1 # optional custom endpoint + # api_key_env: MY_CUSTOM_KEY # optional env var name for that endpoint's API key +``` + +How it works: +- If a turn is short, single-line, and does not look code/tool/debug heavy, Hermes may route it to `cheap_model` +- If the turn looks complex, Hermes stays on your primary model/provider +- If the cheap route cannot be resolved cleanly, Hermes falls back to the primary model automatically + +This is intentionally conservative. 
It is meant for quick, low-stakes turns like:
+- short factual questions
+- quick rewrites
+- lightweight summaries
+
+It will avoid routing prompts that look like:
+- coding/debugging work
+- tool-heavy requests
+- long or multi-line analysis asks
+
+Use this when you want lower latency or cost without fully changing your default model.
+
+---
+
+## See Also
+
+- [Configuration](/docs/user-guide/configuration) — General configuration (directory structure, config precedence, terminal backends, memory, compression, and more)
+- [Environment Variables](/docs/reference/environment-variables) — Complete reference of all environment variables
diff --git a/website/docs/reference/cli-commands.md b/website/docs/reference/cli-commands.md
index a9f12d76b..d10c29e03 100644
--- a/website/docs/reference/cli-commands.md
+++ b/website/docs/reference/cli-commands.md
@@ -21,6 +21,7 @@ hermes [global-options] [subcommand/options]
 | Option | Description |
 |--------|-------------|
 | `--version`, `-V` | Show version and exit. |
+| `--profile <name>`, `-p <name>` | Select which Hermes profile to use for this invocation. Overrides the sticky default set by `hermes profile use`. |
 | `--resume <id>`, `-r <id>` | Resume a previous session by ID or title. |
 | `--continue [name]`, `-c [name]` | Resume the most recent session, or the most recent session matching a title. |
 | `--worktree`, `-w` | Start in an isolated git worktree for parallel-agent workflows. |
@@ -37,6 +38,7 @@ hermes [global-options] [subcommand/options]
 | `hermes setup` | Interactive setup wizard for all or part of the configuration. |
 | `hermes whatsapp` | Configure and pair the WhatsApp bridge. |
 | `hermes login` / `logout` | Authenticate with OAuth-backed providers. |
+| `hermes auth` | Manage credential pools — add, list, remove, reset, set strategy. |
 | `hermes status` | Show agent, auth, and platform status. |
 | `hermes cron` | Inspect and tick the cron scheduler. |
 | `hermes webhook` | Manage dynamic webhook subscriptions for event-driven activation. |
@@ -46,10 +48,14 @@ hermes [global-options] [subcommand/options]
 | `hermes skills` | Browse, install, publish, audit, and configure skills. |
 | `hermes honcho` | Manage Honcho cross-session memory integration. |
 | `hermes acp` | Run Hermes as an ACP server for editor integration. |
+| `hermes mcp` | Manage MCP server configurations and run Hermes as an MCP server. |
+| `hermes plugins` | Manage Hermes Agent plugins (install, enable, disable, remove). |
 | `hermes tools` | Configure enabled tools per platform. |
 | `hermes sessions` | Browse, export, prune, rename, and delete sessions. |
 | `hermes insights` | Show token/cost/activity analytics. |
 | `hermes claw` | OpenClaw migration helpers. |
+| `hermes profile` | Manage profiles — multiple isolated Hermes instances. |
+| `hermes completion` | Print shell completion scripts (bash/zsh). |
 | `hermes version` | Show version information. |
 | `hermes update` | Pull latest code and reinstall dependencies. |
 | `hermes uninstall` | Remove Hermes from the system. |
@@ -67,7 +73,7 @@ Common options:
 | `-q`, `--query "..."` | One-shot, non-interactive prompt. |
 | `-m`, `--model <model>` | Override the model for this run. |
 | `-t`, `--toolsets <list>` | Enable a comma-separated set of toolsets. |
-| `--provider <provider>` | Force a provider: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `alibaba`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`. |
+| `--provider <provider>` | Force a provider: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot-acp`, `copilot`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`. |
 | `-s`, `--skills <skills>` | Preload one or more skills for the session (can be repeated or comma-separated). |
 | `-v`, `--verbose` | Verbose output. |
 | `-Q`, `--quiet` | Programmatic mode: suppress banner/spinner/tool previews. |
@@ -76,6 +82,7 @@ Common options:
 | `--checkpoints` | Enable filesystem checkpoints before destructive file changes. |
 | `--yolo` | Skip approval prompts. |
 | `--pass-session-id` | Pass the session ID into the system prompt. |
+| `--source <source>` | Session source tag for filtering (default: `cli`). Use `tool` for third-party integrations that should not appear in user session lists. |
 
 Examples:
 
@@ -186,6 +193,22 @@ Useful options for `login`:
 - `--ca-bundle <path>`
 - `--insecure`
+
+## `hermes auth`
+
+Manage credential pools for same-provider key rotation. See [Credential Pools](/docs/user-guide/features/credential-pools) for full documentation.
+
+```bash
+hermes auth # Interactive wizard
+hermes auth list # Show all pools
+hermes auth list openrouter # Show specific provider
+hermes auth add openrouter --api-key sk-or-v1-xxx # Add API key
+hermes auth add anthropic --type oauth # Add OAuth credential
+hermes auth remove openrouter 2 # Remove by index
+hermes auth reset openrouter # Clear cooldowns
+```
+
+Subcommands: `add`, `list`, `remove`, `reset`. When called with no subcommand, launches the interactive management wizard.
+
 ## `hermes status`
 
 ```bash
 hermes status
 ```
@@ -507,6 +530,56 @@ hermes claw migrate --preset user-data --overwrite
 hermes claw migrate --source /home/user/old-openclaw
 ```
 
+## `hermes profile`
+
+```bash
+hermes profile
+```
+
+Manage profiles — multiple isolated Hermes instances, each with its own config, sessions, skills, and home directory.
+
+| Subcommand | Description |
+|------------|-------------|
+| `list` | List all profiles. |
+| `use <name>` | Set a sticky default profile. |
+| `create <name> [--clone] [--no-alias]` | Create a new profile. `--clone` copies config, `.env`, and `SOUL.md` from the active profile. |
+| `delete <name> [-y]` | Delete a profile. |
+| `show <name>` | Show profile details (home directory, config, etc.). |
+| `alias <name> [--remove] [--name NAME]` | Manage wrapper scripts for quick profile access. |
+| `rename <old> <new>` | Rename a profile. |
+| `export <name> [-o FILE]` | Export a profile to a `.tar.gz` archive. |
+| `import <archive> [--name NAME]` | Import a profile from a `.tar.gz` archive. |
+
+Examples:
+
+```bash
+hermes profile list
+hermes profile create work --clone
+hermes profile use work
+hermes profile alias work --name h-work
+hermes profile export work -o work-backup.tar.gz
+hermes profile import work-backup.tar.gz --name restored
+hermes -p work chat -q "Hello from work profile"
+```
+
+## `hermes completion`
+
+```bash
+hermes completion [bash|zsh]
+```
+
+Print a shell completion script to stdout. Source the output in your shell profile for tab-completion of Hermes commands, subcommands, and profile names.
+ +Examples: + +```bash +# Bash +hermes completion bash >> ~/.bashrc + +# Zsh +hermes completion zsh >> ~/.zshrc +``` + ## Maintenance commands | Command | Description | diff --git a/website/docs/reference/environment-variables.md b/website/docs/reference/environment-variables.md index 715c9fbc1..fd57ffb02 100644 --- a/website/docs/reference/environment-variables.md +++ b/website/docs/reference/environment-variables.md @@ -63,7 +63,7 @@ For native Anthropic auth, Hermes prefers Claude Code's own credential files whe | Variable | Description | |----------|-------------| -| `HERMES_INFERENCE_PROVIDER` | Override provider selection: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`, `alibaba` (default: `auto`) | +| `HERMES_INFERENCE_PROVIDER` | Override provider selection: `auto`, `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `kilocode`, `alibaba`, `deepseek`, `opencode-zen`, `opencode-go`, `ai-gateway` (default: `auto`) | | `HERMES_PORTAL_BASE_URL` | Override Nous Portal URL (for development/testing) | | `NOUS_INFERENCE_BASE_URL` | Override Nous inference API URL | | `HERMES_NOUS_MIN_KEY_TTL_SECONDS` | Min agent key TTL before re-mint (default: 1800 = 30min) | @@ -80,6 +80,7 @@ For native Anthropic auth, Hermes prefers Claude Code's own credential files whe | `FIRECRAWL_API_KEY` | Web scraping ([firecrawl.dev](https://firecrawl.dev/)) | | `FIRECRAWL_API_URL` | Custom Firecrawl API endpoint for self-hosted instances (optional) | | `TAVILY_API_KEY` | Tavily API key for AI-native web search, extract, and crawl ([app.tavily.com](https://app.tavily.com/home)) | +| `EXA_API_KEY` | Exa API key for AI-native web search and contents ([exa.ai](https://exa.ai/)) | | `BROWSERBASE_API_KEY` | Browser automation ([browserbase.com](https://browserbase.com/)) | | `BROWSERBASE_PROJECT_ID` | Browserbase project ID | | `BROWSER_USE_API_KEY` | Browser Use cloud browser API key ([browser-use.com](https://browser-use.com/)) | @@ -152,6 +153,9 @@ For native Anthropic auth, Hermes prefers Claude Code's own credential files whe | `TELEGRAM_ALLOWED_USERS` | Comma-separated user IDs allowed to use the bot | | `TELEGRAM_HOME_CHANNEL` | Default Telegram chat/channel for cron delivery | | `TELEGRAM_HOME_CHANNEL_NAME` | Display name for the Telegram home channel | +| `TELEGRAM_WEBHOOK_URL` | Public HTTPS URL for webhook mode (enables webhook instead of polling) | +| `TELEGRAM_WEBHOOK_PORT` | Local listen port for webhook server (default: `8443`) | +| `TELEGRAM_WEBHOOK_SECRET` | Secret token for verifying updates come from Telegram | | `DISCORD_BOT_TOKEN` | Discord bot token | | `DISCORD_ALLOWED_USERS` | Comma-separated Discord user IDs allowed to use the bot | | `DISCORD_HOME_CHANNEL` | Default Discord channel for cron delivery | diff --git a/website/docs/reference/faq.md b/website/docs/reference/faq.md index e207420f8..50302dae8 100644 --- a/website/docs/reference/faq.md +++ b/website/docs/reference/faq.md @@ -254,7 +254,7 @@ custom_providers: context_length: 32768 ``` -See [Context Length Detection](../user-guide/configuration.md#context-length-detection) for how auto-detection works and all override options. +See [Context Length Detection](../integrations/providers.md#context-length-detection) for how auto-detection works and all override options. 
 ---
diff --git a/website/docs/reference/mcp-config-reference.md b/website/docs/reference/mcp-config-reference.md
index 5f78185b9..a87478f91 100644
--- a/website/docs/reference/mcp-config-reference.md
+++ b/website/docs/reference/mcp-config-reference.md
@@ -48,6 +48,8 @@ mcp_servers:
 | `timeout` | number | both | Tool call timeout |
 | `connect_timeout` | number | both | Initial connection timeout |
 | `tools` | mapping | both | Filtering and utility-tool policy |
+| `auth` | string | HTTP | Authentication method. Set to `oauth` to enable OAuth 2.1 with PKCE |
+| `sampling` | mapping | both | Server-initiated LLM request policy (see MCP guide) |
 
 ## `tools` policy keys
 
@@ -213,3 +215,33 @@ Utility tools follow the same prefixing pattern:
 - `mcp__read_resource`
 - `mcp__list_prompts`
 - `mcp__get_prompt`
+
+### Name sanitization
+
+Hyphens (`-`) and dots (`.`) in both server names and tool names are replaced with underscores before registration. This ensures tool names are valid identifiers for LLM function-calling APIs.
+
+For example, a server named `my-api` exposing a tool called `list-items.v2` becomes:
+
+```text
+mcp_my_api_list_items_v2
+```
+
+Keep this in mind when writing `include` / `exclude` filters — use the **original** MCP tool name (with hyphens/dots), not the sanitized version.
+
+## OAuth 2.1 authentication
+
+For HTTP servers that require OAuth, set `auth: oauth` on the server entry:
+
+```yaml
+mcp_servers:
+  protected_api:
+    url: "https://mcp.example.com/mcp"
+    auth: oauth
+```
+
+Behavior:
+- Hermes uses the MCP SDK's OAuth 2.1 PKCE flow (metadata discovery, dynamic client registration, token exchange, and refresh)
+- On first connect, a browser window opens for authorization
+- Tokens are persisted to `~/.hermes/mcp-tokens/<server>.json` and reused across sessions
+- Token refresh is automatic; re-authorization only happens when refresh fails
+- Only applies to HTTP/StreamableHTTP transport (`url`-based servers)
diff --git a/website/docs/reference/profile-commands.md b/website/docs/reference/profile-commands.md
index a59e27574..d2d7adb8f 100644
--- a/website/docs/reference/profile-commands.md
+++ b/website/docs/reference/profile-commands.md
@@ -78,7 +78,7 @@ Creates a new profile.
 | `<name>` | Name for the new profile. Must be a valid directory name (alphanumeric, hyphens, underscores). |
 | `--clone` | Copy `config.yaml`, `.env`, and `SOUL.md` from the current profile. |
 | `--clone-all` | Copy everything (config, memories, skills, sessions, state) from the current profile. |
-| `--from <profile>` | Clone from a specific profile instead of the current one. Used with `--clone` or `--clone-all`. |
+| `--clone-from <profile>` | Clone from a specific profile instead of the current one. Used with `--clone` or `--clone-all`. |
 
 **Examples:**
 
@@ -93,7 +93,7 @@ hermes profile create work --clone
 hermes profile create backup --clone-all
 
 # Clone config from a specific profile
-hermes profile create work2 --clone --from work
+hermes profile create work2 --clone --clone-from work
 ```
 
 ## `hermes profile delete`
 
@@ -123,14 +123,14 @@ This permanently deletes the profile's entire directory including all config, me
 
 ## `hermes profile show`
 
 ```bash
-hermes profile show [name]
+hermes profile show <name>
 ```
 
 Displays details about a profile including its home directory, configured model, active platforms, and disk usage.
 
 | Argument | Description |
 |----------|-------------|
-| `[name]` | Profile to inspect. Defaults to the current active profile if omitted. |
+| `<name>` | Profile to inspect. |
 
 **Example:**
 
@@ -147,20 +147,28 @@ Disk: 48 MB
 ```
 
 ## `hermes profile alias`
 
 ```bash
-hermes profile alias <name>
+hermes profile alias <name> [options]
 ```
 
-Regenerates the shell alias script at `~/.local/bin/hermes-<name>`. Useful if the alias was accidentally deleted or if you need to update it after moving your Hermes installation.
+Regenerates the shell alias script at `~/.local/bin/<name>`. Useful if the alias was accidentally deleted or if you need to update it after moving your Hermes installation.
 
-| Argument | Description |
-|----------|-------------|
+| Argument / Option | Description |
+|-------------------|-------------|
 | `<name>` | Profile to create/update the alias for. |
+| `--remove` | Remove the wrapper script instead of creating it. |
+| `--name <alias>` | Custom alias name (default: profile name). |
 
 **Example:**
 
 ```bash
 hermes profile alias work
 # Creates/updates ~/.local/bin/work
+
+hermes profile alias work --name mywork
+# Creates ~/.local/bin/mywork
+
+hermes profile alias work --remove
+# Removes the wrapper script
 ```
 
 ## `hermes profile rename`
 
@@ -187,39 +195,45 @@ hermes profile rename mybot assistant
 
 ## `hermes profile export`
 
 ```bash
-hermes profile export <name> <output>
+hermes profile export <name> [options]
 ```
 
 Exports a profile as a compressed tar.gz archive.
 
-| Argument | Description |
-|----------|-------------|
+| Argument / Option | Description |
+|-------------------|-------------|
 | `<name>` | Profile to export. |
-| `<output>` | Path for the output archive (e.g., `./work-backup.tar.gz`). |
+| `-o`, `--output <file>` | Output file path (default: `<name>.tar.gz`). |
 
 **Example:**
 
 ```bash
-hermes profile export work ./work-2026-03-29.tar.gz
+hermes profile export work
+# Creates work.tar.gz in the current directory
+
+hermes profile export work -o ./work-2026-03-29.tar.gz
 ```
 
 ## `hermes profile import`
 
 ```bash
-hermes profile import <archive> [name]
+hermes profile import <archive> [options]
 ```
 
 Imports a profile from a tar.gz archive.
 
-| Argument | Description |
-|----------|-------------|
-| `<archive>` | Path to the tar.gz archive to import. |
-| `[name]` | Name for the imported profile. Defaults to the original profile name from the archive. |
+| Argument / Option | Description |
+|-------------------|-------------|
+| `<archive>` | Path to the tar.gz archive to import. |
+| `--name <name>` | Name for the imported profile (default: inferred from archive). |
 
 **Example:**
 
 ```bash
-hermes profile import ./work-2026-03-29.tar.gz work-restored
+hermes profile import ./work-2026-03-29.tar.gz
+# Infers profile name from the archive
+
+hermes profile import ./work-2026-03-29.tar.gz --name work-restored
 ```
 
 ## `hermes -p` / `hermes --profile`
 
@@ -254,7 +268,7 @@ Generates shell completion scripts. Includes completions for profile names and p
 
 | Argument | Description |
 |----------|-------------|
-| `<shell>` | Shell to generate completions for: `bash`, `zsh`, or `fish`. |
+| `<shell>` | Shell to generate completions for: `bash` or `zsh`. |
 
 **Examples:**
 
@@ -262,7 +276,6 @@ Generates shell completion scripts. Includes completions for profile names and p
 # Install completions
 hermes completion bash >> ~/.bashrc
 hermes completion zsh >> ~/.zshrc
-hermes completion fish > ~/.config/fish/completions/hermes.fish
 
 # Reload shell
 source ~/.bashrc
diff --git a/website/docs/reference/slash-commands.md b/website/docs/reference/slash-commands.md
index 70b15efa9..94e413445 100644
--- a/website/docs/reference/slash-commands.md
+++ b/website/docs/reference/slash-commands.md
@@ -31,10 +31,10 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
 | `/compress` | Manually compress conversation context (flush memories + summarize) |
 | `/rollback` | List or restore filesystem checkpoints (usage: /rollback [number]) |
 | `/stop` | Kill all running background processes |
-| `/queue <prompt>` (alias: `/q`) | Queue a prompt for the next turn (doesn't interrupt the current agent response) |
+| `/queue <prompt>` (alias: `/q`) | Queue a prompt for the next turn (doesn't interrupt the current agent response). **Note:** `/q` is claimed by both `/queue` and `/quit`; the last registration wins, so `/q` resolves to `/quit` in practice. Use `/queue` explicitly. |
 | `/resume [name]` | Resume a previously-named session |
 | `/statusbar` (alias: `/sb`) | Toggle the context/model status bar on or off |
-| `/background <prompt>` | Run a prompt in a separate background session. The agent processes your prompt independently — your current session stays free for other work. Results appear as a panel when the task finishes. See [CLI Background Sessions](/docs/user-guide/cli#background-sessions). |
+| `/background <prompt>` (alias: `/bg`) | Run a prompt in a separate background session. The agent processes your prompt independently — your current session stays free for other work. Results appear as a panel when the task finishes. See [CLI Background Sessions](/docs/user-guide/cli#background-sessions). |
 | `/plan [request]` | Load the bundled `plan` skill to write a markdown plan instead of executing the work. Plans are saved under `.hermes/plans/` relative to the active workspace/backend working directory. |
 
 ### Configuration
 
@@ -50,6 +50,7 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
 | `/reasoning` | Manage reasoning effort and display (usage: /reasoning [level\|show\|hide]) |
 | `/skin` | Show or change the display skin/theme |
 | `/voice [on\|off\|tts\|status]` | Toggle CLI voice mode and spoken playback. Recording uses `voice.record_key` (default: `Ctrl+B`). |
+| `/yolo` | Toggle YOLO mode — skip all dangerous command approval prompts. |
 
 ### Tools & Skills
 
@@ -60,7 +61,7 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
 | `/browser [connect\|disconnect\|status]` | Manage local Chrome CDP connection. `connect` attaches browser tools to a running Chrome instance (default: `ws://localhost:9222`). `disconnect` detaches. `status` shows current connection. Auto-launches Chrome if no debugger is detected. |
 | `/skills` | Search, install, inspect, or manage skills from online registries |
 | `/cron` | Manage scheduled tasks (list, add/create, edit, pause, resume, run, remove) |
-| `/reload-mcp` | Reload MCP servers from config.yaml |
+| `/reload-mcp` (alias: `/reload_mcp`) | Reload MCP servers from config.yaml |
 | `/plugins` | List installed plugins and their status |
 
 ### Info
 
@@ -70,14 +71,15 @@ Type `/` in the CLI to open the autocomplete menu. Built-in commands are case-in
 | `/help` | Show this help message |
 | `/usage` | Show token usage, cost breakdown, and session duration |
 | `/insights` | Show usage insights and analytics (last 30 days) |
-| `/platforms` | Show gateway/messaging platform status |
+| `/platforms` (alias: `/gateway`) | Show gateway/messaging platform status |
 | `/paste` | Check clipboard for an image and attach it |
+| `/profile` | Show active profile name and home directory |
 
 ### Exit
 
 | Command | Description |
 |---------|-------------|
-| `/quit` | Exit the CLI (also: /exit, /q) |
+| `/quit` | Exit the CLI (also: `/exit`). See note on `/q` under `/queue` above. 
|
 
 ### Dynamic CLI slash commands
 
@@ -105,7 +107,7 @@ The messaging gateway supports the following built-in commands inside Telegram,
 | `/personality [name]` | Set a personality overlay for the session. |
 | `/retry` | Retry the last message. |
 | `/undo` | Remove the last exchange. |
-| `/sethome` | Mark the current chat as the platform home channel for deliveries. |
+| `/sethome` (alias: `/set-home`) | Mark the current chat as the platform home channel for deliveries. |
 | `/compress` | Manually compress conversation context. |
 | `/title [name]` | Set or show the session title. |
 | `/resume [name]` | Resume a previously named session. |
@@ -116,7 +118,9 @@ The messaging gateway supports the following built-in commands inside Telegram,
 | `/rollback [number]` | List or restore filesystem checkpoints. |
 | `/background <prompt>` | Run a prompt in a separate background session. Results are delivered back to the same chat when the task finishes. See [Messaging Background Sessions](/docs/user-guide/messaging/#background-sessions). |
 | `/plan [request]` | Load the bundled `plan` skill to write a markdown plan instead of executing the work. Plans are saved under `.hermes/plans/` relative to the active workspace/backend working directory. |
-| `/reload-mcp` | Reload MCP servers from config. |
+| `/reload-mcp` (alias: `/reload_mcp`) | Reload MCP servers from config. |
+| `/yolo` | Toggle YOLO mode — skip all dangerous command approval prompts. |
+| `/commands [page]` | Browse all commands and skills (paginated). |
 | `/approve [session\|always]` | Approve and execute a pending dangerous command. `session` approves for this session only; `always` adds to permanent allowlist. |
 | `/deny` | Reject a pending dangerous command. |
 | `/update` | Update Hermes Agent to the latest version. |
@@ -127,6 +131,6 @@ The messaging gateway supports the following built-in commands inside Telegram,
 
 - `/skin`, `/tools`, `/toolsets`, `/browser`, `/config`, `/prompt`, `/cron`, `/skills`, `/platforms`, `/paste`, `/statusbar`, and `/plugins` are **CLI-only** commands.
 - `/verbose` is **CLI-only by default**, but can be enabled for messaging platforms by setting `display.tool_progress_command: true` in `config.yaml`. When enabled, it cycles the `display.tool_progress` mode and saves to config.
-- `/status`, `/sethome`, `/update`, `/approve`, and `/deny` are **messaging-only** commands.
-- `/background`, `/voice`, `/reload-mcp`, and `/rollback` work in **both** the CLI and the messaging gateway.
+- `/status`, `/sethome`, `/update`, `/approve`, `/deny`, and `/commands` are **messaging-only** commands.
+- `/background`, `/voice`, `/reload-mcp`, `/rollback`, and `/yolo` work in **both** the CLI and the messaging gateway.
 - `/voice join`, `/voice channel`, and `/voice leave` are only meaningful on Discord.
diff --git a/website/docs/reference/tools-reference.md b/website/docs/reference/tools-reference.md
index 9a30bab33..275dea4fe 100644
--- a/website/docs/reference/tools-reference.md
+++ b/website/docs/reference/tools-reference.md
@@ -151,8 +151,8 @@ This page documents the built-in Hermes tool registry as it exists in code. Avai
 | Tool | Description | Requires environment |
 |------|-------------|----------------------|
-| `web_search` | Search the web for information on any topic. Returns up to 5 relevant results with titles, URLs, and descriptions. | PARALLEL_API_KEY or FIRECRAWL_API_KEY or TAVILY_API_KEY |
-| `web_extract` | Extract content from web page URLs. Returns page content in markdown format. 
Also works with PDF URLs — pass the PDF link directly and it converts to markdown text. Pages under 5000 chars return full markdown; larger pages are LLM-summarized. | PARALLEL_API_KEY or FIRECRAWL_API_KEY or TAVILY_API_KEY | +| `web_search` | Search the web for information on any topic. Returns up to 5 relevant results with titles, URLs, and descriptions. | EXA_API_KEY or PARALLEL_API_KEY or FIRECRAWL_API_KEY or TAVILY_API_KEY | +| `web_extract` | Extract content from web page URLs. Returns page content in markdown format. Also works with PDF URLs — pass the PDF link directly and it converts to markdown text. Pages under 5000 chars return full markdown; larger pages are LLM-summarized. | EXA_API_KEY or PARALLEL_API_KEY or FIRECRAWL_API_KEY or TAVILY_API_KEY | ## `tts` toolset diff --git a/website/docs/reference/toolsets-reference.md b/website/docs/reference/toolsets-reference.md index 83cf92e4c..7999acc01 100644 --- a/website/docs/reference/toolsets-reference.md +++ b/website/docs/reference/toolsets-reference.md @@ -19,7 +19,7 @@ Toolsets are named bundles of tools that you can enable with `hermes chat --tool | `file` | core | `patch`, `read_file`, `search_files`, `write_file` | | `hermes-acp` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_console`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `delegate_task`, `execute_code`, `memory`, `patch`, `process`, `read_file`, `search_files`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` | | `hermes-cli` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_console`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `clarify`, `cronjob`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `send_message`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `text_to_speech`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` | -| `hermes-api-server` | platform | _(same as hermes-cli)_ | +| `hermes-api-server` | platform | `browser_back`, `browser_click`, `browser_close`, `browser_console`, `browser_get_images`, `browser_navigate`, `browser_press`, `browser_scroll`, `browser_snapshot`, `browser_type`, `browser_vision`, `cronjob`, `delegate_task`, `execute_code`, `ha_call_service`, `ha_get_state`, `ha_list_entities`, `ha_list_services`, `honcho_conclude`, `honcho_context`, `honcho_profile`, `honcho_search`, `image_generate`, `memory`, `mixture_of_agents`, `patch`, `process`, `read_file`, `search_files`, `session_search`, `skill_manage`, `skill_view`, `skills_list`, `terminal`, `todo`, `vision_analyze`, `web_extract`, `web_search`, `write_file` | | `hermes-dingtalk` | platform | _(same as hermes-cli)_ | | `hermes-feishu` | platform | _(same as hermes-cli)_ | | `hermes-wecom` | platform | _(same as hermes-cli)_ | diff --git a/website/docs/user-guide/checkpoints-and-rollback.md b/website/docs/user-guide/checkpoints-and-rollback.md index f81a7d4f8..1c31acdae 100644 --- a/website/docs/user-guide/checkpoints-and-rollback.md +++ b/website/docs/user-guide/checkpoints-and-rollback.md @@ -1,5 +1,6 
@@ --- sidebar_position: 8 +sidebar_label: "Checkpoints & Rollback" title: "Checkpoints and /rollback" description: "Filesystem safety nets for destructive operations using shadow git repos and automatic snapshots" --- diff --git a/website/docs/user-guide/cli.md b/website/docs/user-guide/cli.md index 1c4857d71..e37b1ddba 100644 --- a/website/docs/user-guide/cli.md +++ b/website/docs/user-guide/cli.md @@ -94,6 +94,7 @@ When resuming a previous session (`hermes -c` or `hermes --resume `), a "Pre | `Ctrl+B` | Start/stop voice recording when voice mode is enabled (`voice.record_key`, default: `ctrl+b`) | | `Ctrl+C` | Interrupt agent (double-press within 2s to force exit) | | `Ctrl+D` | Exit | +| `Ctrl+Z` | Suspend Hermes to background (Unix only). Run `fg` in the shell to resume. | | `Tab` | Accept auto-suggestion (ghost text) or autocomplete slash commands | ## Slash Commands @@ -212,6 +213,33 @@ You can interrupt the agent at any point: - In-progress terminal commands are killed immediately (SIGTERM, then SIGKILL after 1s) - Multiple messages typed during interrupt are combined into one prompt +### Busy Input Mode + +The `display.busy_input_mode` config key controls what happens when you press Enter while the agent is working: + +| Mode | Behavior | +|------|----------| +| `"interrupt"` (default) | Your message interrupts the current operation and is processed immediately | +| `"queue"` | Your message is silently queued and sent as the next turn after the agent finishes | + +```yaml +# ~/.hermes/config.yaml +display: + busy_input_mode: "queue" # or "interrupt" (default) +``` + +Queue mode is useful when you want to prepare follow-up messages without accidentally canceling in-flight work. Unknown values fall back to `"interrupt"`. + +### Suspending to Background + +On Unix systems, press **`Ctrl+Z`** to suspend Hermes to the background — just like any terminal process. The shell prints a confirmation: + +``` +Hermes Agent has been suspended. Run `fg` to bring Hermes Agent back. +``` + +Type `fg` in your shell to resume the session exactly where you left off. This is not supported on Windows. + ## Tool Progress Display The CLI shows animated feedback as the agent works: @@ -232,6 +260,18 @@ The CLI shows animated feedback as the agent works: Cycle through display modes with `/verbose`: `off → new → all → verbose`. This command can also be enabled for messaging platforms — see [configuration](/docs/user-guide/configuration#display-settings). +### Tool Preview Length + +The `display.tool_preview_length` config key controls the maximum number of characters shown in tool call preview lines (e.g. file paths, terminal commands). The default is `0`, which means no limit — full paths and commands are shown. + +```yaml +# ~/.hermes/config.yaml +display: + tool_preview_length: 80 # Truncate tool previews to 80 chars (0 = no limit) +``` + +This is useful on narrow terminals or when tool arguments contain very long file paths. + ## Session Management ### Resuming Sessions diff --git a/website/docs/user-guide/configuration.md b/website/docs/user-guide/configuration.md index c3aa96f53..107e82395 100644 --- a/website/docs/user-guide/configuration.md +++ b/website/docs/user-guide/configuration.md @@ -71,631 +71,7 @@ delegation: Multiple references in a single value work: `url: "${HOST}:${PORT}"`. If a referenced variable is not set, the placeholder is kept verbatim (`${UNDEFINED_VAR}` stays as-is). Only the `${VAR}` syntax is supported — bare `$VAR` is not expanded. 
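+
+For example — a sketch, where `LLM_HOST` and `MY_TOKEN` are hypothetical variable names assumed to be exported in the environment:
+
+```yaml
+model:
+  base_url: "${LLM_HOST}:8000/v1"  # expands to e.g. "http://gpu-box:8000/v1"
+  api_key: "${MY_TOKEN}"           # kept verbatim as "${MY_TOKEN}" if unset
+```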
-## Inference Providers - -You need at least one way to connect to an LLM. Use `hermes model` to switch providers and models interactively, or configure directly: - -| Provider | Setup | -|----------|-------| -| **Nous Portal** | `hermes model` (OAuth, subscription-based) | -| **OpenAI Codex** | `hermes model` (ChatGPT OAuth, uses Codex models) | -| **GitHub Copilot** | `hermes model` (OAuth device code flow, `COPILOT_GITHUB_TOKEN`, `GH_TOKEN`, or `gh auth token`) | -| **GitHub Copilot ACP** | `hermes model` (spawns local `copilot --acp --stdio`) | -| **Anthropic** | `hermes model` (Claude Pro/Max via Claude Code auth, Anthropic API key, or manual setup-token) | -| **OpenRouter** | `OPENROUTER_API_KEY` in `~/.hermes/.env` | -| **AI Gateway** | `AI_GATEWAY_API_KEY` in `~/.hermes/.env` (provider: `ai-gateway`) | -| **z.ai / GLM** | `GLM_API_KEY` in `~/.hermes/.env` (provider: `zai`) | -| **Kimi / Moonshot** | `KIMI_API_KEY` in `~/.hermes/.env` (provider: `kimi-coding`) | -| **MiniMax** | `MINIMAX_API_KEY` in `~/.hermes/.env` (provider: `minimax`) | -| **MiniMax China** | `MINIMAX_CN_API_KEY` in `~/.hermes/.env` (provider: `minimax-cn`) | -| **Alibaba Cloud** | `DASHSCOPE_API_KEY` in `~/.hermes/.env` (provider: `alibaba`, aliases: `dashscope`, `qwen`) | -| **Kilo Code** | `KILOCODE_API_KEY` in `~/.hermes/.env` (provider: `kilocode`) | -| **OpenCode Zen** | `OPENCODE_ZEN_API_KEY` in `~/.hermes/.env` (provider: `opencode-zen`) | -| **OpenCode Go** | `OPENCODE_GO_API_KEY` in `~/.hermes/.env` (provider: `opencode-go`) | -| **Hugging Face** | `HF_TOKEN` in `~/.hermes/.env` (provider: `huggingface`, aliases: `hf`) | -| **Custom Endpoint** | `hermes model` (saved in `config.yaml`) or `OPENAI_BASE_URL` + `OPENAI_API_KEY` in `~/.hermes/.env` | - -:::tip Model key alias -In the `model:` config section, you can use either `default:` or `model:` as the key name for your model ID. Both `model: { default: my-model }` and `model: { model: my-model }` work identically. -::: - -:::info Codex Note -The OpenAI Codex provider authenticates via device code (open a URL, enter a code). Hermes stores the resulting credentials in its own auth store under `~/.hermes/auth.json` and can import existing Codex CLI credentials from `~/.codex/auth.json` when present. No Codex CLI installation is required. -::: - -:::warning -Even when using Nous Portal, Codex, or a custom endpoint, some tools (vision, web summarization, MoA) use a separate "auxiliary" model — by default Gemini Flash via OpenRouter. An `OPENROUTER_API_KEY` enables these tools automatically. You can also configure which model and provider these tools use — see [Auxiliary Models](#auxiliary-models) below. -::: - -### Anthropic (Native) - -Use Claude models directly through the Anthropic API — no OpenRouter proxy needed. 
Supports three auth methods: - -```bash -# With an API key (pay-per-token) -export ANTHROPIC_API_KEY=*** -hermes chat --provider anthropic --model claude-sonnet-4-6 - -# Preferred: authenticate through `hermes model` -# Hermes will use Claude Code's credential store directly when available -hermes model - -# Manual override with a setup-token (fallback / legacy) -export ANTHROPIC_TOKEN=*** # setup-token or manual OAuth token -hermes chat --provider anthropic - -# Auto-detect Claude Code credentials (if you already use Claude Code) -hermes chat --provider anthropic # reads Claude Code credential files automatically -``` - -When you choose Anthropic OAuth through `hermes model`, Hermes prefers Claude Code's own credential store over copying the token into `~/.hermes/.env`. That keeps refreshable Claude credentials refreshable. - -Or set it permanently: -```yaml -model: - provider: "anthropic" - default: "claude-sonnet-4-6" -``` - -:::tip Aliases -`--provider claude` and `--provider claude-code` also work as shorthand for `--provider anthropic`. -::: - -### GitHub Copilot - -Hermes supports GitHub Copilot as a first-class provider with two modes: - -**`copilot` — Direct Copilot API** (recommended). Uses your GitHub Copilot subscription to access GPT-5.x, Claude, Gemini, and other models through the Copilot API. - -```bash -hermes chat --provider copilot --model gpt-5.4 -``` - -**Authentication options** (checked in this order): - -1. `COPILOT_GITHUB_TOKEN` environment variable -2. `GH_TOKEN` environment variable -3. `GITHUB_TOKEN` environment variable -4. `gh auth token` CLI fallback - -If no token is found, `hermes model` offers an **OAuth device code login** — the same flow used by the Copilot CLI and opencode. - -:::warning Token types -The Copilot API does **not** support classic Personal Access Tokens (`ghp_*`). Supported token types: - -| Type | Prefix | How to get | -|------|--------|------------| -| OAuth token | `gho_` | `hermes model` → GitHub Copilot → Login with GitHub | -| Fine-grained PAT | `github_pat_` | GitHub Settings → Developer settings → Fine-grained tokens (needs **Copilot Requests** permission) | -| GitHub App token | `ghu_` | Via GitHub App installation | - -If your `gh auth token` returns a `ghp_*` token, use `hermes model` to authenticate via OAuth instead. -::: - -**API routing**: GPT-5+ models (except `gpt-5-mini`) automatically use the Responses API. All other models (GPT-4o, Claude, Gemini, etc.) use Chat Completions. Models are auto-detected from the live Copilot catalog. - -**`copilot-acp` — Copilot ACP agent backend**. Spawns the local Copilot CLI as a subprocess: - -```bash -hermes chat --provider copilot-acp --model copilot-acp -# Requires the GitHub Copilot CLI in PATH and an existing `copilot login` session -``` - -**Permanent config:** -```yaml -model: - provider: "copilot" - default: "gpt-5.4" -``` - -| Environment variable | Description | -|---------------------|-------------| -| `COPILOT_GITHUB_TOKEN` | GitHub token for Copilot API (first priority) | -| `HERMES_COPILOT_ACP_COMMAND` | Override the Copilot CLI binary path (default: `copilot`) | -| `HERMES_COPILOT_ACP_ARGS` | Override ACP args (default: `--acp --stdio`) | - -### First-Class Chinese AI Providers - -These providers have built-in support with dedicated provider IDs. 
Set the API key and use `--provider` to select: - -```bash -# z.ai / ZhipuAI GLM -hermes chat --provider zai --model glm-4-plus -# Requires: GLM_API_KEY in ~/.hermes/.env - -# Kimi / Moonshot AI -hermes chat --provider kimi-coding --model moonshot-v1-auto -# Requires: KIMI_API_KEY in ~/.hermes/.env - -# MiniMax (global endpoint) -hermes chat --provider minimax --model MiniMax-M2.7 -# Requires: MINIMAX_API_KEY in ~/.hermes/.env - -# MiniMax (China endpoint) -hermes chat --provider minimax-cn --model MiniMax-M2.7 -# Requires: MINIMAX_CN_API_KEY in ~/.hermes/.env - -# Alibaba Cloud / DashScope (Qwen models) -hermes chat --provider alibaba --model qwen3.5-plus -# Requires: DASHSCOPE_API_KEY in ~/.hermes/.env -``` - -Or set the provider permanently in `config.yaml`: -```yaml -model: - provider: "zai" # or: kimi-coding, minimax, minimax-cn, alibaba - default: "glm-4-plus" -``` - -Base URLs can be overridden with `GLM_BASE_URL`, `KIMI_BASE_URL`, `MINIMAX_BASE_URL`, `MINIMAX_CN_BASE_URL`, or `DASHSCOPE_BASE_URL` environment variables. - -### Hugging Face Inference Providers - -[Hugging Face Inference Providers](https://huggingface.co/docs/inference-providers) routes to 20+ open models through a unified OpenAI-compatible endpoint (`router.huggingface.co/v1`). Requests are automatically routed to the fastest available backend (Groq, Together, SambaNova, etc.) with automatic failover. - -```bash -# Use any available model -hermes chat --provider huggingface --model Qwen/Qwen3-235B-A22B-Thinking-2507 -# Requires: HF_TOKEN in ~/.hermes/.env - -# Short alias -hermes chat --provider hf --model deepseek-ai/DeepSeek-V3.2 -``` - -Or set it permanently in `config.yaml`: -```yaml -model: - provider: "huggingface" - default: "Qwen/Qwen3-235B-A22B-Thinking-2507" -``` - -Get your token at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) — make sure to enable the "Make calls to Inference Providers" permission. Free tier included ($0.10/month credit, no markup on provider rates). - -You can append routing suffixes to model names: `:fastest` (default), `:cheapest`, or `:provider_name` to force a specific backend. - -The base URL can be overridden with `HF_BASE_URL`. - -## Custom & Self-Hosted LLM Providers - -Hermes Agent works with **any OpenAI-compatible API endpoint**. If a server implements `/v1/chat/completions`, you can point Hermes at it. This means you can use local models, GPU inference servers, multi-provider routers, or any third-party API. - -### General Setup - -Three ways to configure a custom endpoint: - -**Interactive setup (recommended):** -```bash -hermes model -# Select "Custom endpoint (self-hosted / VLLM / etc.)" -# Enter: API base URL, API key, Model name -``` - -**Manual config (`config.yaml`):** -```yaml -# In ~/.hermes/config.yaml -model: - default: your-model-name - provider: custom - base_url: http://localhost:8000/v1 - api_key: your-key-or-leave-empty-for-local -``` - -**Environment variables (`.env` file):** -```bash -# Add to ~/.hermes/.env -OPENAI_BASE_URL=http://localhost:8000/v1 -OPENAI_API_KEY=your-key # Any non-empty string for local servers -LLM_MODEL=your-model-name -``` - -All three approaches end up in the same runtime path. `hermes model` persists provider, model, and base URL to `config.yaml` so later sessions keep using that endpoint even if env vars are not set. 
- -### Switching Models with `/model` - -Once a custom endpoint is configured, you can switch models mid-session: - -``` -/model custom:qwen-2.5 # Switch to a model on your custom endpoint -/model custom # Auto-detect the model from the endpoint -/model openrouter:claude-sonnet-4 # Switch back to a cloud provider -``` - -If you have **named custom providers** configured (see below), use the triple syntax: - -``` -/model custom:local:qwen-2.5 # Use the "local" custom provider with model qwen-2.5 -/model custom:work:llama3 # Use the "work" custom provider with llama3 -``` - -When switching providers, Hermes persists the base URL and provider to config so the change survives restarts. When switching away from a custom endpoint to a built-in provider, the stale base URL is automatically cleared. - -:::tip -`/model custom` (bare, no model name) queries your endpoint's `/models` API and auto-selects the model if exactly one is loaded. Useful for local servers running a single model. -::: - -Everything below follows this same pattern — just change the URL, key, and model name. - ---- - -### Ollama — Local Models, Zero Config - -[Ollama](https://ollama.com/) runs open-weight models locally with one command. Best for: quick local experimentation, privacy-sensitive work, offline use. - -```bash -# Install and run a model -ollama pull llama3.1:70b -ollama serve # Starts on port 11434 - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:11434/v1 -OPENAI_API_KEY=ollama # Any non-empty string -LLM_MODEL=llama3.1:70b -``` - -Ollama's OpenAI-compatible endpoint supports chat completions, streaming, and tool calling (for supported models). No GPU required for smaller models — Ollama handles CPU inference automatically. - -:::tip -List available models with `ollama list`. Pull any model from the [Ollama library](https://ollama.com/library) with `ollama pull `. -::: - ---- - -### vLLM — High-Performance GPU Inference - -[vLLM](https://docs.vllm.ai/) is the standard for production LLM serving. Best for: maximum throughput on GPU hardware, serving large models, continuous batching. - -```bash -# Start vLLM server -pip install vllm -vllm serve meta-llama/Llama-3.1-70B-Instruct \ - --port 8000 \ - --tensor-parallel-size 2 # Multi-GPU - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:8000/v1 -OPENAI_API_KEY=dummy -LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct -``` - -vLLM supports tool calling, structured output, and multi-modal models. Use `--enable-auto-tool-choice` and `--tool-call-parser hermes` for Hermes-format tool calling with NousResearch models. - ---- - -### SGLang — Fast Serving with RadixAttention - -[SGLang](https://github.com/sgl-project/sglang) is an alternative to vLLM with RadixAttention for KV cache reuse. Best for: multi-turn conversations (prefix caching), constrained decoding, structured output. - -```bash -# Start SGLang server -pip install "sglang[all]" -python -m sglang.launch_server \ - --model meta-llama/Llama-3.1-70B-Instruct \ - --port 8000 \ - --tp 2 - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:8000/v1 -OPENAI_API_KEY=dummy -LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct -``` - ---- - -### llama.cpp / llama-server — CPU & Metal Inference - -[llama.cpp](https://github.com/ggml-org/llama.cpp) runs quantized models on CPU, Apple Silicon (Metal), and consumer GPUs. Best for: running models without a datacenter GPU, Mac users, edge deployment. 
- -```bash -# Build and start llama-server -cmake -B build && cmake --build build --config Release -./build/bin/llama-server \ - -m models/llama-3.1-8b-instruct-Q4_K_M.gguf \ - --port 8080 --host 0.0.0.0 - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:8080/v1 -OPENAI_API_KEY=dummy -LLM_MODEL=llama-3.1-8b-instruct -``` - -:::tip -Download GGUF models from [Hugging Face](https://huggingface.co/models?library=gguf). Q4_K_M quantization offers the best balance of quality vs. memory usage. -::: - ---- - -### LiteLLM Proxy — Multi-Provider Gateway - -[LiteLLM](https://docs.litellm.ai/) is an OpenAI-compatible proxy that unifies 100+ LLM providers behind a single API. Best for: switching between providers without config changes, load balancing, fallback chains, budget controls. - -```bash -# Install and start -pip install "litellm[proxy]" -litellm --model anthropic/claude-sonnet-4 --port 4000 - -# Or with a config file for multiple models: -litellm --config litellm_config.yaml --port 4000 - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:4000/v1 -OPENAI_API_KEY=sk-your-litellm-key -LLM_MODEL=anthropic/claude-sonnet-4 -``` - -Example `litellm_config.yaml` with fallback: -```yaml -model_list: - - model_name: "best" - litellm_params: - model: anthropic/claude-sonnet-4 - api_key: sk-ant-... - - model_name: "best" - litellm_params: - model: openai/gpt-4o - api_key: sk-... -router_settings: - routing_strategy: "latency-based-routing" -``` - ---- - -### ClawRouter — Cost-Optimized Routing - -[ClawRouter](https://github.com/BlockRunAI/ClawRouter) by BlockRunAI is a local routing proxy that auto-selects models based on query complexity. It classifies requests across 14 dimensions and routes to the cheapest model that can handle the task. Payment is via USDC cryptocurrency (no API keys). - -```bash -# Install and start -npx @blockrun/clawrouter # Starts on port 8402 - -# Configure Hermes -OPENAI_BASE_URL=http://localhost:8402/v1 -OPENAI_API_KEY=dummy -LLM_MODEL=blockrun/auto # or: blockrun/eco, blockrun/premium, blockrun/agentic -``` - -Routing profiles: -| Profile | Strategy | Savings | -|---------|----------|---------| -| `blockrun/auto` | Balanced quality/cost | 74-100% | -| `blockrun/eco` | Cheapest possible | 95-100% | -| `blockrun/premium` | Best quality models | 0% | -| `blockrun/free` | Free models only | 100% | -| `blockrun/agentic` | Optimized for tool use | varies | - -:::note -ClawRouter requires a USDC-funded wallet on Base or Solana for payment. All requests route through BlockRun's backend API. Run `npx @blockrun/clawrouter doctor` to check wallet status. -::: - ---- - -### Other Compatible Providers - -Any service with an OpenAI-compatible API works. 
Some popular options: - -| Provider | Base URL | Notes | -|----------|----------|-------| -| [Together AI](https://together.ai) | `https://api.together.xyz/v1` | Cloud-hosted open models | -| [Groq](https://groq.com) | `https://api.groq.com/openai/v1` | Ultra-fast inference | -| [DeepSeek](https://deepseek.com) | `https://api.deepseek.com/v1` | DeepSeek models | -| [Fireworks AI](https://fireworks.ai) | `https://api.fireworks.ai/inference/v1` | Fast open model hosting | -| [Cerebras](https://cerebras.ai) | `https://api.cerebras.ai/v1` | Wafer-scale chip inference | -| [Mistral AI](https://mistral.ai) | `https://api.mistral.ai/v1` | Mistral models | -| [OpenAI](https://openai.com) | `https://api.openai.com/v1` | Direct OpenAI access | -| [Azure OpenAI](https://azure.microsoft.com) | `https://YOUR.openai.azure.com/` | Enterprise OpenAI | -| [LocalAI](https://localai.io) | `http://localhost:8080/v1` | Self-hosted, multi-model | -| [Jan](https://jan.ai) | `http://localhost:1337/v1` | Desktop app with local models | - -```bash -# Example: Together AI -OPENAI_BASE_URL=https://api.together.xyz/v1 -OPENAI_API_KEY=your-together-key -LLM_MODEL=meta-llama/Llama-3.1-70B-Instruct-Turbo -``` - ---- - -### Context Length Detection - -Hermes uses a multi-source resolution chain to detect the correct context window for your model and provider: - -1. **Config override** — `model.context_length` in config.yaml (highest priority) -2. **Custom provider per-model** — `custom_providers[].models..context_length` -3. **Persistent cache** — previously discovered values (survives restarts) -4. **Endpoint `/models`** — queries your server's API (local/custom endpoints) -5. **Anthropic `/v1/models`** — queries Anthropic's API for `max_input_tokens` (API-key users only) -6. **OpenRouter API** — live model metadata from OpenRouter -7. **Nous Portal** — suffix-matches Nous model IDs against OpenRouter metadata -8. **[models.dev](https://models.dev)** — community-maintained registry with provider-specific context lengths for 3800+ models across 100+ providers -9. **Fallback defaults** — broad model family patterns (128K default) - -For most setups this works out of the box. The system is provider-aware — the same model can have different context limits depending on who serves it (e.g., `claude-opus-4.6` is 1M on Anthropic direct but 128K on GitHub Copilot). - -To set the context length explicitly, add `context_length` to your model config: - -```yaml -model: - default: "qwen3.5:9b" - base_url: "http://localhost:8080/v1" - context_length: 131072 # tokens -``` - -For custom endpoints, you can also set context length per model: - -```yaml -custom_providers: - - name: "My Local LLM" - base_url: "http://localhost:11434/v1" - models: - qwen3.5:27b: - context_length: 32768 - deepseek-r1:70b: - context_length: 65536 -``` - -`hermes model` will prompt for context length when configuring a custom endpoint. Leave it blank for auto-detection. 
- -:::tip When to set this manually -- You're using Ollama with a custom `num_ctx` that's lower than the model's maximum -- You want to limit context below the model's maximum (e.g., 8k on a 128k model to save VRAM) -- You're running behind a proxy that doesn't expose `/v1/models` -::: - ---- - -### Named Custom Providers - -If you work with multiple custom endpoints (e.g., a local dev server and a remote GPU server), you can define them as named custom providers in `config.yaml`: - -```yaml -custom_providers: - - name: local - base_url: http://localhost:8080/v1 - # api_key omitted — Hermes uses "no-key-required" for keyless local servers - - name: work - base_url: https://gpu-server.internal.corp/v1 - api_key: corp-api-key - api_mode: chat_completions # optional, auto-detected from URL - - name: anthropic-proxy - base_url: https://proxy.example.com/anthropic - api_key: proxy-key - api_mode: anthropic_messages # for Anthropic-compatible proxies -``` - -Switch between them mid-session with the triple syntax: - -``` -/model custom:local:qwen-2.5 # Use the "local" endpoint with qwen-2.5 -/model custom:work:llama3-70b # Use the "work" endpoint with llama3-70b -/model custom:anthropic-proxy:claude-sonnet-4 # Use the proxy -``` - -You can also select named custom providers from the interactive `hermes model` menu. - ---- - -### Choosing the Right Setup - -| Use Case | Recommended | -|----------|-------------| -| **Just want it to work** | OpenRouter (default) or Nous Portal | -| **Local models, easy setup** | Ollama | -| **Production GPU serving** | vLLM or SGLang | -| **Mac / no GPU** | Ollama or llama.cpp | -| **Multi-provider routing** | LiteLLM Proxy or OpenRouter | -| **Cost optimization** | ClawRouter or OpenRouter with `sort: "price"` | -| **Maximum privacy** | Ollama, vLLM, or llama.cpp (fully local) | -| **Enterprise / Azure** | Azure OpenAI with custom endpoint | -| **Chinese AI models** | z.ai (GLM), Kimi/Moonshot, or MiniMax (first-class providers) | - -:::tip -You can switch between providers at any time with `hermes model` — no restart required. Your conversation history, memory, and skills carry over regardless of which provider you use. -::: - -## Optional API Keys - -| Feature | Provider | Env Variable | -|---------|----------|--------------| -| Web scraping | [Firecrawl](https://firecrawl.dev/) | `FIRECRAWL_API_KEY`, `FIRECRAWL_API_URL` | -| Browser automation | [Browserbase](https://browserbase.com/) | `BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID` | -| Image generation | [FAL](https://fal.ai/) | `FAL_KEY` | -| Premium TTS voices | [ElevenLabs](https://elevenlabs.io/) | `ELEVENLABS_API_KEY` | -| OpenAI TTS + voice transcription | [OpenAI](https://platform.openai.com/api-keys) | `VOICE_TOOLS_OPENAI_KEY` | -| RL Training | [Tinker](https://tinker-console.thinkingmachines.ai/) + [WandB](https://wandb.ai/) | `TINKER_API_KEY`, `WANDB_API_KEY` | -| Cross-session user modeling | [Honcho](https://honcho.dev/) | `HONCHO_API_KEY` | - -### Self-Hosting Firecrawl - -By default, Hermes uses the [Firecrawl cloud API](https://firecrawl.dev/) for web search and scraping. If you prefer to run Firecrawl locally, you can point Hermes at a self-hosted instance instead. See Firecrawl's [SELF_HOST.md](https://github.com/firecrawl/firecrawl/blob/main/SELF_HOST.md) for complete setup instructions. - -**What you get:** No API key required, no rate limits, no per-page costs, full data sovereignty. 
- -**What you lose:** The cloud version uses Firecrawl's proprietary "Fire-engine" for advanced anti-bot bypassing (Cloudflare, CAPTCHAs, IP rotation). Self-hosted uses basic fetch + Playwright, so some protected sites may fail. Search uses DuckDuckGo instead of Google. - -**Setup:** - -1. Clone and start the Firecrawl Docker stack (5 containers: API, Playwright, Redis, RabbitMQ, PostgreSQL — requires ~4-8 GB RAM): - ```bash - git clone https://github.com/firecrawl/firecrawl - cd firecrawl - # In .env, set: USE_DB_AUTHENTICATION=false, HOST=0.0.0.0, PORT=3002 - docker compose up -d - ``` - -2. Point Hermes at your instance (no API key needed): - ```bash - hermes config set FIRECRAWL_API_URL http://localhost:3002 - ``` - -You can also set both `FIRECRAWL_API_KEY` and `FIRECRAWL_API_URL` if your self-hosted instance has authentication enabled. - -## OpenRouter Provider Routing - -When using OpenRouter, you can control how requests are routed across providers. Add a `provider_routing` section to `~/.hermes/config.yaml`: - -```yaml -provider_routing: - sort: "throughput" # "price" (default), "throughput", or "latency" - # only: ["anthropic"] # Only use these providers - # ignore: ["deepinfra"] # Skip these providers - # order: ["anthropic", "google"] # Try providers in this order - # require_parameters: true # Only use providers that support all request params - # data_collection: "deny" # Exclude providers that may store/train on data -``` - -**Shortcuts:** Append `:nitro` to any model name for throughput sorting (e.g., `anthropic/claude-sonnet-4:nitro`), or `:floor` for price sorting. - -## Fallback Model - -Configure a backup provider:model that Hermes switches to automatically when your primary model fails (rate limits, server errors, auth failures): - -```yaml -fallback_model: - provider: openrouter # required - model: anthropic/claude-sonnet-4 # required - # base_url: http://localhost:8000/v1 # optional, for custom endpoints - # api_key_env: MY_CUSTOM_KEY # optional, env var name for custom endpoint API key -``` - -When activated, the fallback swaps the model and provider mid-session without losing your conversation. It fires **at most once** per session. - -Supported providers: `openrouter`, `nous`, `openai-codex`, `copilot`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `minimax`, `minimax-cn`, `custom`. - -:::tip -Fallback is configured exclusively through `config.yaml` — there are no environment variables for it. For full details on when it triggers, supported providers, and how it interacts with auxiliary tasks and delegation, see [Fallback Providers](/docs/user-guide/features/fallback-providers). -::: - -## Smart Model Routing - -Optional cheap-vs-strong routing lets Hermes keep your main model for complex work while sending very short/simple turns to a cheaper model. - -```yaml -smart_model_routing: - enabled: true - max_simple_chars: 160 - max_simple_words: 28 - cheap_model: - provider: openrouter - model: google/gemini-2.5-flash - # base_url: http://localhost:8000/v1 # optional custom endpoint - # api_key_env: MY_CUSTOM_KEY # optional env var name for that endpoint's API key -``` - -How it works: -- If a turn is short, single-line, and does not look code/tool/debug heavy, Hermes may route it to `cheap_model` -- If the turn looks complex, Hermes stays on your primary model/provider -- If the cheap route cannot be resolved cleanly, Hermes falls back to the primary model automatically - -This is intentionally conservative. 
It is meant for quick, low-stakes turns like: -- short factual questions -- quick rewrites -- lightweight summaries - -It will avoid routing prompts that look like: -- coding/debugging work -- tool-heavy requests -- long or multi-line analysis asks - -Use this when you want lower latency or cost without fully changing your default model. +For AI provider setup (OpenRouter, Anthropic, Copilot, custom endpoints, self-hosted LLMs, fallback models, etc.), see [AI Providers](/docs/integrations/providers). ## Terminal Backend Configuration @@ -706,6 +82,10 @@ terminal: backend: local # local | docker | ssh | modal | daytona | singularity cwd: "." # Working directory ("." = current dir for local, "/root" for containers) timeout: 180 # Per-command timeout in seconds + env_passthrough: [] # Env var names to forward to sandboxed execution (terminal + execute_code) + singularity_image: "docker://nikolaik/python-nodejs:python3.11-nodejs20" # Container image for Singularity backend + modal_image: "nikolaik/python-nodejs:python3.11-nodejs20" # Container image for Modal backend + daytona_image: "nikolaik/python-nodejs:python3.11-nodejs20" # Container image for Daytona backend ``` ### Backend Overview @@ -1012,6 +392,8 @@ All compression settings live in `config.yaml` (no environment variables). compression: enabled: true # Toggle compression on/off threshold: 0.50 # Compress at this % of context limit + target_ratio: 0.20 # Fraction of threshold to preserve as recent tail + protect_last_n: 20 # Min recent messages to keep uncompressed summary_model: "google/gemini-3-flash-preview" # Model for summarization summary_provider: "auto" # Provider: "auto", "openrouter", "nous", "codex", "main", etc. summary_base_url: null # Custom OpenAI-compatible endpoint (overrides provider) @@ -1096,6 +478,18 @@ If auto-compression is disabled, the warning tells you context may be truncated Context pressure is automatic — no configuration needed. It fires purely as a user-facing notification and does not modify the message stream or inject anything into the model's context. +## Credential Pool Strategies + +When you have multiple API keys or OAuth tokens for the same provider, configure the rotation strategy: + +```yaml +credential_pool_strategies: + openrouter: round_robin # cycle through keys evenly + anthropic: least_used # always pick the least-used key +``` + +Options: `fill_first` (default), `round_robin`, `least_used`, `random`. See [Credential Pools](/docs/user-guide/features/credential-pools) for full documentation. + ## Auxiliary Models Hermes uses lightweight "auxiliary" models for side tasks like image analysis, web page summarization, and browser screenshot analysis. By default, these use **Gemini Flash** via auto-detection — you don't need to configure anything. 
@@ -1146,6 +540,38 @@ auxiliary: # Context compression timeout (separate from compression.* config) compression: timeout: 120 # seconds — compression summarizes long conversations, needs more time + + # Session search — summarizes past session matches + session_search: + provider: "auto" + model: "" + base_url: "" + api_key: "" + timeout: 30 + + # Skills hub — skill matching and search + skills_hub: + provider: "auto" + model: "" + base_url: "" + api_key: "" + timeout: 30 + + # MCP tool dispatch + mcp: + provider: "auto" + model: "" + base_url: "" + api_key: "" + timeout: 30 + + # Memory flush — summarizes conversation for persistent memory + flush_memories: + provider: "auto" + model: "" + base_url: "" + api_key: "" + timeout: 30 ``` :::tip @@ -1153,7 +579,7 @@ Each auxiliary task has a configurable `timeout` (in seconds). Defaults: vision ::: :::info -Context compression has its own top-level `compression:` block with `summary_provider`, `summary_model`, and `summary_base_url` — see [Context Compression](#context-compression) above. The fallback model uses a `fallback_model:` block — see [Fallback Model](#fallback-model) above. All three follow the same provider/model/base_url pattern. +Context compression has its own top-level `compression:` block with `summary_provider`, `summary_model`, and `summary_base_url` — see [Context Compression](#context-compression) above. The fallback model uses a `fallback_model:` block — see [Fallback Model](/docs/integrations/providers#fallback-model). All three follow the same provider/model/base_url pattern. ::: ### Changing the Vision Model @@ -1340,6 +766,7 @@ display: streaming: false # Stream tokens to terminal as they arrive (real-time output) background_process_notifications: all # all | result | error | off (gateway only) show_cost: false # Show estimated $ cost in the CLI status bar + tool_preview_length: 0 # Max chars for tool call previews (0 = no limit, show full paths/commands) ``` ### Theme mode @@ -1445,12 +872,15 @@ When enabled, responses appear token-by-token inside a streaming box. Tool calls ```yaml streaming: enabled: true # Enable progressive message editing + transport: edit # "edit" (progressive message editing) or "off" edit_interval: 0.3 # Seconds between message edits buffer_threshold: 40 # Characters before forcing an edit flush cursor: " ▉" # Cursor shown during streaming ``` -When enabled, the bot sends a message on the first token, then progressively edits it as more tokens arrive. Platforms that don't support message editing (Signal, Email) gracefully skip streaming and deliver the final response normally. +When enabled, the bot sends a message on the first token, then progressively edits it as more tokens arrive. Platforms that don't support message editing (Signal, Email, Home Assistant) are auto-detected on the first attempt — streaming is gracefully disabled for that session with no flood of messages. + +**Overflow handling:** If the streamed text exceeds the platform's message length limit (~4096 chars), the current message is finalized and a new one starts automatically. :::note Streaming is disabled by default. Enable it in `~/.hermes/config.yaml` to try the streaming UX. 
@@ -1514,23 +944,6 @@ Usage: type `/status`, `/disk`, `/update`, or `/gpu` in the CLI or any messaging - **Type** — only `exec` is supported (runs a shell command); other types show an error - **Works everywhere** — CLI, Telegram, Discord, Slack, WhatsApp, Signal, Email, Home Assistant -## Gateway Streaming - -Enable progressive token delivery on messaging platforms. When streaming is enabled, responses appear character-by-character in Telegram, Discord, and Slack via message editing, rather than waiting for the full response. - -```yaml -streaming: - enabled: false # Enable streaming token delivery (default: off) - transport: edit # "edit" (progressive message editing) or "off" - edit_interval: 0.3 # Min seconds between message edits - buffer_threshold: 40 # Characters accumulated before forcing an edit - cursor: " ▉" # Cursor character shown during streaming -``` - -**Platform support:** Telegram, Discord, and Slack support edit-based streaming. Platforms that don't support message editing (Signal, Email, Home Assistant) are auto-detected on the first attempt — streaming is gracefully disabled for that session with no flood of messages. - -**Overflow handling:** If the streamed text exceeds the platform's message length limit (~4096 chars), the current message is finalized and a new one starts automatically. - ## Human Delay Simulate human-like response pacing in messaging platforms: @@ -1554,11 +967,11 @@ code_execution: ## Web Search Backends -The `web_search`, `web_extract`, and `web_crawl` tools support three backend providers. Configure the backend in `config.yaml` or via `hermes tools`: +The `web_search`, `web_extract`, and `web_crawl` tools support four backend providers. Configure the backend in `config.yaml` or via `hermes tools`: ```yaml web: - backend: firecrawl # firecrawl | parallel | tavily + backend: firecrawl # firecrawl | parallel | tavily | exa ``` | Backend | Env Var | Search | Extract | Crawl | @@ -1566,8 +979,9 @@ web: | **Firecrawl** (default) | `FIRECRAWL_API_KEY` | ✔ | ✔ | ✔ | | **Parallel** | `PARALLEL_API_KEY` | ✔ | ✔ | — | | **Tavily** | `TAVILY_API_KEY` | ✔ | ✔ | ✔ | +| **Exa** | `EXA_API_KEY` | ✔ | ✔ | — | -**Backend selection:** If `web.backend` is not set, the backend is auto-detected from available API keys. If only `TAVILY_API_KEY` is set, Tavily is used. If only `PARALLEL_API_KEY` is set, Parallel is used. Otherwise Firecrawl is the default. +**Backend selection:** If `web.backend` is not set, the backend is auto-detected from available API keys. If only `EXA_API_KEY` is set, Exa is used. If only `TAVILY_API_KEY` is set, Tavily is used. If only `PARALLEL_API_KEY` is set, Parallel is used. Otherwise Firecrawl is the default. **Self-hosted Firecrawl:** Set `FIRECRAWL_API_URL` to point at your own instance. When a custom URL is set, the API key becomes optional (set `USE_DB_AUTHENTICATION=false` on the server to disable auth). @@ -1580,11 +994,60 @@ Configure browser automation behavior: ```yaml browser: inactivity_timeout: 120 # Seconds before auto-closing idle sessions + command_timeout: 30 # Timeout in seconds for browser commands (screenshot, navigate, etc.) record_sessions: false # Auto-record browser sessions as WebM videos to ~/.hermes/browser_recordings/ ``` The browser toolset supports multiple providers. See the [Browser feature page](/docs/user-guide/features/browser) for details on Browserbase, Browser Use, and local Chrome CDP setup. +## Timezone + +Override the server-local timezone with an IANA timezone string. 
Affects timestamps in logs, cron scheduling, and system prompt time injection.
+
+```yaml
+timezone: "America/New_York" # IANA timezone (default: "" = server-local time)
+```
+
+Supported values: any IANA timezone identifier (e.g. `America/New_York`, `Europe/London`, `Asia/Kolkata`, `UTC`). Leave empty or omit for server-local time.
+
+## Discord
+
+Configure Discord-specific behavior for the messaging gateway:
+
+```yaml
+discord:
+  require_mention: true # Require @mention to respond in server channels
+  free_response_channels: "" # Comma-separated channel IDs where bot responds without @mention
+  auto_thread: true # Auto-create threads on @mention in channels
+```
+
+- `require_mention` — when `true` (default), the bot only responds in server channels when mentioned with `@BotName`. DMs always work without mention.
+- `free_response_channels` — comma-separated list of channel IDs where the bot responds to every message without requiring a mention.
+- `auto_thread` — when `true` (default), mentions in channels automatically create a thread for the conversation, keeping channels clean (similar to Slack threading).
+
+## Security
+
+Pre-execution security scanning and secret redaction:
+
+```yaml
+security:
+  redact_secrets: true # Redact API key patterns in tool output and logs
+  tirith_enabled: true # Enable Tirith security scanning for terminal commands
+  tirith_path: "tirith" # Path to tirith binary (default: "tirith" in $PATH)
+  tirith_timeout: 5 # Seconds to wait for tirith scan before timing out
+  tirith_fail_open: true # Allow command execution if tirith is unavailable
+  website_blocklist: # See Website Blocklist section below
+    enabled: false
+    domains: []
+    shared_files: []
+```
+
+- `redact_secrets` — automatically detects and redacts patterns that look like API keys, tokens, and passwords in tool output before it enters the conversation context and logs.
+- `tirith_enabled` — when `true`, terminal commands are scanned by [Tirith](https://github.com/StackGuardian/tirith) before execution to detect potentially dangerous operations.
+- `tirith_path` — path to the tirith binary. Set this if tirith is installed in a non-standard location.
+- `tirith_timeout` — maximum seconds to wait for a tirith scan. Commands proceed if the scan times out.
+- `tirith_fail_open` — when `true` (default), commands are allowed to execute if tirith is unavailable or fails. Set to `false` to block commands when tirith cannot verify them.
+
## Website Blocklist

Block specific domains from being accessed by the agent's web and browser tools:

@@ -1635,7 +1098,7 @@ Setting `approvals.mode: off` disables all safety checks for terminal commands.

## Checkpoints

-Automatic filesystem snapshots before destructive file operations. See the [Checkpoints feature page](/docs/user-guide/features/checkpoints) for details.
+Automatic filesystem snapshots before destructive file operations. See the [Checkpoints & Rollback](/docs/user-guide/checkpoints-and-rollback) page for details.

```yaml
checkpoints:
diff --git a/website/docs/user-guide/docker.md b/website/docs/user-guide/docker.md
index 229919774..2940b8678 100644
--- a/website/docs/user-guide/docker.md
+++ b/website/docs/user-guide/docker.md
@@ -1,10 +1,17 @@
+---
+sidebar_position: 7
+title: "Docker"
+description: "Running Hermes Agent in Docker and using Docker as a terminal backend"
+---
+
# Hermes Agent — Docker

-Want to run Hermes Agent, but without installing packages on your host? This'll sort you out.
+There are two distinct ways Docker intersects with Hermes Agent: -This will let you run the agent in a container, with the most relevant modes outlined below. +1. **Running Hermes IN Docker** — the agent itself runs inside a container (this page's primary focus) +2. **Docker as a terminal backend** — the agent runs on your host but executes commands inside a Docker sandbox (see [Configuration → terminal.backend](./configuration.md)) -The container stores all user data (config, API keys, sessions, skills, memories) in a single directory mounted from the host at `/opt/data`. The image itself is stateless and can be upgraded by pulling a new version without losing any configuration. +This page covers option 1. The container stores all user data (config, API keys, sessions, skills, memories) in a single directory mounted from the host at `/opt/data`. The image itself is stateless and can be upgraded by pulling a new version without losing any configuration. ## Quick start @@ -41,6 +48,110 @@ docker run -it --rm \ nousresearch/hermes-agent ``` +## Persistent volumes + +The `/opt/data` volume is the single source of truth for all Hermes state. It maps to your host's `~/.hermes/` directory and contains: + +| Path | Contents | +|------|----------| +| `.env` | API keys and secrets | +| `config.yaml` | All Hermes configuration | +| `SOUL.md` | Agent personality/identity | +| `sessions/` | Conversation history | +| `memories/` | Persistent memory store | +| `skills/` | Installed skills | +| `cron/` | Scheduled job definitions | +| `hooks/` | Event hooks | +| `logs/` | Runtime logs | +| `skins/` | Custom CLI skins | + +:::warning +Never run two Hermes containers against the same data directory simultaneously — session files and memory stores are not designed for concurrent access. +::: + +## Environment variable forwarding + +API keys are read from `/opt/data/.env` inside the container. You can also pass environment variables directly: + +```sh +docker run -it --rm \ + -v ~/.hermes:/opt/data \ + -e ANTHROPIC_API_KEY="sk-ant-..." \ + -e OPENAI_API_KEY="sk-..." \ + nousresearch/hermes-agent +``` + +Direct `-e` flags override values from `.env`. This is useful for CI/CD or secrets-manager integrations where you don't want keys on disk. + +## Docker Compose example + +For persistent gateway deployment, a `docker-compose.yaml` is convenient: + +```yaml +version: "3.8" +services: + hermes: + image: nousresearch/hermes-agent:latest + container_name: hermes + restart: unless-stopped + command: gateway run + volumes: + - ~/.hermes:/opt/data + # Uncomment to forward specific env vars instead of using .env file: + # environment: + # - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} + # - OPENAI_API_KEY=${OPENAI_API_KEY} + # - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN} + deploy: + resources: + limits: + memory: 4G + cpus: "2.0" +``` + +Start with `docker compose up -d` and view logs with `docker compose logs -f hermes`. + +## Resource limits + +The Hermes container needs moderate resources. Recommended minimums: + +| Resource | Minimum | Recommended | +|----------|---------|-------------| +| Memory | 1 GB | 2–4 GB | +| CPU | 1 core | 2 cores | +| Disk (data volume) | 500 MB | 2+ GB (grows with sessions/skills) | + +Browser automation (Playwright/Chromium) is the most memory-hungry feature. If you don't need browser tools, 1 GB is sufficient. With browser tools active, allocate at least 2 GB. 
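+
+To right-size these limits for your own workload, sample the live usage of a running container first. `docker stats` with `--no-stream` prints a one-shot snapshot instead of a live stream (the container name `hermes` matches the examples on this page):
+
+```sh
+# One-shot memory/CPU usage sample for the running container
+docker stats --no-stream hermes
+```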
+ +Set limits in Docker: + +```sh +docker run -d \ + --name hermes \ + --restart unless-stopped \ + --memory=4g --cpus=2 \ + -v ~/.hermes:/opt/data \ + nousresearch/hermes-agent gateway run +``` + +## What the Dockerfile does + +The official image is based on `debian:13.4` and includes: + +- Python 3 with all Hermes dependencies (`pip install -e ".[all]"`) +- Node.js + npm (for browser automation and WhatsApp bridge) +- Playwright with Chromium (`npx playwright install --with-deps chromium`) +- ripgrep and ffmpeg as system utilities +- The WhatsApp bridge (`scripts/whatsapp-bridge/`) + +The entrypoint script (`docker/entrypoint.sh`) bootstraps the data volume on first run: +- Creates the directory structure (`sessions/`, `memories/`, `skills/`, etc.) +- Copies `.env.example` → `.env` if no `.env` exists +- Copies default `config.yaml` if missing +- Copies default `SOUL.md` if missing +- Syncs bundled skills using a manifest-based approach (preserves user edits) +- Then runs `hermes` with whatever arguments you pass + ## Upgrading Pull the latest image and recreate the container. Your data directory is untouched. @@ -52,5 +163,62 @@ docker run -d \ --name hermes \ --restart unless-stopped \ -v ~/.hermes:/opt/data \ - nousresearch/hermes-agent + nousresearch/hermes-agent gateway run +``` + +Or with Docker Compose: + +```sh +docker compose pull +docker compose up -d +``` + +## Skills and credential files + +When using Docker as the execution environment (not the methods above, but when the agent runs commands inside a Docker sandbox), Hermes automatically bind-mounts the skills directory (`~/.hermes/skills/`) and any credential files declared by skills into the container as read-only volumes. This means skill scripts, templates, and references are available inside the sandbox without manual configuration. + +The same syncing happens for SSH and Modal backends — skills and credential files are uploaded via rsync or the Modal mount API before each command. + +## Troubleshooting + +### Container exits immediately + +Check logs: `docker logs hermes`. Common causes: +- Missing or invalid `.env` file — run interactively first to complete setup +- Port conflicts if running with exposed ports + +### "Permission denied" errors + +The container runs as root by default. If your host `~/.hermes/` was created by a non-root user, permissions should work. If you get errors, ensure the data directory is writable: + +```sh +chmod -R 755 ~/.hermes +``` + +### Browser tools not working + +Playwright needs shared memory. Add `--shm-size=1g` to your Docker run command: + +```sh +docker run -d \ + --name hermes \ + --shm-size=1g \ + -v ~/.hermes:/opt/data \ + nousresearch/hermes-agent gateway run +``` + +### Gateway not reconnecting after network issues + +The `--restart unless-stopped` flag handles most transient failures. If the gateway is stuck, restart the container: + +```sh +docker restart hermes +``` + +### Checking container health + +```sh +docker logs --tail 50 hermes # Recent logs +docker exec hermes hermes version # Verify version +docker stats hermes # Resource usage ``` diff --git a/website/docs/user-guide/features/api-server.md b/website/docs/user-guide/features/api-server.md index 6739ad7ab..71732285e 100644 --- a/website/docs/user-guide/features/api-server.md +++ b/website/docs/user-guide/features/api-server.md @@ -8,7 +8,7 @@ description: "Expose hermes-agent as an OpenAI-compatible API for any frontend" The API server exposes hermes-agent as an OpenAI-compatible HTTP endpoint. 
Any frontend that speaks the OpenAI format — Open WebUI, LobeChat, LibreChat, NextChat, ChatBox, and hundreds more — can connect to hermes-agent and use it as a backend.

-Your agent handles requests with its full toolset (terminal, file operations, web search, memory, skills) and returns the final response. Tool calls execute invisibly server-side.
+Your agent handles requests with its full toolset (terminal, file operations, web search, memory, skills) and returns the final response. When streaming, tool progress indicators appear inline so frontends can show what the agent is doing.

## Quick Start

@@ -85,6 +85,8 @@ Standard OpenAI Chat Completions format. Stateless — the full conversation is

**Streaming** (`"stream": true`): Returns Server-Sent Events (SSE) with token-by-token response chunks. When streaming is enabled in config, tokens are emitted live as the LLM generates them. When disabled, the full response is sent as a single SSE chunk.

+**Tool progress in streams**: When the agent calls tools during a streaming request, brief progress indicators are injected into the content stream as the tools start executing (e.g. `` `💻 pwd` ``, `` `🔍 Python docs` ``). These appear as inline markdown before the agent's response text, giving frontends like Open WebUI real-time visibility into tool execution.
+
### POST /v1/responses

OpenAI Responses API format. Supports server-side conversation state via `previous_response_id` — the server stores full conversation history (including tool calls and results) so multi-turn context is preserved without the client managing it.
diff --git a/website/docs/user-guide/features/checkpoints.md b/website/docs/user-guide/features/checkpoints.md
deleted file mode 100644
index aed879fc2..000000000
--- a/website/docs/user-guide/features/checkpoints.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Filesystem Checkpoints
-
-Hermes automatically snapshots your working directory before making file changes, giving you a safety net to roll back if something goes wrong. Checkpoints are **enabled by default**.
-
-## Quick Reference
-
-| Command | Description |
-|---------|-------------|
-| `/rollback` | List all checkpoints with change stats |
-| `/rollback <n>` | Restore to checkpoint N (also undoes last chat turn) |
-| `/rollback diff <n>` | Preview diff between checkpoint N and current state |
-| `/rollback <n> <file>` | Restore a single file from checkpoint N |
-
-## What Triggers Checkpoints
-
-- **File tools** — `write_file` and `patch`
-- **Destructive terminal commands** — `rm`, `mv`, `sed -i`, output redirects (`>`), `git reset`/`clean`
-
-## Configuration
-
-```yaml
-# ~/.hermes/config.yaml
-checkpoints:
-  enabled: true # default: true
-  max_snapshots: 50 # max checkpoints per directory
-```
-
-## Learn More
-
-For the full guide — how shadow repos work, diff previews, file-level restore, conversation undo, safety guards, and best practices — see **[Checkpoints and /rollback](../checkpoints-and-rollback.md)**.
diff --git a/website/docs/user-guide/features/context-references.md b/website/docs/user-guide/features/context-references.md index 2b58f80ca..18624150e 100644 --- a/website/docs/user-guide/features/context-references.md +++ b/website/docs/user-guide/features/context-references.md @@ -1,5 +1,6 @@ --- sidebar_position: 9 +sidebar_label: "Context References" title: "Context References" description: "Inline @-syntax for attaching files, folders, git diffs, and URLs directly into your messages" --- diff --git a/website/docs/user-guide/features/credential-pools.md b/website/docs/user-guide/features/credential-pools.md new file mode 100644 index 000000000..275e08a04 --- /dev/null +++ b/website/docs/user-guide/features/credential-pools.md @@ -0,0 +1,230 @@ +--- +title: Credential Pools +description: Pool multiple API keys or OAuth tokens per provider for automatic rotation and rate limit recovery. +sidebar_label: Credential Pools +sidebar_position: 9 +--- + +# Credential Pools + +Credential pools let you register multiple API keys or OAuth tokens for the same provider. When one key hits a rate limit or billing quota, Hermes automatically rotates to the next healthy key — keeping your session alive without switching providers. + +This is different from [fallback providers](./fallback-providers.md), which switch to a *different* provider entirely. Credential pools are same-provider rotation; fallback providers are cross-provider failover. Pools are tried first — if all pool keys are exhausted, *then* the fallback provider activates. + +## How It Works + +``` +Your request + → Pick key from pool (round_robin / least_used / fill_first / random) + → Send to provider + → 429 rate limit? + → Retry same key once (transient blip) + → Second 429 → rotate to next pool key + → All keys exhausted → fallback_model (different provider) + → 402 billing error? + → Immediately rotate to next pool key (24h cooldown) + → 401 auth expired? + → Try refreshing the token (OAuth) + → Refresh failed → rotate to next pool key + → Success → continue normally +``` + +## Quick Start + +If you already have an API key set in `.env`, Hermes auto-discovers it as a 1-key pool. To benefit from pooling, add more keys: + +```bash +# Add a second OpenRouter key +hermes auth add openrouter --api-key sk-or-v1-your-second-key + +# Add a second Anthropic key +hermes auth add anthropic --type api-key --api-key sk-ant-api03-your-second-key + +# Add an Anthropic OAuth credential (Claude Code subscription) +hermes auth add anthropic --type oauth +# Opens browser for OAuth login +``` + +Check your pools: + +```bash +hermes auth list +``` + +Output: +``` +openrouter (2 credentials): + #1 OPENROUTER_API_KEY api_key env:OPENROUTER_API_KEY ← + #2 backup-key api_key manual + +anthropic (3 credentials): + #1 hermes_pkce oauth hermes_pkce ← + #2 claude_code oauth claude_code + #3 ANTHROPIC_API_KEY api_key env:ANTHROPIC_API_KEY +``` + +The `←` marks the currently selected credential. + +## Interactive Management + +Run `hermes auth` with no subcommand for an interactive wizard: + +```bash +hermes auth +``` + +This shows your full pool status and offers a menu: + +``` +What would you like to do? + 1. Add a credential + 2. Remove a credential + 3. Reset cooldowns for a provider + 4. Set rotation strategy for a provider + 5. Exit +``` + +For providers that support both API keys and OAuth (Anthropic, Nous, Codex), the add flow asks which type: + +``` +anthropic supports both API keys and OAuth login. + 1. 
API key (paste a key from the provider dashboard)
+ 2. OAuth login (authenticate via browser)
+Type [1/2]:
+```
+
+## CLI Commands
+
+| Command | Description |
+|---------|-------------|
+| `hermes auth` | Interactive pool management wizard |
+| `hermes auth list` | Show all pools and credentials |
+| `hermes auth list <provider>` | Show a specific provider's pool |
+| `hermes auth add <provider>` | Add a credential (prompts for type and key) |
+| `hermes auth add <provider> --type api-key --api-key <key>` | Add an API key non-interactively |
+| `hermes auth add <provider> --type oauth` | Add an OAuth credential via browser login |
+| `hermes auth remove <provider> <index>` | Remove credential by 1-based index |
+| `hermes auth reset <provider>` | Clear all cooldowns/exhaustion status |
+
+## Rotation Strategies
+
+Configure via `hermes auth` → "Set rotation strategy" or in `config.yaml`:
+
+```yaml
+credential_pool_strategies:
+  openrouter: round_robin
+  anthropic: least_used
+```
+
+| Strategy | Behavior |
+|----------|----------|
+| `fill_first` (default) | Use the first healthy key until it's exhausted, then move to the next |
+| `round_robin` | Cycle through keys evenly, rotating after each selection |
+| `least_used` | Always pick the key with the lowest request count |
+| `random` | Random selection among healthy keys |
+
+## Error Recovery
+
+The pool handles different errors differently:
+
+| Error | Behavior | Cooldown |
+|-------|----------|----------|
+| **429 Rate Limit** | Retry same key once (transient). Second consecutive 429 rotates to next key | 1 hour |
+| **402 Billing/Quota** | Immediately rotate to next key | 24 hours |
+| **401 Auth Expired** | Try refreshing the OAuth token first. Rotate only if refresh fails | — |
+| **All keys exhausted** | Fall through to `fallback_model` if configured | — |
+
+The `has_retried_429` flag resets on every successful API call, so a single transient 429 doesn't trigger rotation.
+
+## Custom Endpoint Pools
+
+Custom OpenAI-compatible endpoints (Together.ai, RunPod, local servers) get their own pools, keyed by the endpoint name from `custom_providers` in config.yaml.
+
+When you set up a custom endpoint via `hermes model`, it auto-generates a name like "Together.ai" or "Local (localhost:8080)". This name becomes the pool key.
+
+```bash
+# After setting up a custom endpoint via hermes model:
+hermes auth list
+# Shows:
+#   Together.ai (1 credential):
+#     #1 config key    api_key    config:Together.ai ←
+
+# Add a second key for the same endpoint:
+hermes auth add Together.ai --api-key sk-together-second-key
+```
+
+Custom endpoint pools are stored in `auth.json` under `credential_pool` with a `custom:` prefix:
+
+```json
+{
+  "credential_pool": {
+    "openrouter": [...],
+    "custom:together.ai": [...]
+  }
+}
+```
+
+## Auto-Discovery
+
+Hermes automatically discovers credentials from multiple sources and seeds the pool on startup:
+
+| Source | Example | Auto-seeded? |
+|--------|---------|-------------|
+| Environment variables | `OPENROUTER_API_KEY`, `ANTHROPIC_API_KEY` | Yes |
+| OAuth tokens (auth.json) | Codex device code, Nous device code | Yes |
+| Claude Code credentials | `~/.claude/.credentials.json` | Yes (Anthropic) |
+| Hermes PKCE OAuth | `~/.hermes/auth.json` | Yes (Anthropic) |
+| Custom endpoint config | `model.api_key` in config.yaml | Yes (custom endpoints) |
+| Manual entries | Added via `hermes auth add` | Persisted in auth.json |
+
+Auto-seeded entries are updated on each pool load — if you remove an env var, its pool entry is automatically pruned.
Manual entries (added via `hermes auth add`) are never auto-pruned.
+
+## Thread Safety
+
+The credential pool uses a threading lock for all state mutations (`select()`, `mark_exhausted_and_rotate()`, `try_refresh_current()`, `mark_used()`). This ensures safe concurrent access when the gateway handles multiple chat sessions simultaneously.
+
+## Architecture
+
+For the full data flow diagram, see [`docs/credential-pool-flow.excalidraw`](https://excalidraw.com/#json=2Ycqhqpi6f12E_3ITyiwh,c7u9jSt5BwrmiVzHGbm87g) in the repository.
+
+The credential pool integrates at the provider resolution layer:
+
+1. **`agent/credential_pool.py`** — Pool manager: storage, selection, rotation, cooldowns
+2. **`hermes_cli/auth_commands.py`** — CLI commands and interactive wizard
+3. **`hermes_cli/runtime_provider.py`** — Pool-aware credential resolution
+4. **`run_agent.py`** — Error recovery: 429/402/401 → pool rotation → fallback
+
+## Storage
+
+Pool state is stored in `~/.hermes/auth.json` under the `credential_pool` key:
+
+```json
+{
+  "version": 1,
+  "credential_pool": {
+    "openrouter": [
+      {
+        "id": "abc123",
+        "label": "OPENROUTER_API_KEY",
+        "auth_type": "api_key",
+        "priority": 0,
+        "source": "env:OPENROUTER_API_KEY",
+        "access_token": "sk-or-v1-...",
+        "last_status": "ok",
+        "request_count": 142
+      }
+    ]
+  },
+  "credential_pool_strategies": {
+    "openrouter": "round_robin"
+  }
+}
+```
+
+Strategies are stored in `config.yaml` (not `auth.json`):
+
+```yaml
+credential_pool_strategies:
+  openrouter: round_robin
+  anthropic: least_used
+```
diff --git a/website/docs/user-guide/features/cron.md b/website/docs/user-guide/features/cron.md
index 2d0a4c836..f8b1d2c5a 100644
--- a/website/docs/user-guide/features/cron.md
+++ b/website/docs/user-guide/features/cron.md
@@ -193,6 +193,40 @@ When scheduling jobs, you specify where the output goes:

The agent's final response is automatically delivered. You do not need to call `send_message` in the cron prompt.

+### Response wrapping
+
+By default, delivered cron output is wrapped with a header and footer so the recipient knows it came from a scheduled task:
+
+```
+Cronjob Response: Morning feeds
+-------------
+
+<agent output>
+
+Note: The agent cannot see this message, and therefore cannot respond to it.
+```
+
+To deliver the raw agent output without the wrapper, set `cron.wrap_response` to `false`:
+
+```yaml
+# ~/.hermes/config.yaml
+cron:
+  wrap_response: false
+```
+
+### Silent suppression
+
+If the agent's final response starts with `[SILENT]`, delivery is suppressed entirely. The output is still saved locally for audit (in `~/.hermes/cron/output/`), but no message is sent to the delivery target.
+
+This is useful for monitoring jobs that should only report when something is wrong:
+
+```text
+Check if nginx is running. If everything is healthy, respond with only [SILENT].
+Otherwise, report the issue.
+```
+
+Failed jobs always deliver regardless of the `[SILENT]` marker — only successful runs can be silenced.
+
## Schedule formats

The agent's final response is automatically delivered — you do **not** need to include `send_message` in the cron prompt for that same destination. If a cron run calls `send_message` to the exact target the scheduler will already deliver to, Hermes skips that duplicate send and tells the model to put the user-facing content in the final response instead. Use `send_message` only for additional or different targets.
diff --git a/website/docs/user-guide/features/fallback-providers.md b/website/docs/user-guide/features/fallback-providers.md
index e46f69e35..315866378 100644
--- a/website/docs/user-guide/features/fallback-providers.md
+++ b/website/docs/user-guide/features/fallback-providers.md
@@ -7,12 +7,13 @@ sidebar_position: 8

# Fallback Providers

-Hermes Agent has two separate fallback systems that keep your sessions running when providers hit issues:
+Hermes Agent has three layers of resilience that keep your sessions running when providers hit issues:

-1. **Primary model fallback** — automatically switches to a backup provider:model when your main model fails
-2. **Auxiliary task fallback** — independent provider resolution for side tasks like vision, compression, and web extraction
+1. **[Credential pools](./credential-pools.md)** — rotate across multiple API keys for the *same* provider (tried first)
+2. **Primary model fallback** — automatically switches to a *different* provider:model when your main model fails
+3. **Auxiliary task fallback** — independent provider resolution for side tasks like vision, compression, and web extraction

-Both are optional and work independently.
+Credential pools handle same-provider rotation (e.g., multiple OpenRouter keys). This page covers cross-provider fallback. All three layers are optional and work independently.

## Primary Model Fallback

diff --git a/website/docs/user-guide/features/mcp.md b/website/docs/user-guide/features/mcp.md
index 9b8326d46..b136af15c 100644
--- a/website/docs/user-guide/features/mcp.md
+++ b/website/docs/user-guide/features/mcp.md
@@ -168,9 +168,7 @@ So a server that exposes callable tools but no resources/prompts will not get th

## Per-server filtering

-This is the main feature added by the PR work.
-
-You can now control which tools each MCP server contributes to Hermes.
+You can control which tools each MCP server contributes to Hermes, allowing fine-grained management of your tool namespace.

### Disable a server entirely

@@ -277,6 +275,14 @@ That keeps the tool list clean.

Hermes discovers MCP servers at startup and registers their tools into the normal tool registry.

+### Dynamic Tool Discovery
+
+MCP servers can notify Hermes when their available tools change at runtime by sending a `notifications/tools/list_changed` notification. When Hermes receives this notification, it automatically re-fetches the server's tool list and updates the registry — no manual `/reload-mcp` required.
+
+This is useful for MCP servers whose capabilities change dynamically (e.g. a server that adds tools when a new database schema is loaded, or removes tools when a service goes offline).
+
+The refresh is lock-protected so rapid-fire notifications from the same server don't cause overlapping refreshes. Prompt and resource change notifications (`prompts/list_changed`, `resources/list_changed`) are received but not yet acted on.
+
### Reloading

If you change MCP config, use:

@@ -285,7 +291,7 @@
/reload-mcp
```

-This reloads MCP servers from config and refreshes the available tool list.
+This reloads MCP servers from config and refreshes the available tool list. For runtime tool changes pushed by the server itself, see [Dynamic Tool Discovery](#dynamic-tool-discovery) above.

### Toolsets

@@ -403,6 +409,39 @@ Because Hermes now only registers those wrappers when both are true:

This is intentional and keeps the tool list honest.
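+
+For reference, the trigger behind the [Dynamic Tool Discovery](#dynamic-tool-discovery) refresh described above is an ordinary JSON-RPC notification defined by the MCP spec. It carries no `id` and expects no response. A minimal sketch of what a server emits over its transport (framing omitted):
+
+```json
+{
+  "jsonrpc": "2.0",
+  "method": "notifications/tools/list_changed"
+}
+```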
+## MCP Sampling Support + +MCP servers can request LLM inference from Hermes via the `sampling/createMessage` protocol. This allows an MCP server to ask Hermes to generate text on its behalf — useful for servers that need LLM capabilities but don't have their own model access. + +Sampling is **enabled by default** for all MCP servers (when the MCP SDK supports it). Configure it per-server under the `sampling` key: + +```yaml +mcp_servers: + my_server: + command: "my-mcp-server" + sampling: + enabled: true # Enable sampling (default: true) + model: "openai/gpt-4o" # Override model for sampling requests (optional) + max_tokens_cap: 4096 # Max tokens per sampling response (default: 4096) + timeout: 30 # Timeout in seconds per request (default: 30) + max_rpm: 10 # Rate limit: max requests per minute (default: 10) + max_tool_rounds: 5 # Max tool-use rounds in sampling loops (default: 5) + allowed_models: [] # Allowlist of model names the server may request (empty = any) + log_level: "info" # Audit log level: debug, info, or warning (default: info) +``` + +The sampling handler includes a sliding-window rate limiter, per-request timeouts, and tool-loop depth limits to prevent runaway usage. Metrics (request count, errors, tokens used) are tracked per server instance. + +To disable sampling for a specific server: + +```yaml +mcp_servers: + untrusted_server: + url: "https://mcp.example.com" + sampling: + enabled: false +``` + ## Running Hermes as an MCP server In addition to connecting **to** MCP servers, Hermes can also **be** an MCP server. This lets other MCP-capable agents (Claude Code, Cursor, Codex, or any MCP client) use Hermes's messaging capabilities — list conversations, read message history, and send messages across all your connected platforms. diff --git a/website/docs/user-guide/features/overview.md b/website/docs/user-guide/features/overview.md new file mode 100644 index 000000000..568797dfc --- /dev/null +++ b/website/docs/user-guide/features/overview.md @@ -0,0 +1,49 @@ +--- +title: "Features Overview" +sidebar_label: "Overview" +sidebar_position: 1 +--- + +# Features Overview + +Hermes Agent includes a rich set of capabilities that extend far beyond basic chat. From persistent memory and file-aware context to browser automation and voice conversations, these features work together to make Hermes a powerful autonomous assistant. + +## Core + +- **[Tools & Toolsets](tools.md)** — Tools are functions that extend the agent's capabilities. They're organized into logical toolsets that can be enabled or disabled per platform, covering web search, terminal execution, file editing, memory, delegation, and more. +- **[Skills System](skills.md)** — On-demand knowledge documents the agent can load when needed. Skills follow a progressive disclosure pattern to minimize token usage and are compatible with the [agentskills.io](https://agentskills.io/specification) open standard. +- **[Persistent Memory](memory.md)** — Bounded, curated memory that persists across sessions. Hermes remembers your preferences, projects, environment, and things it has learned via `MEMORY.md` and `USER.md`. +- **[Context Files](context-files.md)** — Hermes automatically discovers and loads project context files (`.hermes.md`, `AGENTS.md`, `CLAUDE.md`, `SOUL.md`, `.cursorrules`) that shape how it behaves in your project. +- **[Context References](context-references.md)** — Type `@` followed by a reference to inject files, folders, git diffs, and URLs directly into your messages. 
Hermes expands the reference inline and appends the content automatically. +- **[Checkpoints](../checkpoints-and-rollback.md)** — Hermes automatically snapshots your working directory before making file changes, giving you a safety net to roll back with `/rollback` if something goes wrong. + +## Automation + +- **[Scheduled Tasks (Cron)](cron.md)** — Schedule tasks to run automatically with natural language or cron expressions. Jobs can attach skills, deliver results to any platform, and support pause/resume/edit operations. +- **[Subagent Delegation](delegation.md)** — The `delegate_task` tool spawns child agent instances with isolated context, restricted toolsets, and their own terminal sessions. Run up to 3 concurrent subagents for parallel workstreams. +- **[Code Execution](code-execution.md)** — The `execute_code` tool lets the agent write Python scripts that call Hermes tools programmatically, collapsing multi-step workflows into a single LLM turn via sandboxed RPC execution. +- **[Event Hooks](hooks.md)** — Run custom code at key lifecycle points. Gateway hooks handle logging, alerts, and webhooks; plugin hooks handle tool interception, metrics, and guardrails. +- **[Batch Processing](batch-processing.md)** — Run the Hermes agent across hundreds or thousands of prompts in parallel, generating structured ShareGPT-format trajectory data for training data generation or evaluation. + +## Media & Web + +- **[Voice Mode](voice-mode.md)** — Full voice interaction across CLI and messaging platforms. Talk to the agent using your microphone, hear spoken replies, and have live voice conversations in Discord voice channels. +- **[Browser Automation](browser.md)** — Full browser automation with multiple backends: Browserbase cloud, Browser Use cloud, local Chrome via CDP, or local Chromium. Navigate websites, fill forms, and extract information. +- **[Vision & Image Paste](vision.md)** — Multimodal vision support. Paste images from your clipboard into the CLI and ask the agent to analyze, describe, or work with them using any vision-capable model. +- **[Image Generation](image-generation.md)** — Generate images from text prompts using FAL.ai's FLUX 2 Pro model with automatic 2x upscaling via the Clarity Upscaler. +- **[Voice & TTS](tts.md)** — Text-to-speech output and voice message transcription across all messaging platforms, with four provider options: Edge TTS (free), ElevenLabs, OpenAI TTS, and NeuTTS. + +## Integrations + +- **[Provider Routing](provider-routing.md)** — Fine-grained control over which AI providers handle your requests. Optimize for cost, speed, or quality with sorting, whitelists, blacklists, and priority ordering. +- **[Fallback Providers](fallback-providers.md)** — Automatic failover to backup LLM providers when your primary model encounters errors, including independent fallback for auxiliary tasks like vision and compression. +- **[API Server](api-server.md)** — Expose Hermes as an OpenAI-compatible HTTP endpoint. Connect any frontend that speaks the OpenAI format — Open WebUI, LobeChat, LibreChat, and more. +- **[IDE Integration (ACP)](acp.md)** — Use Hermes inside ACP-compatible editors such as VS Code, Zed, and JetBrains. Chat, tool activity, file diffs, and terminal commands render inside your editor. +- **[Honcho Memory](honcho.md)** — AI-native persistent memory for cross-session user modeling and personalization via dialectic reasoning. 
+- **[RL Training](rl-training.md)** — Generate trajectory data from agent sessions for reinforcement learning and model fine-tuning. + +## Customization + +- **[Personality & SOUL.md](personality.md)** — Fully customizable agent personality. `SOUL.md` is the primary identity file — the first thing in the system prompt — and you can swap in built-in or custom `/personality` presets per session. +- **[Skins & Themes](skins.md)** — Customize the CLI's visual presentation: banner colors, spinner faces and verbs, response-box labels, branding text, and the tool activity prefix. +- **[Plugins](plugins.md)** — Add custom tools, hooks, and integrations without modifying core code. Drop a directory into `~/.hermes/plugins/` with a `plugin.yaml` and Python code. diff --git a/website/docs/user-guide/features/plugins.md b/website/docs/user-guide/features/plugins.md index 0f2e20f17..1b10faff7 100644 --- a/website/docs/user-guide/features/plugins.md +++ b/website/docs/user-guide/features/plugins.md @@ -1,10 +1,13 @@ --- -sidebar_position: 20 +sidebar_position: 11 +sidebar_label: "Plugins" +title: "Plugins" +description: "Extend Hermes with custom tools, hooks, and integrations via the plugin system" --- # Plugins -Hermes has a plugin system for adding custom tools, hooks, slash commands, and integrations without modifying core code. +Hermes has a plugin system for adding custom tools, hooks, and integrations without modifying core code. **→ [Build a Hermes Plugin](/docs/guides/build-a-hermes-plugin)** — step-by-step guide with a complete working example. @@ -22,6 +25,56 @@ Drop a directory into `~/.hermes/plugins/` with a `plugin.yaml` and Python code: Start Hermes — your tools appear alongside built-in tools. The model can call them immediately. +### Minimal working example + +Here is a complete plugin that adds a `hello_world` tool and logs every tool call via a hook. + +**`~/.hermes/plugins/hello-world/plugin.yaml`** + +```yaml +name: hello-world +version: "1.0" +description: A minimal example plugin +``` + +**`~/.hermes/plugins/hello-world/__init__.py`** + +```python +"""Minimal Hermes plugin — registers a tool and a hook.""" + + +def register(ctx): + # --- Tool: hello_world --- + schema = { + "name": "hello_world", + "description": "Returns a friendly greeting for the given name.", + "parameters": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name to greet", + } + }, + "required": ["name"], + }, + } + + def handle_hello(params): + name = params.get("name", "World") + return f"Hello, {name}! 👋 (from the hello-world plugin)" + + ctx.register_tool("hello_world", schema, handle_hello) + + # --- Hook: log every tool call --- + def on_tool_call(tool_name, params, result): + print(f"[hello-world] tool called: {tool_name}") + + ctx.register_hook("post_tool_call", on_tool_call) +``` + +Drop both files into `~/.hermes/plugins/hello-world/`, restart Hermes, and the model can immediately call `hello_world`. The hook prints a log line after every tool invocation. + Project-local plugins under `./.hermes/plugins/` are disabled by default. Enable them only for trusted repositories by setting `HERMES_ENABLE_PROJECT_PLUGINS=true` before starting Hermes. ## What plugins can do @@ -30,7 +83,7 @@ Project-local plugins under `./.hermes/plugins/` are disabled by default. 
Enable |-----------|-----| | Add tools | `ctx.register_tool(name, schema, handler)` | | Add hooks | `ctx.register_hook("post_tool_call", callback)` | -| Add slash commands | `ctx.register_command("mycommand", handler)` | +| Inject messages | `ctx.inject_message(content, role="user")` — see [Injecting Messages](#injecting-messages) | | Ship data files | `Path(__file__).parent / "data" / "file.yaml"` | | Bundle skills | Copy `skill.md` to `~/.hermes/skills/` at load time | | Gate on env vars | `requires_env: [API_KEY]` in plugin.yaml | @@ -57,34 +110,6 @@ Plugins can register callbacks for these lifecycle events. See the **[Event Hook | `on_session_start` | New session created (first turn only) | | `on_session_end` | End of every `run_conversation` call | -## Slash commands - -Plugins can register slash commands that work in both CLI and messaging platforms: - -```python -def register(ctx): - ctx.register_command( - name="greet", - handler=lambda args: f"Hello, {args or 'world'}!", - description="Greet someone", - args_hint="[name]", - aliases=("hi",), - ) -``` - -The handler receives the argument string (everything after `/greet`) and returns a string to display. Registered commands automatically appear in `/help`, tab autocomplete, Telegram bot menu, and Slack subcommand mapping. - -| Parameter | Description | -|-----------|-------------| -| `name` | Command name without slash | -| `handler` | Callable that takes `args: str` and returns `str | None` | -| `description` | Shown in `/help` | -| `args_hint` | Usage hint, e.g. `"[name]"` | -| `aliases` | Tuple of alternative names | -| `cli_only` | Only available in CLI | -| `gateway_only` | Only available in messaging platforms | -| `gateway_config_gate` | Config dotpath (e.g. `"display.my_option"`). When set on a `cli_only` command, the command becomes available in the gateway if the config value is truthy. | - ## Managing plugins ```bash @@ -109,4 +134,27 @@ plugins: In a running session, `/plugins` shows which plugins are currently loaded. +## Injecting Messages + +Plugins can inject messages into the active conversation using `ctx.inject_message()`: + +```python +ctx.inject_message("New data arrived from the webhook", role="user") +``` + +**Signature:** `ctx.inject_message(content: str, role: str = "user") -> bool` + +How it works: + +- If the agent is **idle** (waiting for user input), the message is queued as the next input and starts a new turn. +- If the agent is **mid-turn** (actively running), the message interrupts the current operation — the same as a user typing a new message and pressing Enter. +- For non-`"user"` roles, the content is prefixed with `[role]` (e.g. `[system] ...`). +- Returns `True` if the message was queued successfully, `False` if no CLI reference is available (e.g. in gateway mode). + +This enables plugins like remote control viewers, messaging bridges, or webhook receivers to feed messages into the conversation from external sources. + +:::note +`inject_message` is only available in CLI mode. In gateway mode, there is no CLI reference and the method returns `False`. +::: + See the **[full guide](/docs/guides/build-a-hermes-plugin)** for handler contracts, schema format, hook behavior, error handling, and common mistakes. 
diff --git a/website/docs/user-guide/features/skins.md b/website/docs/user-guide/features/skins.md index cb8b38c7f..5aec20cdf 100644 --- a/website/docs/user-guide/features/skins.md +++ b/website/docs/user-guide/features/skins.md @@ -30,28 +30,150 @@ display: ## Built-in skins -| Skin | Description | Agent branding | -|------|-------------|----------------| -| `default` | Classic Hermes — gold and kawaii | `Hermes Agent` | -| `ares` | War-god theme — crimson and bronze | `Ares Agent` | -| `mono` | Monochrome — clean grayscale | `Hermes Agent` | -| `slate` | Cool blue — developer-focused | `Hermes Agent` | -| `poseidon` | Ocean-god theme — deep blue and seafoam | `Poseidon Agent` | -| `sisyphus` | Sisyphean theme — austere grayscale with persistence | `Sisyphus Agent` | -| `charizard` | Volcanic theme — burnt orange and ember | `Charizard Agent` | +| Skin | Description | Agent branding | Visual character | +|------|-------------|----------------|------------------| +| `default` | Classic Hermes — gold and kawaii | `Hermes Agent` | Warm gold borders, cornsilk text, kawaii faces in spinners. The familiar caduceus banner. Clean and inviting. | +| `ares` | War-god theme — crimson and bronze | `Ares Agent` | Deep crimson borders with bronze accents. Aggressive spinner verbs ("forging", "marching", "tempering steel"). Custom sword-and-shield ASCII art banner. | +| `mono` | Monochrome — clean grayscale | `Hermes Agent` | All grays — no color. Borders are `#555555`, text is `#c9d1d9`. Ideal for minimal terminal setups or screen recordings. | +| `slate` | Cool blue — developer-focused | `Hermes Agent` | Royal blue borders (`#4169e1`), soft blue text. Calm and professional. No custom spinner — uses default faces. | +| `poseidon` | Ocean-god theme — deep blue and seafoam | `Poseidon Agent` | Deep blue to seafoam gradient. Ocean-themed spinners ("charting currents", "sounding the depth"). Trident ASCII art banner. | +| `sisyphus` | Sisyphean theme — austere grayscale with persistence | `Sisyphus Agent` | Light grays with stark contrast. Boulder-themed spinners ("pushing uphill", "resetting the boulder", "enduring the loop"). Boulder-and-hill ASCII art banner. | +| `charizard` | Volcanic theme — burnt orange and ember | `Charizard Agent` | Warm burnt orange to ember gradient. Fire-themed spinners ("banking into the draft", "measuring burn"). Dragon-silhouette ASCII art banner. | -## What a skin can customize +## Complete list of configurable keys -| Area | Keys | -|------|------| -| Banner + response colors | `colors.banner_*`, `colors.response_border` | -| Spinner animation | `spinner.waiting_faces`, `spinner.thinking_faces`, `spinner.thinking_verbs`, `spinner.wings` | -| Branding text | `branding.agent_name`, `branding.welcome`, `branding.response_label`, `branding.prompt_symbol` | -| Tool activity prefix | `tool_prefix` | +### Colors (`colors:`) + +Controls all color values throughout the CLI. Values are hex color strings. + +| Key | Description | Default (`default` skin) | +|-----|-------------|--------------------------| +| `banner_border` | Panel border around the startup banner | `#CD7F32` (bronze) | +| `banner_title` | Title text color in the banner | `#FFD700` (gold) | +| `banner_accent` | Section headers in the banner (Available Tools, etc.) 
| `#FFBF00` (amber) | +| `banner_dim` | Muted text in the banner (separators, secondary labels) | `#B8860B` (dark goldenrod) | +| `banner_text` | Body text in the banner (tool names, skill names) | `#FFF8DC` (cornsilk) | +| `ui_accent` | General UI accent color (highlights, active elements) | `#FFBF00` | +| `ui_label` | UI labels and tags | `#4dd0e1` (teal) | +| `ui_ok` | Success indicators (checkmarks, completion) | `#4caf50` (green) | +| `ui_error` | Error indicators (failures, blocked) | `#ef5350` (red) | +| `ui_warn` | Warning indicators (caution, approval prompts) | `#ffa726` (orange) | +| `prompt` | Interactive prompt text color | `#FFF8DC` | +| `input_rule` | Horizontal rule above the input area | `#CD7F32` | +| `response_border` | Border around the agent's response box (ANSI escape) | `#FFD700` | +| `session_label` | Session label color | `#DAA520` | +| `session_border` | Session ID dim border color | `#8B8682` | + +### Spinner (`spinner:`) + +Controls the animated spinner shown while waiting for API responses. + +| Key | Type | Description | Example | +|-----|------|-------------|---------| +| `waiting_faces` | list of strings | Faces cycled while waiting for API response | `["(⚔)", "(⛨)", "(▲)"]` | +| `thinking_faces` | list of strings | Faces cycled during model reasoning | `["(⚔)", "(⌁)", "(<>)"]` | +| `thinking_verbs` | list of strings | Verbs shown in spinner messages | `["forging", "plotting", "hammering plans"]` | +| `wings` | list of [left, right] pairs | Decorative brackets around the spinner | `[["⟪⚔", "⚔⟫"], ["⟪▲", "▲⟫"]]` | + +When spinner values are empty (like in `default` and `mono`), hardcoded defaults from `display.py` are used. + +### Branding (`branding:`) + +Text strings used throughout the CLI interface. + +| Key | Description | Default | +|-----|-------------|---------| +| `agent_name` | Name shown in banner title and status display | `Hermes Agent` | +| `welcome` | Welcome message shown at CLI startup | `Welcome to Hermes Agent! Type your message or /help for commands.` | +| `goodbye` | Message shown on exit | `Goodbye! ⚕` | +| `response_label` | Label on the response box header | ` ⚕ Hermes ` | +| `prompt_symbol` | Symbol before the user input prompt | `❯ ` | +| `help_header` | Header text for the `/help` command output | `(^_^)? Available Commands` | + +### Other top-level keys + +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `tool_prefix` | string | Character prefixed to tool output lines in the CLI | `┊` | +| `tool_emojis` | dict | Per-tool emoji overrides for spinners and progress (`{tool_name: emoji}`) | `{}` | +| `banner_logo` | string | Rich-markup ASCII art logo (replaces the default HERMES_AGENT banner) | `""` | +| `banner_hero` | string | Rich-markup hero art (replaces the default caduceus art) | `""` | ## Custom skins -Create YAML files under `~/.hermes/skins/`. User skins inherit missing values from the built-in `default` skin. +Create YAML files under `~/.hermes/skins/`. User skins inherit missing values from the built-in `default` skin, so you only need to specify the keys you want to change. + +### Full custom skin YAML template + +```yaml +# ~/.hermes/skins/mytheme.yaml +# Complete skin template — all keys shown. Delete any you don't need; +# missing values automatically inherit from the 'default' skin. 
+ +name: mytheme +description: My custom theme + +colors: + banner_border: "#CD7F32" + banner_title: "#FFD700" + banner_accent: "#FFBF00" + banner_dim: "#B8860B" + banner_text: "#FFF8DC" + ui_accent: "#FFBF00" + ui_label: "#4dd0e1" + ui_ok: "#4caf50" + ui_error: "#ef5350" + ui_warn: "#ffa726" + prompt: "#FFF8DC" + input_rule: "#CD7F32" + response_border: "#FFD700" + session_label: "#DAA520" + session_border: "#8B8682" + +spinner: + waiting_faces: + - "(⚔)" + - "(⛨)" + - "(▲)" + thinking_faces: + - "(⚔)" + - "(⌁)" + - "(<>)" + thinking_verbs: + - "processing" + - "analyzing" + - "computing" + - "evaluating" + wings: + - ["⟪⚡", "⚡⟫"] + - ["⟪●", "●⟫"] + +branding: + agent_name: "My Agent" + welcome: "Welcome to My Agent! Type your message or /help for commands." + goodbye: "See you later! ⚡" + response_label: " ⚡ My Agent " + prompt_symbol: "⚡ ❯ " + help_header: "(⚡) Available Commands" + +tool_prefix: "┊" + +# Per-tool emoji overrides (optional) +tool_emojis: + terminal: "⚔" + web_search: "🔮" + read_file: "📄" + +# Custom ASCII art banners (optional, Rich markup supported) +# banner_logo: | +# [bold #FFD700] MY AGENT [/] +# banner_hero: | +# [#FFD700] Custom art here [/] +``` + +### Minimal custom skin example + +Since everything inherits from `default`, a minimal skin only needs to change what's different: ```yaml name: cyberpunk @@ -78,4 +200,7 @@ tool_prefix: "▏" - Built-in skins load from `hermes_cli/skin_engine.py`. - Unknown skins automatically fall back to `default`. -- `/skin` updates the active CLI theme immediately for the current session. \ No newline at end of file +- `/skin` updates the active CLI theme immediately for the current session. +- User skins in `~/.hermes/skins/` take precedence over built-in skins with the same name. +- Skin changes via `/skin` are session-only. To make a skin your permanent default, set it in `config.yaml`. +- The `banner_logo` and `banner_hero` fields support Rich console markup (e.g., `[bold #FF0000]text[/]`) for colored ASCII art. diff --git a/website/docs/user-guide/git-worktrees.md b/website/docs/user-guide/git-worktrees.md index 708170622..33d29506e 100644 --- a/website/docs/user-guide/git-worktrees.md +++ b/website/docs/user-guide/git-worktrees.md @@ -1,5 +1,6 @@ --- -sidebar_position: 9 +sidebar_position: 3 +sidebar_label: "Git Worktrees" title: "Git Worktrees" description: "Run multiple Hermes agents safely on the same repository using git worktrees and isolated checkouts" --- diff --git a/website/docs/user-guide/messaging/discord.md b/website/docs/user-guide/messaging/discord.md index df97930a6..2f40283ec 100644 --- a/website/docs/user-guide/messaging/discord.md +++ b/website/docs/user-guide/messaging/discord.md @@ -19,6 +19,7 @@ Before setup, here's the part most people want to know: how Hermes behaves once | **Free-response channels** | You can make specific channels mention-free with `DISCORD_FREE_RESPONSE_CHANNELS`, or disable mentions globally with `DISCORD_REQUIRE_MENTION=false`. | | **Threads** | Hermes replies in the same thread. Mention rules still apply unless that thread or its parent channel is configured as free-response. Threads stay isolated from the parent channel for session history. | | **Shared channels with multiple users** | By default, Hermes isolates session history per user inside the channel for safety and clarity. Two people talking in the same channel do not share one transcript unless you explicitly disable that. 
| +| **Messages mentioning other users** | When `DISCORD_IGNORE_NO_MENTION` is `true` (the default), Hermes stays silent if a message @mentions other users but does **not** mention the bot. This prevents the bot from jumping into conversations directed at other people. Set to `false` if you want the bot to respond to all messages regardless of who is mentioned. This only applies in server channels, not DMs. | :::tip If you want a normal bot-help channel where people can talk to Hermes without tagging it every time, add that channel to `DISCORD_FREE_RESPONSE_CHANNELS`. @@ -253,6 +254,9 @@ DISCORD_ALLOWED_USERS=284102345871466496 # Optional: channels where bot responds without @mention (comma-separated channel IDs) # DISCORD_FREE_RESPONSE_CHANNELS=1234567890,9876543210 + +# Optional: ignore messages that @mention other users but NOT the bot (default: true) +# DISCORD_IGNORE_NO_MENTION=true ``` Optional behavior settings in `~/.hermes/config.yaml`: diff --git a/website/docs/user-guide/messaging/feishu.md b/website/docs/user-guide/messaging/feishu.md index 1b7141e78..47901e353 100644 --- a/website/docs/user-guide/messaging/feishu.md +++ b/website/docs/user-guide/messaging/feishu.md @@ -18,7 +18,7 @@ The integration supports both connection modes: | Context | Behavior | |---------|----------| | Direct messages | Hermes responds to every message. | -| Group chats | Hermes responds when the bot is addressed in the chat. | +| Group chats | Hermes responds only when the bot is @mentioned in the chat. | | Shared group chats | By default, session history is isolated per user inside a shared chat. | This shared-chat behavior is controlled by `config.yaml`: @@ -46,12 +46,16 @@ Keep the App Secret private. Anyone with it can impersonate your app. ### Recommended: WebSocket mode -Use WebSocket mode when Hermes runs on your laptop, workstation, or a private server. No public URL is required. +Use WebSocket mode when Hermes runs on your laptop, workstation, or a private server. No public URL is required. The official Lark SDK opens and maintains a persistent outbound WebSocket connection with automatic reconnection. ```bash FEISHU_CONNECTION_MODE=websocket ``` +**Requirements:** The `websockets` Python package must be installed. The SDK handles connection lifecycle, heartbeats, and auto-reconnection internally. + +**How it works:** The adapter runs the Lark SDK's WebSocket client in a background executor thread. Inbound events (messages, reactions, card actions) are dispatched to the main asyncio loop. On disconnect, the SDK will attempt to reconnect automatically. + ### Optional: Webhook mode Use webhook mode only when you already run Hermes behind a reachable HTTP endpoint. @@ -60,12 +64,24 @@ Use webhook mode only when you already run Hermes behind a reachable HTTP endpoi FEISHU_CONNECTION_MODE=webhook ``` -In webhook mode, Hermes serves a Feishu endpoint at: +In webhook mode, Hermes starts an HTTP server (via `aiohttp`) and serves a Feishu endpoint at: ```text /feishu/webhook ``` +**Requirements:** The `aiohttp` Python package must be installed. + +You can customize the webhook server bind address and path: + +```bash +FEISHU_WEBHOOK_HOST=127.0.0.1 # default: 127.0.0.1 +FEISHU_WEBHOOK_PORT=8765 # default: 8765 +FEISHU_WEBHOOK_PATH=/feishu/webhook # default: /feishu/webhook +``` + +When Feishu sends a URL verification challenge (`type: url_verification`), the webhook responds automatically so you can complete the subscription setup in the Feishu developer console. 
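+
+For orientation, here is a minimal sketch of what such an endpoint does: it answers the URL-verification challenge and checks the request signature described under [Security](#security) below. This is not Hermes's implementation; the handler shape is illustrative, and the `x-lark-request-timestamp` / `x-lark-request-nonce` header names follow Lark's documented scheme.
+
+```python
+import hashlib
+import hmac
+import json
+
+from aiohttp import web
+
+ENCRYPT_KEY = "your-encrypt-key"  # value of FEISHU_ENCRYPT_KEY
+
+
+async def feishu_webhook(request: web.Request) -> web.Response:
+    body = await request.read()
+
+    # Signature check: SHA256(timestamp + nonce + encrypt_key + body),
+    # compared to the x-lark-signature header in constant time.
+    ts = request.headers.get("x-lark-request-timestamp", "")
+    nonce = request.headers.get("x-lark-request-nonce", "")
+    expected = hashlib.sha256((ts + nonce + ENCRYPT_KEY).encode() + body).hexdigest()
+    if not hmac.compare_digest(expected, request.headers.get("x-lark-signature", "")):
+        return web.Response(status=401)
+
+    payload = json.loads(body)
+    if payload.get("type") == "url_verification":
+        # Echo the challenge so the Feishu console accepts the endpoint.
+        return web.json_response({"challenge": payload.get("challenge")})
+
+    # ... dispatch normal message events here ...
+    return web.json_response({"ok": True})
+
+
+app = web.Application()
+app.router.add_post("/feishu/webhook", feishu_webhook)
+# web.run_app(app, host="127.0.0.1", port=8765)
+```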
+ ## Step 3: Configure Hermes ### Option A: Interactive Setup @@ -116,13 +132,233 @@ FEISHU_HOME_CHANNEL=oc_xxx ## Security -For production use, set an allowlist: +### User Allowlist + +For production use, set an allowlist of Feishu Open IDs: ```bash FEISHU_ALLOWED_USERS=ou_xxx,ou_yyy ``` -If you leave the allowlist empty, anyone who can reach the bot may be able to use it. +If you leave the allowlist empty, anyone who can reach the bot may be able to use it. In group chats, the allowlist is checked against the sender's open_id before the message is processed. + +### Webhook Encryption Key + +When running in webhook mode, set an encryption key to enable signature verification of inbound webhook payloads: + +```bash +FEISHU_ENCRYPT_KEY=your-encrypt-key +``` + +This key is found in the **Event Subscriptions** section of your Feishu app configuration. When set, the adapter verifies every webhook request using the signature algorithm: + +``` +SHA256(timestamp + nonce + encrypt_key + body) +``` + +The computed hash is compared against the `x-lark-signature` header using timing-safe comparison. Requests with invalid or missing signatures are rejected with HTTP 401. + +:::tip +In WebSocket mode, signature verification is handled by the SDK itself, so `FEISHU_ENCRYPT_KEY` is optional. In webhook mode, it is strongly recommended for production. +::: + +### Verification Token + +An additional layer of authentication that checks the `token` field inside webhook payloads: + +```bash +FEISHU_VERIFICATION_TOKEN=your-verification-token +``` + +This token is also found in the **Event Subscriptions** section of your Feishu app. When set, every inbound webhook payload must contain a matching `token` in its `header` object. Mismatched tokens are rejected with HTTP 401. + +Both `FEISHU_ENCRYPT_KEY` and `FEISHU_VERIFICATION_TOKEN` can be used together for defense in depth. + +## Group Message Policy + +The `FEISHU_GROUP_POLICY` environment variable controls whether and how Hermes responds in group chats: + +```bash +FEISHU_GROUP_POLICY=allowlist # default +``` + +| Value | Behavior | +|-------|----------| +| `open` | Hermes responds to @mentions from any user in any group. | +| `allowlist` | Hermes only responds to @mentions from users listed in `FEISHU_ALLOWED_USERS`. | +| `disabled` | Hermes ignores all group messages entirely. | + +In all modes, the bot must be explicitly @mentioned (or @all) in the group before the message is processed. Direct messages bypass this gate. + +### Bot Identity for @Mention Gating + +For precise @mention detection in groups, the adapter needs to know the bot's identity. It can be provided explicitly: + +```bash +FEISHU_BOT_OPEN_ID=ou_xxx +FEISHU_BOT_USER_ID=xxx +FEISHU_BOT_NAME=MyBot +``` + +If none of these are set, the adapter will attempt to auto-discover the bot name via the Application Info API on startup. For this to work, grant the `admin:app.info:readonly` or `application:application:self_manage` permission scope. + +## Interactive Card Actions + +When users click buttons or interact with interactive cards sent by the bot, the adapter routes these as synthetic `/card` command events: + +- Button clicks become: `/card button {"key": "value", ...}` +- The action's `value` payload from the card definition is included as JSON. +- Card actions are deduplicated with a 15-minute window to prevent double processing. + +Card action events are dispatched with `MessageType.COMMAND`, so they flow through the normal command processing pipeline. 
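+
+For example, a button whose card definition carries a `value` payload produces a synthetic command like the one shown below. The card JSON shape is illustrative (Feishu's interactive card format); the command routing is what the adapter guarantees.
+
+```python
+# Button element inside an interactive card sent by the bot (illustrative)
+approve_button = {
+    "tag": "button",
+    "text": {"tag": "plain_text", "content": "Approve"},
+    "value": {"action": "approve", "ticket": "42"},
+}
+
+# Clicking it dispatches a synthetic command to the agent:
+#   /card button {"action": "approve", "ticket": "42"}
+```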
+
+To use this feature, enable the **Interactive Card** event in your Feishu app's event subscriptions (`card.action.trigger`).
+
+## Media Support
+
+### Inbound (receiving)
+
+The adapter receives and caches the following media types from users:
+
+| Type | Extensions | How it's processed |
+|------|-----------|-------------------|
+| **Images** | .jpg, .jpeg, .png, .gif, .webp, .bmp | Downloaded via Feishu API and cached locally |
+| **Audio** | .ogg, .mp3, .wav, .m4a, .aac, .flac, .opus, .webm | Downloaded and cached locally |
+| **Video** | .mp4, .mov, .avi, .mkv, .webm, .m4v, .3gp | Downloaded and cached as documents |
+| **Files** | .pdf, .doc, .docx, .xls, .xlsx, .ppt, .pptx, and more | Downloaded and cached as documents; small text files are auto-extracted |
+
+Media from rich-text (post) messages, including inline images and file attachments, is also extracted and cached.
+
+For small text-based documents (.txt, .md), the file content is automatically injected into the message text so the agent can read it directly without needing tools.
+
+### Outbound (sending)
+
+| Method | What it sends |
+|--------|--------------|
+| `send` | Text or rich post messages (auto-detected based on markdown content) |
+| `send_image` / `send_image_file` | Uploads image to Feishu, then sends as native image bubble (with optional caption) |
+| `send_document` | Uploads file to Feishu API, then sends as file attachment |
+| `send_voice` | Uploads audio file as a Feishu file attachment |
+| `send_video` | Uploads video and sends as native media message |
+| `send_animation` | GIFs are downgraded to file attachments (Feishu has no native GIF bubble) |
+
+File upload routing is automatic based on extension:
+
+- `.ogg`, `.opus` → uploaded as `opus` audio
+- `.mp4`, `.mov`, `.avi`, `.m4v` → uploaded as `mp4` media
+- `.pdf`, `.doc(x)`, `.xls(x)`, `.ppt(x)` → uploaded with their document type
+- Everything else → uploaded as a generic stream file
+
+## Markdown Rendering and Post Fallback
+
+When outbound text contains markdown formatting (headings, bold, lists, code blocks, links, etc.), the adapter automatically sends it as a Feishu **post** message with an embedded `md` tag rather than as plain text. This enables rich rendering in the Feishu client.
+
+If the Feishu API rejects the post payload (e.g., due to unsupported markdown constructs), the adapter automatically falls back to sending as plain text with markdown stripped. This two-stage fallback ensures messages are always delivered. (A sketch of this logic appears below, after the ACK reactions section.)
+
+Plain text messages (no markdown detected) are sent as the simple `text` message type.
+
+## ACK Emoji Reactions
+
+When the adapter receives an inbound message, it immediately adds an ✅ (OK) emoji reaction to signal that the message was received and is being processed. This provides visual feedback before the agent completes its response.
+
+The reaction is persistent — it remains on the message after the response is sent, serving as a receipt marker.
+
+User reactions on bot messages are also tracked. If a user adds or removes an emoji reaction on a message sent by the bot, it is routed as a synthetic text event (`reaction:added:EMOJI_TYPE` or `reaction:removed:EMOJI_TYPE`) so the agent can respond to feedback.
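+
+Here is the sketch referenced above: a minimal, self-contained illustration of the two-stage fallback. The markdown heuristic, the stripping, and the `FeishuAPIError` stand-in are invented for the example, not taken from the adapter:
+
+```python
+import re
+
+
+class FeishuAPIError(Exception):
+    """Stand-in for the error raised when a post payload is rejected."""
+
+
+def looks_like_markdown(text: str) -> bool:
+    # Rough heuristic: headings, bold, lists, fenced code, or links
+    return bool(re.search(r"(^#{1,6}\s|\*\*|^[-*]\s|```|\[.+\]\(.+\))", text, re.M))
+
+
+def strip_markdown(text: str) -> str:
+    # Crude fallback: drop the most common markdown markers
+    return re.sub(r"[*_`#>]+", "", text)
+
+
+def deliver(send_post, send_plain, chat_id: str, text: str) -> None:
+    """Two-stage delivery: rich post first, plain text on rejection."""
+    if not looks_like_markdown(text):
+        send_plain(chat_id, text)  # simple `text` message type
+        return
+    try:
+        send_post(chat_id, text)  # post message with embedded `md` tag
+    except FeishuAPIError:
+        send_plain(chat_id, strip_markdown(text))  # stage two: plain text
+```
+
+The important property is the ordering: the rich post is attempted first, and plain text is used only when the API rejects the payload.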
+ +## Burst Protection and Batching + +The adapter includes debouncing for rapid message bursts to avoid overwhelming the agent: + +### Text Batching + +When a user sends multiple text messages in quick succession, they are merged into a single event before being dispatched: + +| Setting | Env Var | Default | +|---------|---------|---------| +| Quiet period | `HERMES_FEISHU_TEXT_BATCH_DELAY_SECONDS` | 0.6s | +| Max messages per batch | `HERMES_FEISHU_TEXT_BATCH_MAX_MESSAGES` | 8 | +| Max characters per batch | `HERMES_FEISHU_TEXT_BATCH_MAX_CHARS` | 4000 | + +### Media Batching + +Multiple media attachments sent in quick succession (e.g., dragging several images) are merged into a single event: + +| Setting | Env Var | Default | +|---------|---------|---------| +| Quiet period | `HERMES_FEISHU_MEDIA_BATCH_DELAY_SECONDS` | 0.8s | + +### Per-Chat Serialization + +Messages within the same chat are processed serially (one at a time) to maintain conversation coherence. Each chat has its own lock, so messages in different chats are processed concurrently. + +## Rate Limiting (Webhook Mode) + +In webhook mode, the adapter enforces per-IP rate limiting to protect against abuse: + +- **Window:** 60-second sliding window +- **Limit:** 120 requests per window per (app_id, path, IP) triple +- **Tracking cap:** Up to 4096 unique keys tracked (prevents unbounded memory growth) + +Requests that exceed the limit receive HTTP 429 (Too Many Requests). + +### Webhook Anomaly Tracking + +The adapter tracks consecutive error responses per IP address. After 25 consecutive errors from the same IP within a 6-hour window, a warning is logged. This helps detect misconfigured clients or probing attempts. + +Additional webhook protections: +- **Body size limit:** 1 MB maximum +- **Body read timeout:** 30 seconds +- **Content-Type enforcement:** Only `application/json` is accepted + +## Deduplication + +Inbound messages are deduplicated using message IDs with a 24-hour TTL. The dedup state is persisted across restarts to `~/.hermes/feishu_seen_message_ids.json`. 
+ +| Setting | Env Var | Default | +|---------|---------|---------| +| Cache size | `HERMES_FEISHU_DEDUP_CACHE_SIZE` | 2048 entries | + +## All Environment Variables + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `FEISHU_APP_ID` | ✅ | — | Feishu/Lark App ID | +| `FEISHU_APP_SECRET` | ✅ | — | Feishu/Lark App Secret | +| `FEISHU_DOMAIN` | — | `feishu` | `feishu` (China) or `lark` (international) | +| `FEISHU_CONNECTION_MODE` | — | `websocket` | `websocket` or `webhook` | +| `FEISHU_ALLOWED_USERS` | — | _(empty)_ | Comma-separated open_id list for user allowlist | +| `FEISHU_HOME_CHANNEL` | — | — | Chat ID for cron/notification output | +| `FEISHU_ENCRYPT_KEY` | — | _(empty)_ | Encrypt key for webhook signature verification | +| `FEISHU_VERIFICATION_TOKEN` | — | _(empty)_ | Verification token for webhook payload auth | +| `FEISHU_GROUP_POLICY` | — | `allowlist` | Group message policy: `open`, `allowlist`, `disabled` | +| `FEISHU_BOT_OPEN_ID` | — | _(empty)_ | Bot's open_id (for @mention detection) | +| `FEISHU_BOT_USER_ID` | — | _(empty)_ | Bot's user_id (for @mention detection) | +| `FEISHU_BOT_NAME` | — | _(empty)_ | Bot's display name (for @mention detection) | +| `FEISHU_WEBHOOK_HOST` | — | `127.0.0.1` | Webhook server bind address | +| `FEISHU_WEBHOOK_PORT` | — | `8765` | Webhook server port | +| `FEISHU_WEBHOOK_PATH` | — | `/feishu/webhook` | Webhook endpoint path | +| `HERMES_FEISHU_DEDUP_CACHE_SIZE` | — | `2048` | Max deduplicated message IDs to track | +| `HERMES_FEISHU_TEXT_BATCH_DELAY_SECONDS` | — | `0.6` | Text burst debounce quiet period | +| `HERMES_FEISHU_TEXT_BATCH_MAX_MESSAGES` | — | `8` | Max messages merged per text batch | +| `HERMES_FEISHU_TEXT_BATCH_MAX_CHARS` | — | `4000` | Max characters merged per text batch | +| `HERMES_FEISHU_MEDIA_BATCH_DELAY_SECONDS` | — | `0.8` | Media burst debounce quiet period | + +## Troubleshooting + +| Problem | Fix | +|---------|-----| +| `lark-oapi not installed` | Install the SDK: `pip install lark-oapi` | +| `websockets not installed; websocket mode unavailable` | Install websockets: `pip install websockets` | +| `aiohttp not installed; webhook mode unavailable` | Install aiohttp: `pip install aiohttp` | +| `FEISHU_APP_ID or FEISHU_APP_SECRET not set` | Set both env vars or configure via `hermes gateway setup` | +| `Another local Hermes gateway is already using this Feishu app_id` | Only one Hermes instance can use the same app_id at a time. Stop the other gateway first. | +| Bot doesn't respond in groups | Ensure the bot is @mentioned, check `FEISHU_GROUP_POLICY`, and verify the sender is in `FEISHU_ALLOWED_USERS` if policy is `allowlist` | +| `Webhook rejected: invalid verification token` | Ensure `FEISHU_VERIFICATION_TOKEN` matches the token in your Feishu app's Event Subscriptions config | +| `Webhook rejected: invalid signature` | Ensure `FEISHU_ENCRYPT_KEY` matches the encrypt key in your Feishu app config | +| Post messages show as plain text | The Feishu API rejected the post payload; this is normal fallback behavior. Check logs for details. | +| Images/files not received by bot | Grant `im:message` and `im:resource` permission scopes to your Feishu app | +| Bot identity not auto-detected | Grant `admin:app.info:readonly` scope, or set `FEISHU_BOT_OPEN_ID` / `FEISHU_BOT_NAME` manually | +| `Webhook rate limit exceeded` | More than 120 requests/minute from the same IP. This is usually a misconfiguration or loop. 
| ## Toolset diff --git a/website/docs/user-guide/messaging/index.md b/website/docs/user-guide/messaging/index.md index 9073e45ff..fa662305b 100644 --- a/website/docs/user-guide/messaging/index.md +++ b/website/docs/user-guide/messaging/index.md @@ -10,6 +10,26 @@ Chat with Hermes from Telegram, Discord, Slack, WhatsApp, Signal, SMS, Email, Ho For the full voice feature set — including CLI microphone mode, spoken replies in messaging, and Discord voice-channel conversations — see [Voice Mode](/docs/user-guide/features/voice-mode) and [Use Voice Mode with Hermes](/docs/guides/use-voice-mode-with-hermes). +## Platform Comparison + +| Platform | Voice | Images | Files | Threads | Reactions | Typing | Streaming | +|----------|:-----:|:------:|:-----:|:-------:|:---------:|:------:|:---------:| +| Telegram | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | +| Discord | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| Slack | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| WhatsApp | — | ✅ | ✅ | — | — | ✅ | ✅ | +| Signal | — | ✅ | ✅ | — | — | ✅ | ✅ | +| SMS | — | — | — | — | — | — | — | +| Email | — | ✅ | ✅ | ✅ | — | — | — | +| Home Assistant | — | — | — | — | — | — | — | +| Mattermost | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | +| Matrix | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | +| DingTalk | — | — | — | — | — | ✅ | ✅ | +| Feishu/Lark | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| WeCom | ✅ | ✅ | ✅ | — | — | ✅ | ✅ | + +**Voice** = TTS audio replies and/or voice message transcription. **Images** = send/receive images. **Files** = send/receive file attachments. **Threads** = threaded conversations. **Reactions** = emoji reactions on messages. **Typing** = typing indicator while processing. **Streaming** = progressive message updates via editing. + ## Architecture ```mermaid diff --git a/website/docs/user-guide/messaging/matrix.md b/website/docs/user-guide/messaging/matrix.md index 020e15bd6..70b8855a2 100644 --- a/website/docs/user-guide/messaging/matrix.md +++ b/website/docs/user-guide/messaging/matrix.md @@ -352,3 +352,4 @@ For more information on securing your Hermes Agent deployment, see the [Security - **Federation**: If you're on a federated homeserver, the bot can communicate with users from other servers — just add their full `@user:server` IDs to `MATRIX_ALLOWED_USERS`. - **Auto-join**: The bot automatically accepts room invites and joins. It starts responding immediately after joining. - **Media support**: Hermes can send and receive images, audio, video, and file attachments. Media is uploaded to your homeserver using the Matrix content repository API. +- **Native voice messages (MSC3245)**: The Matrix adapter automatically tags outgoing voice messages with the `org.matrix.msc3245.voice` flag. This means TTS responses and voice audio are rendered as **native voice bubbles** in Element and other clients that support MSC3245, rather than as generic audio file attachments. Incoming voice messages with the MSC3245 flag are also correctly identified and routed to speech-to-text transcription. No configuration is needed — this works automatically. diff --git a/website/docs/user-guide/messaging/open-webui.md b/website/docs/user-guide/messaging/open-webui.md index a3eb5fbc0..7d4eaee36 100644 --- a/website/docs/user-guide/messaging/open-webui.md +++ b/website/docs/user-guide/messaging/open-webui.md @@ -147,12 +147,16 @@ When you send a message in Open WebUI: 1. Open WebUI sends a `POST /v1/chat/completions` request with your message and conversation history 2. Hermes Agent creates an AIAgent instance with its full toolset 3. 
The agent processes your request — it may call tools (terminal, file operations, web search, etc.) -4. Tool calls happen invisibly server-side -5. The agent's final text response is returned to Open WebUI +4. As tools execute, **inline progress messages stream to the UI** so you can see what the agent is doing (e.g. `` `💻 ls -la` ``, `` `🔍 Python 3.12 release` ``) +5. The agent's final text response streams back to Open WebUI 6. Open WebUI displays the response in its chat interface Your agent has access to all the same tools and capabilities as when using the CLI or Telegram — the only difference is the frontend. +:::tip Tool Progress +With streaming enabled (the default), you'll see brief inline indicators as tools run — the tool emoji and its key argument. These appear in the response stream before the agent's final answer, giving you visibility into what's happening behind the scenes. +::: + ## Configuration Reference ### Hermes Agent (API server) diff --git a/website/docs/user-guide/messaging/slack.md b/website/docs/user-guide/messaging/slack.md index f011dcd78..21511f77d 100644 --- a/website/docs/user-guide/messaging/slack.md +++ b/website/docs/user-guide/messaging/slack.md @@ -237,6 +237,60 @@ Make sure the bot has been **invited to the channel** (`/invite @Hermes Agent`). --- +## Multi-Workspace Support + +Hermes can connect to **multiple Slack workspaces** simultaneously using a single gateway instance. Each workspace is authenticated independently with its own bot user ID. + +### Configuration + +Provide multiple bot tokens as a **comma-separated list** in `SLACK_BOT_TOKEN`: + +```bash +# Multiple bot tokens — one per workspace +SLACK_BOT_TOKEN=xoxb-workspace1-token,xoxb-workspace2-token,xoxb-workspace3-token + +# A single app-level token is still used for Socket Mode +SLACK_APP_TOKEN=xapp-your-app-token +``` + +Or in `~/.hermes/config.yaml`: + +```yaml +platforms: + slack: + token: "xoxb-workspace1-token,xoxb-workspace2-token" +``` + +### OAuth Token File + +In addition to tokens in the environment or config, Hermes also loads tokens from an **OAuth token file** at: + +``` +~/.hermes/platforms/slack/slack_tokens.json +``` + +This file is a JSON object mapping team IDs to token entries: + +```json +{ + "T01ABC2DEF3": { + "token": "xoxb-workspace-token-here", + "team_name": "My Workspace" + } +} +``` + +Tokens from this file are merged with any tokens specified via `SLACK_BOT_TOKEN`. Duplicate tokens are automatically deduplicated. + +### How it works + +- The **first token** in the list is the primary token, used for the Socket Mode connection (AsyncApp). +- Each token is authenticated via `auth.test` on startup. The gateway maps each `team_id` to its own `WebClient` and `bot_user_id`. +- When a message arrives, Hermes uses the correct workspace-specific client to respond. +- The primary `bot_user_id` (from the first token) is used for backward compatibility with features that expect a single bot identity. 
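+
+The sketch below shows the shape of this startup flow, assuming `slack_sdk`'s async client. The merge and dedupe logic is simplified and is not Hermes's actual code; only the token file path and JSON shape come from above.
+
+```python
+import json
+import os
+from pathlib import Path
+
+from slack_sdk.web.async_client import AsyncWebClient
+
+TOKEN_FILE = Path.home() / ".hermes" / "platforms" / "slack" / "slack_tokens.json"
+
+
+async def build_workspace_clients() -> dict[str, AsyncWebClient]:
+    """Map each team_id to its own authenticated client."""
+    env = os.environ.get("SLACK_BOT_TOKEN", "")
+    tokens = [t.strip() for t in env.split(",") if t.strip()]
+    if TOKEN_FILE.exists():
+        for entry in json.loads(TOKEN_FILE.read_text()).values():
+            tokens.append(entry["token"])
+
+    clients: dict[str, AsyncWebClient] = {}
+    for token in dict.fromkeys(tokens):  # dedupe while preserving order
+        client = AsyncWebClient(token=token)
+        auth = await client.auth_test()  # resolves team_id and bot_user_id
+        clients[auth["team_id"]] = client
+    return clients
+```
+
+The first token in the merged list would remain the primary identity for the Socket Mode connection, as described above.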
+ +--- + ## Voice Messages Hermes supports voice on Slack: diff --git a/website/docs/user-guide/messaging/sms.md b/website/docs/user-guide/messaging/sms.md index 0aa835ffe..84a3b8fa2 100644 --- a/website/docs/user-guide/messaging/sms.md +++ b/website/docs/user-guide/messaging/sms.md @@ -1,5 +1,6 @@ --- sidebar_position: 8 +sidebar_label: "SMS (Twilio)" title: "SMS (Twilio)" description: "Set up Hermes Agent as an SMS chatbot via Twilio" --- diff --git a/website/docs/user-guide/messaging/telegram.md b/website/docs/user-guide/messaging/telegram.md index be99eaa75..473619ccf 100644 --- a/website/docs/user-guide/messaging/telegram.md +++ b/website/docs/user-guide/messaging/telegram.md @@ -112,6 +112,66 @@ hermes gateway The bot should come online within seconds. Send it a message on Telegram to verify. +## Webhook Mode + +By default, Hermes connects to Telegram using **long polling** — the gateway makes outbound requests to Telegram's servers to fetch new updates. This works well for local and always-on deployments. + +For **cloud deployments** (Fly.io, Railway, Render, etc.), **webhook mode** is more cost-effective. These platforms can auto-wake suspended machines on inbound HTTP traffic, but not on outbound connections. Since polling is outbound, a polling bot can never sleep. Webhook mode flips the direction — Telegram pushes updates to your bot's HTTPS URL, enabling sleep-when-idle deployments. + +| | Polling (default) | Webhook | +|---|---|---| +| Direction | Gateway → Telegram (outbound) | Telegram → Gateway (inbound) | +| Best for | Local, always-on servers | Cloud platforms with auto-wake | +| Setup | No extra config | Set `TELEGRAM_WEBHOOK_URL` | +| Idle cost | Machine must stay running | Machine can sleep between messages | + +### Configuration + +Add the following to `~/.hermes/.env`: + +```bash +TELEGRAM_WEBHOOK_URL=https://my-app.fly.dev/telegram +# TELEGRAM_WEBHOOK_PORT=8443 # optional, default 8443 +# TELEGRAM_WEBHOOK_SECRET=mysecret # optional, recommended +``` + +| Variable | Required | Description | +|----------|----------|-------------| +| `TELEGRAM_WEBHOOK_URL` | Yes | Public HTTPS URL where Telegram will send updates. The URL path is auto-extracted (e.g., `/telegram` from the example above). | +| `TELEGRAM_WEBHOOK_PORT` | No | Local port the webhook server listens on (default: `8443`). | +| `TELEGRAM_WEBHOOK_SECRET` | No | Secret token for verifying that updates actually come from Telegram. **Strongly recommended** for production deployments. | + +When `TELEGRAM_WEBHOOK_URL` is set, the gateway starts an HTTP webhook server instead of polling. When unset, polling mode is used — no behavior change from previous versions. + +### Cloud deployment example (Fly.io) + +1. Add the env vars to your Fly.io app secrets: + +```bash +fly secrets set TELEGRAM_WEBHOOK_URL=https://my-app.fly.dev/telegram +fly secrets set TELEGRAM_WEBHOOK_SECRET=$(openssl rand -hex 32) +``` + +2. Expose the webhook port in your `fly.toml`: + +```toml +[[services]] + internal_port = 8443 + protocol = "tcp" + + [[services.ports]] + handlers = ["tls", "http"] + port = 443 +``` + +3. Deploy: + +```bash +fly deploy +``` + +The gateway log should show: `[telegram] Connected to Telegram (webhook mode)`. + ## Home Channel Use the `/sethome` command in any Telegram chat (DM or group) to designate it as the **home channel**. Scheduled tasks (cron jobs) deliver their results to this channel. 
@@ -258,6 +318,73 @@
- **Privacy policy:** Telegram now requires bots to have a privacy policy. Set one via BotFather with `/setprivacy_policy`, or Telegram may auto-generate a placeholder. This is particularly important if your bot is public-facing.
- **Message streaming:** Bot API 9.x added support for streaming long responses, which can improve perceived latency for lengthy agent replies.

+## Webhook Server Details
+
+When `TELEGRAM_WEBHOOK_URL` is set (see [Webhook Mode](#webhook-mode) above), the gateway starts an HTTP server listening on `0.0.0.0:<TELEGRAM_WEBHOOK_PORT>` and registers the webhook URL with Telegram. The URL path is extracted from the webhook URL (defaults to `/telegram`). If `TELEGRAM_WEBHOOK_SECRET` is not set, a secret token is auto-generated for update verification.
+
+Webhook mode can also be enabled in `~/.hermes/config.yaml`:
+
+```yaml
+telegram:
+  webhook_mode: true
+```
+
+:::warning
+Telegram requires a **valid TLS certificate** on the webhook endpoint. Self-signed certificates will be rejected. Use a reverse proxy (nginx, Caddy) or a platform that provides TLS termination (Fly.io, Railway, Cloudflare Tunnel).
+:::
+
+## DNS-over-HTTPS Fallback IPs
+
+In some restricted networks, `api.telegram.org` may resolve to an IP that is unreachable. The Telegram adapter includes a **fallback IP** mechanism that transparently retries connections against alternative IPs while preserving the correct TLS hostname and SNI.
+
+### How it works
+
+1. If `TELEGRAM_FALLBACK_IPS` is set, those IPs are used directly.
+2. Otherwise, the adapter automatically queries **Google DNS** and **Cloudflare DNS** via DNS-over-HTTPS (DoH) to discover alternative IPs for `api.telegram.org`.
+3. IPs returned by DoH that differ from the system DNS result are used as fallbacks.
+4. If DoH is also blocked, a hardcoded seed IP (`149.154.167.220`) is used as a last resort.
+5. Once a fallback IP succeeds, it becomes "sticky" — subsequent requests use it directly without retrying the primary path first.
+
+### Configuration
+
+```bash
+# Explicit fallback IPs (comma-separated)
+TELEGRAM_FALLBACK_IPS=149.154.167.220,149.154.167.221
+```
+
+Or in `~/.hermes/config.yaml`:
+
+```yaml
+platforms:
+  telegram:
+    extra:
+      fallback_ips:
+        - "149.154.167.220"
+```
+
+:::tip
+You usually don't need to configure this manually. The auto-discovery via DoH handles most restricted-network scenarios. The `TELEGRAM_FALLBACK_IPS` env var is only needed if DoH is also blocked on your network.
+:::
+
 ## Troubleshooting

 | Problem | Solution |
@@ -268,6 +395,7 @@ Topics created outside of the config (e.g., by manually calling the Telegram API
| Voice messages not transcribed | Verify STT is available: install `faster-whisper` for local transcription, or set `GROQ_API_KEY` / `VOICE_TOOLS_OPENAI_KEY` in `~/.hermes/.env`.
| | Voice replies are files, not bubbles | Install `ffmpeg` (needed for Edge TTS Opus conversion). | | Bot token revoked/invalid | Generate a new token via `/revoke` then `/newbot` or `/token` in BotFather. Update your `.env` file. | +| Webhook not receiving updates | Verify `TELEGRAM_WEBHOOK_URL` is publicly reachable (test with `curl`). Ensure your platform/reverse proxy routes inbound HTTPS traffic from the URL's port to the local listen port configured by `TELEGRAM_WEBHOOK_PORT` (they do not need to be the same number). Ensure SSL/TLS is active — Telegram only sends to HTTPS URLs. Check firewall rules. | ## Exec Approval diff --git a/website/docs/user-guide/messaging/wecom.md b/website/docs/user-guide/messaging/wecom.md index e5a551b8f..1a078a892 100644 --- a/website/docs/user-guide/messaging/wecom.md +++ b/website/docs/user-guide/messaging/wecom.md @@ -13,6 +13,7 @@ Connect Hermes to [WeCom](https://work.weixin.qq.com/) (企业微信), Tencent's - A WeCom organization account - An AI Bot created in the WeCom Admin Console - The Bot ID and Secret from the bot's credentials page +- Python packages: `aiohttp` and `httpx` ## Setup @@ -56,10 +57,12 @@ hermes gateway start - **WebSocket transport** — persistent connection, no public endpoint needed - **DM and group messaging** — configurable access policies +- **Per-group sender allowlists** — fine-grained control over who can interact in each group - **Media support** — images, files, voice, video upload and download - **AES-encrypted media** — automatic decryption for inbound attachments - **Quote context** — preserves reply threading - **Markdown rendering** — rich text responses +- **Reply-mode streaming** — correlates responses to inbound message context - **Auto-reconnect** — exponential backoff on connection drops ## Configuration Options @@ -75,12 +78,187 @@ Set these in `config.yaml` under `platforms.wecom.extra`: | `group_policy` | `open` | Group access: `open`, `allowlist`, `disabled` | | `allow_from` | `[]` | User IDs allowed for DMs (when dm_policy=allowlist) | | `group_allow_from` | `[]` | Group IDs allowed (when group_policy=allowlist) | +| `groups` | `{}` | Per-group configuration (see below) | + +## Access Policies + +### DM Policy + +Controls who can send direct messages to the bot: + +| Value | Behavior | +|-------|----------| +| `open` | Anyone can DM the bot (default) | +| `allowlist` | Only user IDs in `allow_from` can DM | +| `disabled` | All DMs are ignored | +| `pairing` | Pairing mode (for initial setup) | + +```bash +WECOM_DM_POLICY=allowlist +``` + +### Group Policy + +Controls which groups the bot responds in: + +| Value | Behavior | +|-------|----------| +| `open` | Bot responds in all groups (default) | +| `allowlist` | Bot only responds in group IDs listed in `group_allow_from` | +| `disabled` | All group messages are ignored | + +```bash +WECOM_GROUP_POLICY=allowlist +``` + +### Per-Group Sender Allowlists + +For fine-grained control, you can restrict which users are allowed to interact with the bot within specific groups. This is configured in `config.yaml`: + +```yaml +platforms: + wecom: + enabled: true + extra: + bot_id: "your-bot-id" + secret: "your-secret" + group_policy: "allowlist" + group_allow_from: + - "group_id_1" + - "group_id_2" + groups: + group_id_1: + allow_from: + - "user_alice" + - "user_bob" + group_id_2: + allow_from: + - "user_charlie" + "*": + allow_from: + - "user_admin" +``` + +**How it works:** + +1. 
The `group_policy` and `group_allow_from` controls determine whether a group is allowed at all.
+2. If a group passes the top-level check, the `groups.<group_id>.allow_from` list (if present) further restricts which senders within that group can interact with the bot.
+3. A wildcard `"*"` group entry serves as a default for groups not explicitly listed.
+4. Allowlist entries support the `*` wildcard to allow all users, and entries are case-insensitive.
+5. Entries can optionally use the `wecom:user:` or `wecom:group:` prefix format — the prefix is stripped automatically.
+
+If no `allow_from` is configured for a group, all users in that group are allowed (assuming the group itself passes the top-level policy check).
+
+## Media Support
+
+### Inbound (receiving)
+
+The adapter receives media attachments from users and caches them locally for agent processing:
+
+| Type | How it's handled |
+|------|-----------------|
+| **Images** | Downloaded and cached locally. Supports both URL-based and base64-encoded images. |
+| **Files** | Downloaded and cached. Filename is preserved from the original message. |
+| **Voice** | Voice message text transcription is extracted if available. |
+| **Mixed messages** | WeCom mixed-type messages (text + images) are parsed and all components extracted. |
+
+**Quoted messages:** Media from quoted (replied-to) messages is also extracted, so the agent has context about what the user is replying to.
+
+### AES-Encrypted Media Decryption
+
+WeCom encrypts some inbound media attachments with AES-256-CBC. The adapter handles this automatically:
+
+- When an inbound media item includes an `aeskey` field, the adapter downloads the encrypted bytes and decrypts them using AES-256-CBC with PKCS#7 padding.
+- The AES key is the base64-decoded value of the `aeskey` field (must be exactly 32 bytes).
+- The IV is derived from the first 16 bytes of the key.
+- This requires the `cryptography` Python package (`pip install cryptography`).
+
+No configuration is needed — decryption happens transparently when encrypted media is received. (A minimal sketch of the scheme appears at the end of this section.)
+
+### Outbound (sending)
+
+| Method | What it sends | Size limit |
+|--------|--------------|------------|
+| `send` | Markdown text messages | 4000 chars |
+| `send_image` / `send_image_file` | Native image messages | 10 MB |
+| `send_document` | File attachments | 20 MB |
+| `send_voice` | Voice messages (AMR format only for native voice) | 2 MB |
+| `send_video` | Video messages | 10 MB |
+
+**Chunked upload:** Files are uploaded in 512 KB chunks through a three-step protocol (init → chunks → finish). The adapter handles this automatically.
+
+**Automatic downgrade:** When media exceeds the native type's size limit but is under the absolute 20 MB file limit, it is automatically sent as a generic file attachment instead:
+
+- Images > 10 MB → sent as file
+- Videos > 10 MB → sent as file
+- Voice > 2 MB → sent as file
+- Non-AMR audio → sent as file (WeCom only supports AMR for native voice)
+
+Files exceeding the absolute 20 MB limit are rejected with an informational message sent to the chat.
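+
+Here is the minimal sketch promised above: AES-256-CBC with PKCS#7 padding, the key taken from the base64 `aeskey`, and the IV from the first 16 bytes of the key. It uses the `cryptography` package and is illustrative rather than the adapter's actual code:
+
+```python
+import base64
+
+from cryptography.hazmat.primitives import padding
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+
+
+def decrypt_wecom_media(encrypted: bytes, aeskey_b64: str) -> bytes:
+    """Decrypt an inbound WeCom attachment (AES-256-CBC, PKCS#7)."""
+    key = base64.b64decode(aeskey_b64)
+    if len(key) != 32:
+        raise ValueError("aeskey must decode to exactly 32 bytes")
+    iv = key[:16]  # IV is derived from the first 16 bytes of the key
+    decryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).decryptor()
+    padded = decryptor.update(encrypted) + decryptor.finalize()
+    unpadder = padding.PKCS7(128).unpadder()
+    return unpadder.update(padded) + unpadder.finalize()
+```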
+
+## Reply-Mode Stream Responses
+
+When the bot receives a message via the WeCom callback, the adapter remembers the inbound request ID. If a response is sent while the request context is still active, the adapter uses WeCom's reply-mode (`aibot_respond_msg`) with streaming to correlate the response directly to the inbound message. This provides a more natural conversation experience in the WeCom client.
+
+If the inbound request context has expired or is unavailable, the adapter falls back to proactive message sending via `aibot_send_msg`.
+
+Reply-mode also works for media: uploaded media can be sent as a reply to the originating message.
+
+## Connection and Reconnection
+
+The adapter maintains a persistent WebSocket connection to WeCom's gateway at `wss://openws.work.weixin.qq.com`.
+
+### Connection Lifecycle
+
+1. **Connect:** Opens a WebSocket connection and sends an `aibot_subscribe` authentication frame with the bot_id and secret.
+2. **Heartbeat:** Sends application-level ping frames every 30 seconds to keep the connection alive.
+3. **Listen:** Continuously reads inbound frames and dispatches message callbacks.
+
+### Reconnection Behavior
+
+On connection loss, the adapter uses exponential backoff to reconnect:
+
+| Attempt | Delay |
+|---------|-------|
+| 1st retry | 2 seconds |
+| 2nd retry | 5 seconds |
+| 3rd retry | 10 seconds |
+| 4th retry | 30 seconds |
+| 5th+ retry | 60 seconds |
+
+After each successful reconnection, the backoff counter resets to zero. All pending request futures are failed on disconnect so callers don't hang indefinitely.
+
+### Deduplication
+
+Inbound messages are deduplicated using message IDs with a 5-minute window and a maximum cache of 1000 entries. This prevents double-processing of messages during reconnection or network hiccups.
+
+## All Environment Variables
+
+| Variable | Required | Default | Description |
+|----------|----------|---------|-------------|
+| `WECOM_BOT_ID` | ✅ | — | WeCom AI Bot ID |
+| `WECOM_SECRET` | ✅ | — | WeCom AI Bot Secret |
+| `WECOM_ALLOWED_USERS` | — | _(empty)_ | Comma-separated user IDs for the gateway-level allowlist |
+| `WECOM_HOME_CHANNEL` | — | — | Chat ID for cron/notification output |
+| `WECOM_WEBSOCKET_URL` | — | `wss://openws.work.weixin.qq.com` | WebSocket gateway URL |
+| `WECOM_DM_POLICY` | — | `open` | DM access policy |
+| `WECOM_GROUP_POLICY` | — | `open` | Group access policy |

 ## Troubleshooting

 | Problem | Fix |
 |---------|-----|
-| "WECOM_BOT_ID and WECOM_SECRET are required" | Set both env vars or configure in setup wizard |
-| "invalid secret (errcode=40013)" | Verify the secret matches your bot's credentials |
-| "Timed out waiting for subscribe acknowledgement" | Check network connectivity to `openws.work.weixin.qq.com` |
-| Bot doesn't respond in groups | Check `group_policy` setting and group allowlist |
+| `WECOM_BOT_ID and WECOM_SECRET are required` | Set both env vars or configure in setup wizard |
+| `WeCom startup failed: aiohttp not installed` | Install aiohttp: `pip install aiohttp` |
+| `WeCom startup failed: httpx not installed` | Install httpx: `pip install httpx` |
+| `invalid secret (errcode=40013)` | Verify the secret matches your bot's credentials |
+| `Timed out waiting for subscribe acknowledgement` | Check network connectivity to `openws.work.weixin.qq.com` |
+| Bot doesn't respond in groups | Check `group_policy` setting and ensure the group ID is in `group_allow_from` |
+| Bot ignores certain users in a group | Check per-group `allow_from` lists in the `groups` config section |
+| Media decryption fails | Install `cryptography`: `pip install cryptography` |
+| `cryptography is required for WeCom media decryption` | The inbound media is AES-encrypted. Install: `pip install cryptography` |
+| Voice messages sent as files | WeCom only supports AMR format for native voice. Other formats are auto-downgraded to file.
| +| `File too large` error | WeCom has a 20 MB absolute limit on all file uploads. Compress or split the file. | +| Images sent as files | Images > 10 MB exceed the native image limit and are auto-downgraded to file attachments. | +| `Timeout sending message to WeCom` | The WebSocket may have disconnected. Check logs for reconnection messages. | +| `WeCom websocket closed during authentication` | Network issue or incorrect credentials. Verify bot_id and secret. | diff --git a/website/docs/user-guide/security.md b/website/docs/user-guide/security.md index 4d51161e1..195583639 100644 --- a/website/docs/user-guide/security.md +++ b/website/docs/user-guide/security.md @@ -22,6 +22,61 @@ The security model has five layers: Before executing any command, Hermes checks it against a curated list of dangerous patterns. If a match is found, the user must explicitly approve it. +### Approval Modes + +The approval system supports three modes, configured via `approvals.mode` in `~/.hermes/config.yaml`: + +```yaml +approvals: + mode: manual # manual | smart | off + timeout: 60 # seconds to wait for user response (default: 60) +``` + +| Mode | Behavior | +|------|----------| +| **manual** (default) | Always prompt the user for approval on dangerous commands | +| **smart** | Use an auxiliary LLM to assess risk. Low-risk commands (e.g., `python -c "print('hello')"`) are auto-approved. Genuinely dangerous commands are auto-denied. Uncertain cases escalate to a manual prompt. | +| **off** | Disable all approval checks — equivalent to running with `--yolo`. All commands execute without prompts. | + +:::warning +Setting `approvals.mode: off` disables all safety prompts. Use only in trusted environments (CI/CD, containers, etc.). +::: + +### YOLO Mode + +YOLO mode bypasses **all** dangerous command approval prompts for the current session. It can be activated three ways: + +1. **CLI flag**: Start a session with `hermes --yolo` or `hermes chat --yolo` +2. **Slash command**: Type `/yolo` during a session to toggle it on/off +3. **Environment variable**: Set `HERMES_YOLO_MODE=1` + +The `/yolo` command is a **toggle** — each use flips the mode on or off: + +``` +> /yolo + ⚡ YOLO mode ON — all commands auto-approved. Use with caution. + +> /yolo + ⚠ YOLO mode OFF — dangerous commands will require approval. +``` + +YOLO mode is available in both CLI and gateway sessions. Internally, it sets the `HERMES_YOLO_MODE` environment variable which is checked before every command execution. + +:::danger +YOLO mode disables **all** dangerous command safety checks for the session. Use only when you fully trust the commands being generated (e.g., well-tested automation scripts in disposable environments). +::: + +### Approval Timeout + +When a dangerous command prompt appears, the user has a configurable amount of time to respond. If no response is given within the timeout, the command is **denied** by default (fail-closed). + +Configure the timeout in `~/.hermes/config.yaml`: + +```yaml +approvals: + timeout: 60 # seconds (default: 60) +``` + ### What Triggers Approval The following patterns trigger approval prompts (defined in `tools/approval.py`): @@ -30,21 +85,32 @@ The following patterns trigger approval prompts (defined in `tools/approval.py`) |---------|-------------| | `rm -r` / `rm --recursive` | Recursive delete | | `rm ... 
/` | Delete in root path |
-| `chmod 777` | World-writable permissions |
+| `chmod 777/666` / `o+w` / `a+w` | World/other-writable permissions |
+| `chmod --recursive` with unsafe perms | Recursive world/other-writable (long flag) |
+| `chown -R root` / `chown --recursive root` | Recursive chown to root |
 | `mkfs` | Format filesystem |
 | `dd if=` | Disk copy |
+| `> /dev/sd` | Write to block device |
 | `DROP TABLE/DATABASE` | SQL DROP |
 | `DELETE FROM` (without WHERE) | SQL DELETE without WHERE |
 | `TRUNCATE TABLE` | SQL TRUNCATE |
 | `> /etc/` | Overwrite system config |
 | `systemctl stop/disable/mask` | Stop/disable system services |
 | `kill -9 -1` | Kill all processes |
-| `curl ... \| sh` | Pipe remote content to shell |
-| `bash -c`, `python -e` | Shell/script execution via flags |
-| `find -exec rm`, `find -delete` | Find with destructive actions |
+| `pkill -9` | Force kill processes |
 | Fork bomb patterns | Fork bombs |
+| `bash -c` / `sh -c` / `zsh -c` / `ksh -c` | Shell command execution via `-c` flag (including combined flags like `-lc`) |
+| `python -c` / `perl -e` / `ruby -e` / `node -e` | Script execution via `-c`/`-e` eval flags |
+| `curl ... \| sh` / `wget ... \| sh` | Pipe remote content to shell |
+| `bash <(curl ...)` / `sh <(wget ...)` | Execute remote script via process substitution |
+| `tee` to `/etc/`, `~/.ssh/`, `~/.hermes/.env` | Overwrite sensitive file via tee |
+| `>` / `>>` to `/etc/`, `~/.ssh/`, `~/.hermes/.env` | Overwrite sensitive file via redirection |
+| `xargs rm` | xargs with rm |
+| `find -exec rm` / `find -delete` | Find with destructive actions |
+| `cp`/`mv`/`install` to `/etc/` | Copy/move file into system config |
+| `sed -i` / `sed --in-place` on `/etc/` | In-place edit of system config |
 | `pkill`/`killall` hermes/gateway | Self-termination prevention |
-| `gateway run` with `&`/`disown`/`nohup` | Prevents starting gateway outside service manager |
+| `gateway run` with `&`/`disown`/`nohup`/`setsid` | Prevents starting gateway outside service manager |

:::info
**Container bypass**: When running in `docker`, `singularity`, `modal`, or `daytona` backends, dangerous command checks are **skipped** because the container itself is the security boundary. Destructive commands inside a container can't harm the host.
:::
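+
+To illustrate the shape of these checks (this is not the actual `tools/approval.py` implementation, just a sketch of the pattern-gate idea using a few of the patterns above):
+
+```python
+import re
+
+# Illustrative subset of dangerous patterns; the real curated list is larger
+DANGEROUS_PATTERNS = [
+    (r"\brm\s+(-[a-zA-Z]*r|--recursive)", "Recursive delete"),
+    (r"\bchmod\s+(777|666)\b", "World-writable permissions"),
+    (r"\b(curl|wget)\b.*\|\s*(ba)?sh\b", "Pipe remote content to shell"),
+    (r":\(\)\s*\{.*\}\s*;\s*:", "Fork bomb"),
+]
+
+
+def requires_approval(command: str) -> str | None:
+    """Return the reason a command needs approval, or None if it looks safe."""
+    for pattern, reason in DANGEROUS_PATTERNS:
+        if re.search(pattern, command):
+            return reason
+    return None
+
+
+# Example: requires_approval("rm -rf /tmp/x") -> "Recursive delete"
+```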
diff --git a/website/docs/user-guide/skills/godmode.md b/website/docs/user-guide/skills/godmode.md index 419478ba1..c95dc54c8 100644 --- a/website/docs/user-guide/skills/godmode.md +++ b/website/docs/user-guide/skills/godmode.md @@ -1,4 +1,6 @@ --- +sidebar_position: 1 +sidebar_label: "G0DM0D3 (Godmode)" title: "G0DM0D3 — Godmode Jailbreaking" description: "Automated LLM jailbreaking using G0DM0D3 techniques — system prompt templates, input obfuscation, and multi-model racing" --- diff --git a/website/sidebars.ts b/website/sidebars.ts index 4c7bfc2e2..fa76f4ce3 100644 --- a/website/sidebars.ts +++ b/website/sidebars.ts @@ -16,61 +16,37 @@ const sidebars: SidebarsConfig = { }, { type: 'category', - label: 'Guides & Tutorials', - collapsed: true, - items: [ - 'guides/tips', - 'guides/daily-briefing-bot', - 'guides/team-telegram-assistant', - 'guides/python-library', - 'guides/use-mcp-with-hermes', - 'guides/use-soul-with-hermes', - 'guides/use-voice-mode-with-hermes', - 'guides/migrate-from-openclaw', - ], - }, - { - type: 'category', - label: 'User Guide', + label: 'Using Hermes', collapsed: true, items: [ 'user-guide/cli', 'user-guide/configuration', 'user-guide/sessions', - 'user-guide/security', - 'user-guide/docker', 'user-guide/profiles', + 'user-guide/git-worktrees', + 'user-guide/docker', + 'user-guide/security', + 'user-guide/checkpoints-and-rollback', + ], + }, + { + type: 'category', + label: 'Features', + collapsed: true, + items: [ + 'user-guide/features/overview', { type: 'category', - label: 'Messaging Gateway', - items: [ - 'user-guide/messaging/index', - 'user-guide/messaging/telegram', - 'user-guide/messaging/discord', - 'user-guide/messaging/slack', - 'user-guide/messaging/whatsapp', - 'user-guide/messaging/signal', - 'user-guide/messaging/email', - 'user-guide/messaging/homeassistant', - 'user-guide/messaging/mattermost', - 'user-guide/messaging/matrix', - 'user-guide/messaging/dingtalk', - 'user-guide/messaging/feishu', - 'user-guide/messaging/wecom', - 'user-guide/messaging/open-webui', - 'user-guide/messaging/webhooks', - ], - }, - { - type: 'category', - label: 'Core Features', + label: 'Core', items: [ 'user-guide/features/tools', 'user-guide/features/skills', 'user-guide/features/memory', 'user-guide/features/context-files', + 'user-guide/features/context-references', 'user-guide/features/personality', 'user-guide/features/skins', + 'user-guide/features/plugins', ], }, { @@ -81,11 +57,12 @@ const sidebars: SidebarsConfig = { 'user-guide/features/delegation', 'user-guide/features/code-execution', 'user-guide/features/hooks', + 'user-guide/features/batch-processing', ], }, { type: 'category', - label: 'Web & Media', + label: 'Media & Web', items: [ 'user-guide/features/voice-mode', 'user-guide/features/browser', @@ -94,23 +71,10 @@ const sidebars: SidebarsConfig = { 'user-guide/features/tts', ], }, - { - type: 'category', - label: 'Integrations', - items: [ - 'user-guide/features/api-server', - 'user-guide/features/acp', - 'user-guide/features/mcp', - 'user-guide/features/honcho', - 'user-guide/features/provider-routing', - 'user-guide/features/fallback-providers', - ], - }, { type: 'category', label: 'Advanced', items: [ - 'user-guide/features/batch-processing', 'user-guide/features/rl-training', ], }, @@ -125,25 +89,98 @@ const sidebars: SidebarsConfig = { }, { type: 'category', - label: 'Developer Guide', + label: 'Messaging Platforms', + collapsed: true, + items: [ + 'user-guide/messaging/index', + 'user-guide/messaging/telegram', + 'user-guide/messaging/discord', 
+ 'user-guide/messaging/slack', + 'user-guide/messaging/whatsapp', + 'user-guide/messaging/signal', + 'user-guide/messaging/email', + 'user-guide/messaging/sms', + 'user-guide/messaging/homeassistant', + 'user-guide/messaging/mattermost', + 'user-guide/messaging/matrix', + 'user-guide/messaging/dingtalk', + 'user-guide/messaging/feishu', + 'user-guide/messaging/wecom', + 'user-guide/messaging/open-webui', + 'user-guide/messaging/webhooks', + ], + }, + { + type: 'category', + label: 'Integrations', + collapsed: true, + items: [ + 'integrations/index', + 'integrations/providers', + 'user-guide/features/mcp', + 'user-guide/features/acp', + 'user-guide/features/api-server', + 'user-guide/features/honcho', + 'user-guide/features/provider-routing', + 'user-guide/features/fallback-providers', + ], + }, + { + type: 'category', + label: 'Guides & Tutorials', + collapsed: true, + items: [ + 'guides/tips', + 'guides/build-a-hermes-plugin', + 'guides/daily-briefing-bot', + 'guides/team-telegram-assistant', + 'guides/python-library', + 'guides/use-mcp-with-hermes', + 'guides/use-soul-with-hermes', + 'guides/use-voice-mode-with-hermes', + 'guides/migrate-from-openclaw', + ], + }, + { + type: 'category', + label: 'Developer Guide', + collapsed: true, items: [ - 'developer-guide/architecture', - 'developer-guide/agent-loop', - 'developer-guide/provider-runtime', - 'developer-guide/adding-providers', - 'developer-guide/prompt-assembly', - 'developer-guide/context-compression-and-caching', - 'developer-guide/gateway-internals', - 'developer-guide/session-storage', - 'developer-guide/tools-runtime', - 'developer-guide/acp-internals', - 'developer-guide/trajectory-format', - 'developer-guide/cron-internals', - 'developer-guide/environments', - 'developer-guide/adding-tools', - 'developer-guide/creating-skills', - 'developer-guide/extending-the-cli', 'developer-guide/contributing', + { + type: 'category', + label: 'Architecture', + items: [ + 'developer-guide/architecture', + 'developer-guide/agent-loop', + 'developer-guide/prompt-assembly', + 'developer-guide/context-compression-and-caching', + 'developer-guide/gateway-internals', + 'developer-guide/session-storage', + 'developer-guide/provider-runtime', + ], + }, + { + type: 'category', + label: 'Extending', + items: [ + 'developer-guide/adding-tools', + 'developer-guide/adding-providers', + 'developer-guide/creating-skills', + 'developer-guide/extending-the-cli', + ], + }, + { + type: 'category', + label: 'Internals', + items: [ + 'developer-guide/tools-runtime', + 'developer-guide/acp-internals', + 'developer-guide/cron-internals', + 'developer-guide/environments', + 'developer-guide/trajectory-format', + ], + }, ], }, { @@ -152,13 +189,13 @@ const sidebars: SidebarsConfig = { items: [ 'reference/cli-commands', 'reference/slash-commands', + 'reference/profile-commands', + 'reference/environment-variables', 'reference/tools-reference', 'reference/toolsets-reference', 'reference/mcp-config-reference', 'reference/skills-catalog', 'reference/optional-skills-catalog', - 'reference/profile-commands', - 'reference/environment-variables', 'reference/faq', ], },