mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
Fix variable name breakage (run_agent, hermes_constants, etc.) where import rewriter changed 'import X' to 'import hermes_agent.Y' but test code still referenced 'X' as a variable name. Fix package-vs-module confusion (cli.auth, cli.models, cli.ui) where single files became directories. Fix hardcoded file paths in tests pointing to old locations. Fix tool registry to discover tools in subpackage directories. Fix stale import in hermes_agent/tools/__init__.py. Part of #14182, #14183
4274 lines
163 KiB
Python
4274 lines
163 KiB
Python
"""
|
||
Configuration management for Hermes Agent.
|
||
|
||
Config files are stored in ~/.hermes/ for easy access:
|
||
- ~/.hermes/config.yaml - All settings (model, toolsets, terminal, etc.)
|
||
- ~/.hermes/.env - API keys and secrets
|
||
|
||
This module provides:
|
||
- hermes config - Show current configuration
|
||
- hermes config edit - Open config in editor
|
||
- hermes config set - Set a specific value
|
||
- hermes config wizard - Re-run setup wizard
|
||
"""
|
||
|
||
import copy
|
||
import logging
|
||
import os
|
||
import platform
|
||
import re
|
||
import stat
|
||
import subprocess
|
||
import sys
|
||
import tempfile
|
||
from dataclasses import dataclass
|
||
from pathlib import Path
|
||
from typing import Dict, Any, Optional, List, Tuple, TypedDict, Union
|
||
|
||
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


_IS_WINDOWS = platform.system() == "Windows"
# POSIX-style environment variable name: letters/underscore start, then
# letters/digits/underscores. NOTE(review): used to validate keys before
# writing them to .env — confirm against the writer code below.
_ENV_VAR_NAME_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")
# Per-config-path cache of the last expanded config mapping.
# NOTE(review): consumers are outside this chunk — presumably used to
# detect config changes between loads; verify before relying on it.
_LAST_EXPANDED_CONFIG_BY_PATH: Dict[str, Any] = {}
|
||
# Env var names written to .env that aren't in OPTIONAL_ENV_VARS
|
||
# (managed by setup/provider flows directly).
|
||
_EXTRA_ENV_KEYS = frozenset({
    # Model providers
    "OPENAI_API_KEY", "OPENAI_BASE_URL",
    "ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN",
    # Messaging platform credentials / channel settings
    "DISCORD_HOME_CHANNEL", "TELEGRAM_HOME_CHANNEL",
    "SIGNAL_ACCOUNT", "SIGNAL_HTTP_URL",
    "SIGNAL_ALLOWED_USERS", "SIGNAL_GROUP_ALLOWED_USERS",
    "DINGTALK_CLIENT_ID", "DINGTALK_CLIENT_SECRET",
    "FEISHU_APP_ID", "FEISHU_APP_SECRET", "FEISHU_ENCRYPT_KEY", "FEISHU_VERIFICATION_TOKEN",
    "WECOM_BOT_ID", "WECOM_SECRET",
    "WECOM_CALLBACK_CORP_ID", "WECOM_CALLBACK_CORP_SECRET", "WECOM_CALLBACK_AGENT_ID",
    "WECOM_CALLBACK_TOKEN", "WECOM_CALLBACK_ENCODING_AES_KEY",
    "WECOM_CALLBACK_HOST", "WECOM_CALLBACK_PORT",
    "WEIXIN_ACCOUNT_ID", "WEIXIN_TOKEN", "WEIXIN_BASE_URL", "WEIXIN_CDN_BASE_URL",
    "WEIXIN_HOME_CHANNEL", "WEIXIN_HOME_CHANNEL_NAME", "WEIXIN_DM_POLICY", "WEIXIN_GROUP_POLICY",
    "WEIXIN_ALLOWED_USERS", "WEIXIN_GROUP_ALLOWED_USERS", "WEIXIN_ALLOW_ALL_USERS",
    "BLUEBUBBLES_SERVER_URL", "BLUEBUBBLES_PASSWORD",
    "QQ_APP_ID", "QQ_CLIENT_SECRET", "QQBOT_HOME_CHANNEL", "QQBOT_HOME_CHANNEL_NAME",
    "QQ_HOME_CHANNEL", "QQ_HOME_CHANNEL_NAME",  # legacy aliases (pre-rename, still read for back-compat)
    "QQ_ALLOWED_USERS", "QQ_GROUP_ALLOWED_USERS", "QQ_ALLOW_ALL_USERS", "QQ_MARKDOWN_SUPPORT",
    "QQ_STT_API_KEY", "QQ_STT_BASE_URL", "QQ_STT_MODEL",
    # Terminal backend settings
    "TERMINAL_ENV", "TERMINAL_SSH_KEY", "TERMINAL_SSH_PORT",
    "WHATSAPP_MODE", "WHATSAPP_ENABLED",
    "MATTERMOST_HOME_CHANNEL", "MATTERMOST_REPLY_MODE",
    "MATRIX_PASSWORD", "MATRIX_ENCRYPTION", "MATRIX_DEVICE_ID", "MATRIX_HOME_ROOM",
    "MATRIX_REQUIRE_MENTION", "MATRIX_FREE_RESPONSE_ROOMS", "MATRIX_AUTO_THREAD",
    "MATRIX_RECOVERY_KEY",
})
|
||
import yaml
|
||
|
||
from hermes_agent.cli.ui.colors import Colors, color
|
||
from hermes_agent.cli.default_soul import DEFAULT_SOUL_MD
|
||
|
||
|
||
# =============================================================================
|
||
# Managed mode (NixOS declarative config)
|
||
# =============================================================================
|
||
|
||
# HERMES_MANAGED values treated as a bare boolean "managed" flag; these map
# to "NixOS" in get_managed_system() (the original managed platform).
_MANAGED_TRUE_VALUES = ("true", "1", "yes")
# Maps lowercased HERMES_MANAGED values to the display name of the owning
# package manager; unknown values fall through to the raw string.
_MANAGED_SYSTEM_NAMES = {
    "brew": "Homebrew",
    "homebrew": "Homebrew",
    "nix": "NixOS",
    "nixos": "NixOS",
}
|
||
|
||
|
||
def get_managed_system() -> Optional[str]:
    """Return the package manager owning this install, if any."""
    env_value = os.getenv("HERMES_MANAGED", "").strip()
    if env_value:
        lowered = env_value.lower()
        # Bare boolean-ish values mean "managed" — NixOS was the original
        # managed platform, so it remains the default interpretation.
        if lowered in _MANAGED_TRUE_VALUES:
            return "NixOS"
        return _MANAGED_SYSTEM_NAMES.get(lowered, env_value)

    # No env var set: check for the marker file dropped by the NixOS
    # activation script so interactive shells also detect managed mode.
    if (get_hermes_home() / ".managed").exists():
        return "NixOS"
    return None
|
||
|
||
|
||
def is_managed() -> bool:
    """Check if Hermes is running in package-manager-managed mode.

    Two signals: the HERMES_MANAGED env var (set by the systemd service),
    or a .managed marker file in HERMES_HOME (set by the NixOS activation
    script, so interactive shells also see it).
    """
    managed = get_managed_system()
    return managed is not None
|
||
|
||
|
||
def get_managed_update_command() -> Optional[str]:
    """Return the preferred upgrade command for a managed install."""
    # Dispatch on the owning package manager; unknown/None falls through
    # to None (no managed update command).
    commands = {
        "Homebrew": "brew upgrade hermes-agent",
        "NixOS": "sudo nixos-rebuild switch",
    }
    return commands.get(get_managed_system())
|
||
|
||
|
||
def recommended_update_command() -> str:
    """Return the best update command for the current installation."""
    managed_cmd = get_managed_update_command()
    if managed_cmd:
        return managed_cmd
    # Unmanaged installs update themselves.
    return "hermes update"
|
||
|
||
|
||
def format_managed_message(action: str = "modify this Hermes installation") -> str:
    """Build a user-facing error for managed installs."""
    managed_system = get_managed_system() or "a package manager"
    raw = os.getenv("HERMES_MANAGED", "").strip().lower()

    if managed_system == "NixOS":
        # Echo back the env value we detected, normalizing boolean-ish
        # values to "true" and defaulting to "true" when unset.
        if raw in _MANAGED_TRUE_VALUES:
            env_hint = "true"
        else:
            env_hint = raw or "true"
        parts = [
            f"Cannot {action}: this Hermes installation is managed by NixOS ",
            f"(HERMES_MANAGED={env_hint}).\n",
            "Edit services.hermes-agent.settings in your configuration.nix and run:\n",
            "  sudo nixos-rebuild switch",
        ]
        return "".join(parts)

    if managed_system == "Homebrew":
        env_hint = raw or "homebrew"
        parts = [
            f"Cannot {action}: this Hermes installation is managed by Homebrew ",
            f"(HERMES_MANAGED={env_hint}).\n",
            "Use:\n",
            "  brew upgrade hermes-agent",
        ]
        return "".join(parts)

    # Generic fallback for any other package manager value.
    return (
        f"Cannot {action}: this Hermes installation is managed by {managed_system}.\n"
        "Use your package manager to upgrade or reinstall Hermes."
    )
|
||
|
||
def managed_error(action: str = "modify configuration"):
    """Print user-friendly error for managed mode."""
    message = format_managed_message(action)
    print(message, file=sys.stderr)
|
||
|
||
|
||
# =============================================================================
|
||
# Container-aware CLI (NixOS container mode)
|
||
# =============================================================================
|
||
|
||
def get_container_exec_info() -> Optional[dict]:
    """Read container mode metadata from HERMES_HOME/.container-mode.

    Returns a dict with keys: backend, container_name, exec_user, hermes_bin
    or None if container mode is not active, we're already inside the
    container, or HERMES_DEV=1 is set.

    The .container-mode file is written by the NixOS activation script when
    container.enable = true. It tells the host CLI to exec into the container
    instead of running locally.
    """
    # Developer escape hatch: run locally even when container mode is active.
    if os.environ.get("HERMES_DEV") == "1":
        return None

    from hermes_agent.constants import is_container
    if is_container():
        # Already inside the container — nothing to exec into.
        return None

    marker_path = get_hermes_home() / ".container-mode"

    parsed = {}
    try:
        with open(marker_path, "r") as handle:
            # Simple KEY=VALUE format; '#' lines are comments.
            for raw_line in handle:
                entry = raw_line.strip()
                if entry.startswith("#") or "=" not in entry:
                    continue
                key, _, value = entry.partition("=")
                parsed[key.strip()] = value.strip()
    except FileNotFoundError:
        return None
    # All other exceptions (PermissionError, malformed data, etc.) propagate

    defaults = {
        "backend": "docker",
        "container_name": "hermes-agent",
        "exec_user": "hermes",
        "hermes_bin": "/data/current-package/bin/hermes",
    }
    return {key: parsed.get(key, fallback) for key, fallback in defaults.items()}
|
||
|
||
|
||
# =============================================================================
|
||
# Config paths
|
||
# =============================================================================
|
||
|
||
# Re-export from hermes_constants — canonical definition lives there.
|
||
from hermes_agent.constants import get_hermes_home # noqa: F811,E402
|
||
|
||
def get_config_path() -> Path:
    """Get the main config file path."""
    home = get_hermes_home()
    return home / "config.yaml"
|
||
|
||
def get_env_path() -> Path:
    """Get the .env file path (for API keys)."""
    home = get_hermes_home()
    return home / ".env"
|
||
|
||
def get_project_root() -> Path:
    """Get the project installation directory.

    Resolves this module's real location (following symlinks) and walks
    two directory levels up from its parent directory.
    """
    # parents[] of an already-resolved path is absolute and symlink-free,
    # so the original trailing .resolve() was redundant and has been dropped.
    return Path(__file__).resolve().parents[2]
|
||
|
||
def _secure_dir(path):
    """Set directory to owner-only access (0700 by default).

    Skipped in managed mode — the NixOS module sets group-readable
    permissions (0750) so interactive users in the hermes group can
    share state with the gateway service.

    The mode can be overridden via the HERMES_HOME_MODE environment variable
    (e.g. HERMES_HOME_MODE=0701) for deployments where a web server (nginx,
    caddy, etc.) needs to traverse HERMES_HOME to reach a served subdirectory.
    The execute-only bit on a directory permits cd-through without exposing
    directory listings.

    chmod failures (e.g. unsupported platforms such as Windows) are ignored.
    """
    if is_managed():
        return

    # Parse the octal override, falling back to 0700 on any bad value.
    mode = 0o700
    override = os.environ.get("HERMES_HOME_MODE", "").strip()
    if override:
        try:
            mode = int(override, 8)
        except ValueError:
            mode = 0o700

    try:
        os.chmod(path, mode)
    except (OSError, NotImplementedError):
        # Best-effort hardening — never fail startup over permissions.
        pass
|
||
|
||
|
||
def _is_container() -> bool:
|
||
"""Detect if we're running inside a Docker/Podman/LXC container.
|
||
|
||
When Hermes runs in a container with volume-mounted config files, forcing
|
||
0o600 permissions breaks multi-process setups where the gateway and
|
||
dashboard run as different UIDs or the volume mount requires broader
|
||
permissions.
|
||
"""
|
||
# Explicit opt-out
|
||
if os.environ.get("HERMES_CONTAINER") or os.environ.get("HERMES_SKIP_CHMOD"):
|
||
return True
|
||
# Docker / Podman marker file
|
||
if os.path.exists("/.dockerenv"):
|
||
return True
|
||
# LXC / cgroup-based detection
|
||
try:
|
||
with open("/proc/1/cgroup", "r") as f:
|
||
cgroup_content = f.read()
|
||
if "docker" in cgroup_content or "lxc" in cgroup_content or "kubepods" in cgroup_content:
|
||
return True
|
||
except (OSError, IOError):
|
||
pass
|
||
return False
|
||
|
||
|
||
def _secure_file(path):
    """Set file to owner-only read/write (0600).

    Skipped in managed mode — the NixOS activation script sets
    group-readable permissions (0640) on config files.

    Skipped in containers — Docker/Podman volume mounts often need broader
    permissions. Set HERMES_SKIP_CHMOD=1 to force-skip on other systems.

    chmod failures (e.g. unsupported platforms such as Windows) are ignored.
    """
    if is_managed():
        return
    if _is_container():
        return
    try:
        if os.path.exists(str(path)):
            os.chmod(path, 0o600)
    except (OSError, NotImplementedError):
        # Best-effort hardening — never fail over permissions.
        pass
|
||
|
||
|
||
def _ensure_default_soul_md(home: Path) -> None:
    """Seed a default SOUL.md into HERMES_HOME if the user doesn't have one yet."""
    target = home / "SOUL.md"
    if target.exists():
        # Never clobber a user-customized SOUL.md.
        return
    target.write_text(DEFAULT_SOUL_MD, encoding="utf-8")
    _secure_file(target)
|
||
|
||
|
||
def ensure_hermes_home():
    """Ensure ~/.hermes directory structure exists with secure permissions.

    In managed mode (NixOS), dirs are created by the activation script with
    setgid + group-writable (2770). We skip mkdir and set umask(0o007) so
    any files created (e.g. SOUL.md) are group-writable (0660).
    """
    home = get_hermes_home()

    if is_managed():
        previous_umask = os.umask(0o007)
        try:
            _ensure_hermes_home_managed(home)
        finally:
            os.umask(previous_umask)
        return

    # Unmanaged: create and lock down the home dir plus its subdirectories.
    for directory in (
        home,
        home / "cron",
        home / "sessions",
        home / "logs",
        home / "memories",
    ):
        directory.mkdir(parents=True, exist_ok=True)
        _secure_dir(directory)
    _ensure_default_soul_md(home)
|
||
|
||
|
||
def _ensure_hermes_home_managed(home: Path):
    """Managed-mode variant: verify dirs exist (activation creates them), seed SOUL.md."""
    if not home.is_dir():
        raise RuntimeError(
            f"HERMES_HOME {home} does not exist. "
            "Run 'sudo nixos-rebuild switch' first."
        )
    for name in ("cron", "sessions", "logs", "memories"):
        subdir_path = home / name
        if subdir_path.is_dir():
            continue
        raise RuntimeError(
            f"{subdir_path} does not exist. "
            "Run 'sudo nixos-rebuild switch' first."
        )
    # Runs inside the caller's umask(0o007) scope — SOUL.md lands as 0660.
    _ensure_default_soul_md(home)
|
||
|
||
|
||
# =============================================================================
|
||
# Config loading/saving
|
||
# =============================================================================
|
||
|
||
class _AgentConfig(TypedDict):
    """Typed schema for the `agent` config section (see DEFAULT_CONFIG)."""
    max_turns: int
    gateway_timeout: int
    restart_drain_timeout: int
    service_tier: str
    tool_use_enforcement: str
    gateway_timeout_warning: int
    gateway_notify_interval: int
|
||
|
||
class _TerminalConfig(TypedDict):
|
||
backend: str
|
||
modal_mode: str
|
||
cwd: str
|
||
timeout: int
|
||
env_passthrough: List[str]
|
||
docker_image: str
|
||
docker_forward_env: List[str]
|
||
docker_env: Dict[str, str]
|
||
singularity_image: str
|
||
modal_image: str
|
||
daytona_image: str
|
||
container_cpu: int
|
||
container_memory: int
|
||
container_disk: int
|
||
container_persistent: bool
|
||
docker_volumes: List[str]
|
||
docker_mount_cwd_to_workspace: bool
|
||
persistent_shell: bool
|
||
|
||
|
||
class _CamofoxConfig(TypedDict, total=False):
    """Typed schema for `browser.camofox` settings (all keys optional)."""
    managed_persistence: bool
|
||
|
||
|
||
class _BrowserConfig(TypedDict):
    """Typed schema for the `browser` config section (see DEFAULT_CONFIG)."""
    inactivity_timeout: int
    command_timeout: int
    record_sessions: bool
    allow_private_urls: bool
    cdp_url: str
    camofox: _CamofoxConfig
|
||
|
||
|
||
class _CheckpointsConfig(TypedDict):
    """Typed schema for the `checkpoints` (filesystem snapshots) section."""
    enabled: bool
    max_snapshots: int
|
||
|
||
|
||
class _CompressionConfig(TypedDict):
    """Typed schema for the context `compression` config section."""
    enabled: bool
    threshold: float
    target_ratio: float
    protect_last_n: int
|
||
|
||
|
||
class _BedrockDiscoveryConfig(TypedDict):
    """Typed schema for `bedrock.discovery` (model auto-discovery) settings."""
    enabled: bool
    provider_filter: List[str]
    refresh_interval: int
|
||
|
||
|
||
class _BedrockGuardrailConfig(TypedDict):
    """Typed schema for `bedrock.guardrail` (Bedrock Guardrails) settings."""
    guardrail_identifier: str
    guardrail_version: str
    stream_processing_mode: str
    trace: str
|
||
|
||
|
||
class _BedrockConfig(TypedDict):
    """Typed schema for the `bedrock` (AWS Bedrock provider) section."""
    region: str
    discovery: _BedrockDiscoveryConfig
    guardrail: _BedrockGuardrailConfig
|
||
|
||
|
||
class _AuxiliaryTaskConfig(TypedDict, total=False):
    """Per-task auxiliary model settings (all keys optional).

    Each `auxiliary.*` task (vision, compression, ...) uses this shape;
    only some tasks set max_concurrency / download_timeout.
    """
    provider: str
    model: str
    base_url: str
    api_key: str
    timeout: int
    extra_body: Dict[str, Any]
    max_concurrency: int
    download_timeout: int
|
||
|
||
|
||
class _AuxiliaryConfig(TypedDict):
    """Typed schema for the `auxiliary` section — one entry per side task."""
    vision: _AuxiliaryTaskConfig
    web_extract: _AuxiliaryTaskConfig
    compression: _AuxiliaryTaskConfig
    session_search: _AuxiliaryTaskConfig
    skills_hub: _AuxiliaryTaskConfig
    approval: _AuxiliaryTaskConfig
    mcp: _AuxiliaryTaskConfig
    flush_memories: _AuxiliaryTaskConfig
    title_generation: _AuxiliaryTaskConfig
|
||
|
||
|
||
class _UserMessagePreviewConfig(TypedDict):
    """Typed schema for `display.user_message_preview` (CLI echo-back lines)."""
    first_lines: int
    last_lines: int
|
||
|
||
|
||
class _DisplayConfig(TypedDict):
    """Typed schema for the `display` config section (see DEFAULT_CONFIG)."""
    compact: bool
    personality: str
    resume_display: str
    busy_input_mode: str
    bell_on_complete: bool
    show_reasoning: bool
    streaming: bool
    final_response_markdown: str
    inline_diffs: bool
    show_cost: bool
    skin: str
    user_message_preview: _UserMessagePreviewConfig
    interim_assistant_messages: bool
    tool_progress_command: bool
    # DEPRECATED per DEFAULT_CONFIG — superseded by `platforms`.
    tool_progress_overrides: Dict[str, Any]
    tool_preview_length: int
    platforms: Dict[str, Any]
|
||
|
||
|
||
class _DashboardConfig(TypedDict):
    """Typed schema for the `dashboard` (web dashboard) config section."""
    theme: str
|
||
|
||
|
||
class _PrivacyConfig(TypedDict):
    """Typed schema for the `privacy` config section."""
    redact_pii: bool
|
||
|
||
|
||
class _EdgeTtsConfig(TypedDict):
    """Typed schema for `tts.edge` provider settings."""
    voice: str
|
||
|
||
|
||
class _ElevenlabsTtsConfig(TypedDict):
    """Typed schema for `tts.elevenlabs` provider settings."""
    voice_id: str
    model_id: str
|
||
|
||
|
||
class _OpenaiTtsConfig(TypedDict):
    """Typed schema for `tts.openai` provider settings."""
    model: str
    voice: str
|
||
|
||
|
||
class _XaiTtsConfig(TypedDict):
    """Typed schema for `tts.xai` provider settings."""
    voice_id: str
    language: str
    sample_rate: int
    bit_rate: int
|
||
|
||
|
||
class _MistralTtsConfig(TypedDict):
    """Typed schema for `tts.mistral` provider settings."""
    model: str
    voice_id: str
|
||
|
||
|
||
class _NeuttsConfig(TypedDict):
    """Typed schema for `tts.neutts` (local TTS) provider settings."""
    ref_audio: str
    ref_text: str
    model: str
    device: str
|
||
|
||
|
||
class _TtsConfig(TypedDict):
    """Typed schema for the `tts` section — selected provider plus per-provider blocks."""
    provider: str
    edge: _EdgeTtsConfig
    elevenlabs: _ElevenlabsTtsConfig
    openai: _OpenaiTtsConfig
    xai: _XaiTtsConfig
    mistral: _MistralTtsConfig
    neutts: _NeuttsConfig
|
||
|
||
|
||
class _LocalSttConfig(TypedDict):
    """Typed schema for `stt.local` provider settings."""
    model: str
    language: str
|
||
|
||
|
||
class _OpenaiSttConfig(TypedDict):
    """Typed schema for `stt.openai` provider settings."""
    model: str
|
||
|
||
|
||
class _MistralSttConfig(TypedDict):
    """Typed schema for `stt.mistral` provider settings."""
    model: str
|
||
|
||
|
||
class _SttConfig(TypedDict):
    """Typed schema for the `stt` section — selected provider plus per-provider blocks."""
    enabled: bool
    provider: str
    local: _LocalSttConfig
    openai: _OpenaiSttConfig
    mistral: _MistralSttConfig
|
||
|
||
|
||
class _VoiceConfig(TypedDict):
    """Typed schema for the `voice` (recording) config section."""
    record_key: str
    max_recording_seconds: int
    auto_tts: bool
    silence_threshold: int
    silence_duration: float
|
||
|
||
|
||
class _HumanDelayConfig(TypedDict):
    """Typed schema for the `human_delay` config section."""
    mode: str
    min_ms: int
    max_ms: int
|
||
|
||
|
||
class _ContextConfig(TypedDict):
    """Typed schema for the `context` config section."""
    engine: str
|
||
|
||
|
||
class _MemoryConfig(TypedDict):
    """Typed schema for the `memory` config section."""
    memory_enabled: bool
    user_profile_enabled: bool
    memory_char_limit: int
    user_char_limit: int
    provider: str
|
||
|
||
|
||
class _DelegationConfig(TypedDict):
    """Typed schema for the `delegation` (sub-agent) config section."""
    model: str
    provider: str
    base_url: str
    api_key: str
    max_iterations: int
    reasoning_effort: str
|
||
|
||
|
||
class _SkillsConfig(TypedDict):
    """Typed schema for the `skills` config section."""
    external_dirs: List[str]
|
||
|
||
|
||
class _ChannelPromptsConfig(TypedDict):
    """Shared schema for platforms (telegram/slack/mattermost) carrying only per-channel prompts."""
    channel_prompts: Dict[str, str]
|
||
|
||
|
||
class _DiscordConfig(TypedDict):
    """Typed schema for the `discord` config section."""
    require_mention: bool
    free_response_channels: str
    allowed_channels: str
    auto_thread: bool
    reactions: bool
    channel_prompts: Dict[str, str]
    server_actions: str
|
||
|
||
|
||
class _ApprovalsConfig(TypedDict):
    """Typed schema for the `approvals` config section."""
    mode: str
    timeout: int
    cron_mode: str
|
||
|
||
|
||
class _WebsiteBlocklistConfig(TypedDict):
    """Typed schema for `security.website_blocklist` settings."""
    enabled: bool
    domains: List[str]
    shared_files: List[str]
|
||
|
||
|
||
class _SecurityConfig(TypedDict):
    """Typed schema for the `security` config section."""
    redact_secrets: bool
    tirith_enabled: bool
    tirith_path: str
    tirith_timeout: int
    tirith_fail_open: bool
    website_blocklist: _WebsiteBlocklistConfig
|
||
|
||
|
||
class _CronConfig(TypedDict):
    """Typed schema for the `cron` config section."""
    wrap_response: bool
    # None means no explicit cap on parallel jobs.
    max_parallel_jobs: Optional[int]
|
||
|
||
|
||
class _CodeExecutionConfig(TypedDict):
    """Typed schema for the `code_execution` config section."""
    mode: str
|
||
|
||
|
||
class _LoggingConfig(TypedDict):
    """Typed schema for the `logging` config section."""
    level: str
    max_size_mb: int
    backup_count: int
|
||
|
||
|
||
class _NetworkConfig(TypedDict):
    """Typed schema for the `network` config section."""
    force_ipv4: bool
|
||
|
||
|
||
class _DefaultConfig(TypedDict):
    """Top-level typed schema for config.yaml — DEFAULT_CONFIG conforms to this."""
    model: str
    providers: Dict[str, Any]
    fallback_providers: List[Any]
    credential_pool_strategies: Dict[str, Any]
    # NOTE(review): DEFAULT_CONFIG below uses the key
    # "hermes_agent.tools.toolsets" instead of "toolsets" — this looks like
    # import-rewriter damage (see the sync commit message) and should be
    # restored to "toolsets" to match this schema.
    toolsets: List[str]
    agent: _AgentConfig
    terminal: _TerminalConfig
    browser: _BrowserConfig
    checkpoints: _CheckpointsConfig
    file_read_max_chars: int
    compression: _CompressionConfig
    bedrock: _BedrockConfig
    auxiliary: _AuxiliaryConfig
    display: _DisplayConfig
    dashboard: _DashboardConfig
    privacy: _PrivacyConfig
    tts: _TtsConfig
    stt: _SttConfig
    voice: _VoiceConfig
    human_delay: _HumanDelayConfig
    context: _ContextConfig
    memory: _MemoryConfig
    delegation: _DelegationConfig
    prefill_messages_file: str
    skills: _SkillsConfig
    honcho: Dict[str, Any]
    timezone: str
    discord: _DiscordConfig
    whatsapp: Dict[str, Any]
    telegram: _ChannelPromptsConfig
    slack: _ChannelPromptsConfig
    mattermost: _ChannelPromptsConfig
    approvals: _ApprovalsConfig
    command_allowlist: List[str]
    quick_commands: Dict[str, Any]
    hooks: Dict[str, Any]
    hooks_auto_accept: bool
    personalities: Dict[str, Any]
    security: _SecurityConfig
    cron: _CronConfig
    code_execution: _CodeExecutionConfig
    logging: _LoggingConfig
    network: _NetworkConfig
    _config_version: int
|
||
|
||
|
||
class _EnvVarRequired(TypedDict):
    """Required metadata fields describing a known environment variable."""
    description: str
    prompt: str
    category: str
|
||
|
||
|
||
class _EnvVarOptional(TypedDict, total=False):
    """Optional metadata fields for a known environment variable."""
    url: Optional[str]
    password: bool
    tools: List[str]
    advanced: bool
|
||
|
||
|
||
class _EnvVarInfo(_EnvVarRequired, _EnvVarOptional):
    """Full env var metadata: required fields plus the optional extras."""
    pass
|
||
|
||
|
||
DEFAULT_CONFIG: _DefaultConfig = {
|
||
"model": "",
|
||
"providers": {},
|
||
"fallback_providers": [],
|
||
"credential_pool_strategies": {},
|
||
"hermes_agent.tools.toolsets": ["hermes-cli"],
|
||
"agent": {
|
||
"max_turns": 90,
|
||
# Inactivity timeout for gateway agent execution (seconds).
|
||
# The agent can run indefinitely as long as it's actively calling
|
||
# tools or receiving API responses. Only fires when the agent has
|
||
# been completely idle for this duration. 0 = unlimited.
|
||
"gateway_timeout": 1800,
|
||
# Graceful drain timeout for gateway stop/restart (seconds).
|
||
# The gateway stops accepting new work, waits for running agents
|
||
# to finish, then interrupts any remaining runs after the timeout.
|
||
# 0 = no drain, interrupt immediately.
|
||
"restart_drain_timeout": 60,
|
||
"service_tier": "",
|
||
# Tool-use enforcement: injects system prompt guidance that tells the
|
||
# model to actually call tools instead of describing intended actions.
|
||
# Values: "auto" (default — applies to gpt/codex models), true/false
|
||
# (force on/off for all models), or a list of model-name substrings
|
||
# to match (e.g. ["gpt", "codex", "gemini", "qwen"]).
|
||
"tool_use_enforcement": "auto",
|
||
# Staged inactivity warning: send a warning to the user at this
|
||
# threshold before escalating to a full timeout. The warning fires
|
||
# once per run and does not interrupt the agent. 0 = disable warning.
|
||
"gateway_timeout_warning": 900,
|
||
# Periodic "still working" notification interval (seconds).
|
||
# Sends a status message every N seconds so the user knows the
|
||
# agent hasn't died during long tasks. 0 = disable notifications.
|
||
"gateway_notify_interval": 600,
|
||
},
|
||
|
||
"terminal": {
|
||
"backend": "local",
|
||
"modal_mode": "auto",
|
||
"cwd": ".", # Use current directory
|
||
"timeout": 180,
|
||
# Environment variables to pass through to sandboxed execution
|
||
# (terminal and execute_code). Skill-declared required_environment_variables
|
||
# are passed through automatically; this list is for non-skill use cases.
|
||
"env_passthrough": [],
|
||
# Extra files to source in the login shell when building the
|
||
# per-session environment snapshot. Use this when tools like nvm,
|
||
# pyenv, asdf, or custom PATH entries are registered by files that
|
||
# a bash login shell would skip — most commonly ``~/.bashrc``
|
||
# (bash doesn't source bashrc in non-interactive login mode) or
|
||
# zsh-specific files like ``~/.zshrc`` / ``~/.zprofile``.
|
||
# Paths support ``~`` / ``${VAR}``. Missing files are silently
|
||
# skipped. When empty, Hermes auto-appends ``~/.bashrc`` if the
|
||
# snapshot shell is bash (this is the ``auto_source_bashrc``
|
||
# behaviour — disable with that key if you want strict login-only
|
||
# semantics).
|
||
"shell_init_files": [],
|
||
# When true (default), Hermes sources ``~/.bashrc`` in the login
|
||
# shell used to build the environment snapshot. This captures
|
||
# PATH additions, shell functions, and aliases defined in the
|
||
# user's bashrc — which a plain ``bash -l -c`` would otherwise
|
||
# miss because bash skips bashrc in non-interactive login mode.
|
||
# Turn this off if you have a bashrc that misbehaves when sourced
|
||
# non-interactively (e.g. one that hard-exits on TTY checks).
|
||
"auto_source_bashrc": True,
|
||
"docker_image": "nikolaik/python-nodejs:python3.11-nodejs20",
|
||
"docker_forward_env": [],
|
||
# Explicit environment variables to set inside Docker containers.
|
||
# Unlike docker_forward_env (which reads values from the host process),
|
||
# docker_env lets you specify exact key-value pairs — useful when Hermes
|
||
# runs as a systemd service without access to the user's shell environment.
|
||
# Example: {"SSH_AUTH_SOCK": "/run/user/1000/ssh-agent.sock"}
|
||
"docker_env": {},
|
||
"singularity_image": "docker://nikolaik/python-nodejs:python3.11-nodejs20",
|
||
"modal_image": "nikolaik/python-nodejs:python3.11-nodejs20",
|
||
"daytona_image": "nikolaik/python-nodejs:python3.11-nodejs20",
|
||
# Container resource limits (docker, singularity, modal, daytona — ignored for local/ssh)
|
||
"container_cpu": 1,
|
||
"container_memory": 5120, # MB (default 5GB)
|
||
"container_disk": 51200, # MB (default 50GB)
|
||
"container_persistent": True, # Persist filesystem across sessions
|
||
# Docker volume mounts — share host directories with the container.
|
||
# Each entry is "host_path:container_path" (standard Docker -v syntax).
|
||
# Example:
|
||
# ["/home/user/projects:/workspace/projects",
|
||
# "/home/user/.hermes/cache/documents:/output"]
|
||
# For gateway MEDIA delivery, write inside Docker to /output/... and emit
|
||
# the host-visible path in MEDIA:, not the container path.
|
||
"docker_volumes": [],
|
||
# Explicit opt-in: mount the host cwd into /workspace for Docker sessions.
|
||
# Default off because passing host directories into a sandbox weakens isolation.
|
||
"docker_mount_cwd_to_workspace": False,
|
||
# Persistent shell — keep a long-lived bash shell across execute() calls
|
||
# so cwd/env vars/shell variables survive between commands.
|
||
# Enabled by default for non-local backends (SSH); local is always opt-in
|
||
# via TERMINAL_LOCAL_PERSISTENT env var.
|
||
"persistent_shell": True,
|
||
},
|
||
|
||
"browser": {
|
||
"inactivity_timeout": 120,
|
||
"command_timeout": 30, # Timeout for browser commands in seconds (screenshot, navigate, etc.)
|
||
"record_sessions": False, # Auto-record browser sessions as WebM videos
|
||
"allow_private_urls": False, # Allow navigating to private/internal IPs (localhost, 192.168.x.x, etc.)
|
||
"cdp_url": "", # Optional persistent CDP endpoint for attaching to an existing Chromium/Chrome
|
||
"camofox": {
|
||
# When true, Hermes sends a stable profile-scoped userId to Camofox
|
||
# so the server maps it to a persistent Firefox profile automatically.
|
||
# When false (default), each session gets a random userId (ephemeral).
|
||
"managed_persistence": False,
|
||
},
|
||
},
|
||
|
||
# Filesystem checkpoints — automatic snapshots before destructive file ops.
|
||
# When enabled, the agent takes a snapshot of the working directory once per
|
||
# conversation turn (on first write_file/patch call). Use /rollback to restore.
|
||
"checkpoints": {
|
||
"enabled": True,
|
||
"max_snapshots": 50, # Max checkpoints to keep per directory
|
||
},
|
||
|
||
# Maximum characters returned by a single read_file call. Reads that
|
||
# exceed this are rejected with guidance to use offset+limit.
|
||
# 100K chars ≈ 25–35K tokens across typical tokenisers.
|
||
"file_read_max_chars": 100_000,
|
||
|
||
"compression": {
|
||
"enabled": True,
|
||
"threshold": 0.50, # compress when context usage exceeds this ratio
|
||
"target_ratio": 0.20, # fraction of threshold to preserve as recent tail
|
||
"protect_last_n": 20, # minimum recent messages to keep uncompressed
|
||
|
||
},
|
||
|
||
# AWS Bedrock provider configuration.
|
||
# Only used when model.provider is "bedrock".
|
||
"bedrock": {
|
||
"region": "", # AWS region for Bedrock API calls (empty = AWS_REGION env var → us-east-1)
|
||
"discovery": {
|
||
"enabled": True, # Auto-discover models via ListFoundationModels
|
||
"provider_filter": [], # Only show models from these providers (e.g. ["anthropic", "amazon"])
|
||
"refresh_interval": 3600, # Cache discovery results for this many seconds
|
||
},
|
||
"guardrail": {
|
||
# Amazon Bedrock Guardrails — content filtering and safety policies.
|
||
# Create a guardrail in the Bedrock console, then set the ID and version here.
|
||
# See: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
|
||
"guardrail_identifier": "", # e.g. "abc123def456"
|
||
"guardrail_version": "", # e.g. "1" or "DRAFT"
|
||
"stream_processing_mode": "async", # "sync" or "async"
|
||
"trace": "disabled", # "enabled", "disabled", or "enabled_full"
|
||
},
|
||
},
|
||
|
||
# Auxiliary model config — provider:model for each side task.
|
||
# Format: provider is the provider name, model is the model slug.
|
||
# "auto" for provider = auto-detect best available provider.
|
||
# Empty model = use provider's default auxiliary model.
|
||
# All tasks fall back to openrouter:google/gemini-3-flash-preview if
|
||
# the configured provider is unavailable.
|
||
"auxiliary": {
|
||
"vision": {
|
||
"provider": "auto", # auto | openrouter | nous | codex | custom
|
||
"model": "", # e.g. "google/gemini-2.5-flash", "gpt-4o"
|
||
"base_url": "", # direct OpenAI-compatible endpoint (takes precedence over provider)
|
||
"api_key": "", # API key for base_url (falls back to OPENAI_API_KEY)
|
||
"timeout": 120, # seconds — LLM API call timeout; vision payloads need generous timeout
|
||
"extra_body": {}, # OpenAI-compatible provider-specific request fields
|
||
"download_timeout": 30, # seconds — image HTTP download timeout; increase for slow connections
|
||
},
|
||
"web_extract": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 360, # seconds (6min) — per-attempt LLM summarization timeout; increase for slow local models
|
||
"extra_body": {},
|
||
},
|
||
"compression": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 120, # seconds — compression summarises large contexts; increase for local models
|
||
"extra_body": {},
|
||
},
|
||
"session_search": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
"max_concurrency": 3, # Clamp parallel summaries to avoid request-burst 429s on small providers
|
||
},
|
||
"skills_hub": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
},
|
||
"approval": {
|
||
"provider": "auto",
|
||
"model": "", # fast/cheap model recommended (e.g. gemini-flash, haiku)
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
},
|
||
"mcp": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
},
|
||
"flush_memories": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
},
|
||
"title_generation": {
|
||
"provider": "auto",
|
||
"model": "",
|
||
"base_url": "",
|
||
"api_key": "",
|
||
"timeout": 30,
|
||
"extra_body": {},
|
||
},
|
||
},
|
||
|
||
"display": {
|
||
"compact": False,
|
||
"personality": "kawaii",
|
||
"resume_display": "full",
|
||
"busy_input_mode": "interrupt",
|
||
"bell_on_complete": False,
|
||
"show_reasoning": False,
|
||
"streaming": False,
|
||
"final_response_markdown": "strip", # render | strip | raw
|
||
"inline_diffs": True, # Show inline diff previews for write actions (write_file, patch, skill_manage)
|
||
"show_cost": False, # Show $ cost in the status bar (off by default)
|
||
"skin": "default",
|
||
"user_message_preview": { # CLI: how many submitted user-message lines to echo back in scrollback
|
||
"first_lines": 2,
|
||
"last_lines": 2,
|
||
},
|
||
"interim_assistant_messages": True, # Gateway: show natural mid-turn assistant status messages
|
||
"tool_progress_command": False, # Enable /verbose command in messaging gateway
|
||
"tool_progress_overrides": {}, # DEPRECATED — use display.platforms instead
|
||
"tool_preview_length": 0, # Max chars for tool call previews (0 = no limit, show full paths/commands)
|
||
"platforms": {}, # Per-platform display overrides: {"telegram": {"tool_progress": "all"}, "slack": {"tool_progress": "off"}}
|
||
},
|
||
|
||
# Web dashboard settings
|
||
"dashboard": {
|
||
"theme": "default", # Dashboard visual theme: "default", "midnight", "ember", "mono", "cyberpunk", "rose"
|
||
},
|
||
|
||
# Privacy settings
|
||
"privacy": {
|
||
"redact_pii": False, # When True, hash user IDs and strip phone numbers from LLM context
|
||
},
|
||
|
||
# Text-to-speech configuration
|
||
# Each provider supports an optional `max_text_length:` override for the
|
||
# per-request input-character cap. Omit it to use the provider's documented
|
||
# limit (OpenAI 4096, xAI 15000, MiniMax 10000, ElevenLabs 5k-40k model-aware,
|
||
# Gemini 5000, Edge 5000, Mistral 4000, NeuTTS/KittenTTS 2000).
|
||
"tts": {
|
||
"provider": "edge", # "edge" (free) | "elevenlabs" (premium) | "openai" | "xai" | "minimax" | "mistral" | "neutts" (local)
|
||
"edge": {
|
||
"voice": "en-US-AriaNeural",
|
||
# Popular: AriaNeural, JennyNeural, AndrewNeural, BrianNeural, SoniaNeural
|
||
},
|
||
"elevenlabs": {
|
||
"voice_id": "pNInz6obpgDQGcFmaJgB", # Adam
|
||
"model_id": "eleven_multilingual_v2",
|
||
},
|
||
"openai": {
|
||
"model": "gpt-4o-mini-tts",
|
||
"voice": "alloy",
|
||
# Voices: alloy, echo, fable, onyx, nova, shimmer
|
||
},
|
||
"xai": {
|
||
"voice_id": "eve",
|
||
"language": "en",
|
||
"sample_rate": 24000,
|
||
"bit_rate": 128000,
|
||
},
|
||
"mistral": {
|
||
"model": "voxtral-mini-tts-2603",
|
||
"voice_id": "c69964a6-ab8b-4f8a-9465-ec0925096ec8", # Paul - Neutral
|
||
},
|
||
"neutts": {
|
||
"ref_audio": "", # Path to reference voice audio (empty = bundled default)
|
||
"ref_text": "", # Path to reference voice transcript (empty = bundled default)
|
||
"model": "neuphonic/neutts-air-q4-gguf", # HuggingFace model repo
|
||
"device": "cpu", # cpu, cuda, or mps
|
||
},
|
||
},
|
||
|
||
"stt": {
|
||
"enabled": True,
|
||
"provider": "local", # "local" (free, faster-whisper) | "groq" | "openai" (Whisper API) | "mistral" (Voxtral Transcribe)
|
||
"local": {
|
||
"model": "base", # tiny, base, small, medium, large-v3
|
||
"language": "", # auto-detect by default; set to "en", "es", "fr", etc. to force
|
||
},
|
||
"openai": {
|
||
"model": "whisper-1", # whisper-1, gpt-4o-mini-transcribe, gpt-4o-transcribe
|
||
},
|
||
"mistral": {
|
||
"model": "voxtral-mini-latest", # voxtral-mini-latest, voxtral-mini-2602
|
||
},
|
||
},
|
||
|
||
"voice": {
|
||
"record_key": "ctrl+b",
|
||
"max_recording_seconds": 120,
|
||
"auto_tts": False,
|
||
"beep_enabled": True, # Play record start/stop beeps in CLI voice mode
|
||
"silence_threshold": 200, # RMS below this = silence (0-32767)
|
||
"silence_duration": 3.0, # Seconds of silence before auto-stop
|
||
},
|
||
|
||
"human_delay": {
|
||
"mode": "off",
|
||
"min_ms": 800,
|
||
"max_ms": 2500,
|
||
},
|
||
|
||
# Context engine -- controls how the context window is managed when
|
||
# approaching the model's token limit.
|
||
# "compressor" = built-in lossy summarization (default).
|
||
# Set to a plugin name to activate an alternative engine (e.g. "lcm"
|
||
# for Lossless Context Management). The engine must be installed as
|
||
# a plugin in plugins/context_engine/<name>/ or ~/.hermes/plugins/.
|
||
"context": {
|
||
"engine": "compressor",
|
||
},
|
||
|
||
# Persistent memory -- bounded curated memory injected into system prompt
|
||
"memory": {
|
||
"memory_enabled": True,
|
||
"user_profile_enabled": True,
|
||
"memory_char_limit": 2200, # ~800 tokens at 2.75 chars/token
|
||
"user_char_limit": 1375, # ~500 tokens at 2.75 chars/token
|
||
# External memory provider plugin (empty = built-in only).
|
||
# Set to a provider name to activate: "openviking", "mem0",
|
||
# "hindsight", "holographic", "retaindb", "byterover".
|
||
# Only ONE external provider is allowed at a time.
|
||
"provider": "",
|
||
},
|
||
|
||
# Subagent delegation — override the provider:model used by delegate_task
|
||
# so child agents can run on a different (cheaper/faster) provider and model.
|
||
# Uses the same runtime provider resolution as CLI/gateway startup, so all
|
||
# configured providers (OpenRouter, Nous, Z.ai, Kimi, etc.) are supported.
|
||
"delegation": {
|
||
"model": "", # e.g. "google/gemini-3-flash-preview" (empty = inherit parent model)
|
||
"provider": "", # e.g. "openrouter" (empty = inherit parent provider + credentials)
|
||
"base_url": "", # direct OpenAI-compatible endpoint for subagents
|
||
"api_key": "", # API key for delegation.base_url (falls back to OPENAI_API_KEY)
|
||
"max_iterations": 50, # per-subagent iteration cap (each subagent gets its own budget,
|
||
# independent of the parent's max_iterations)
|
||
"reasoning_effort": "", # reasoning effort for subagents: "xhigh", "high", "medium",
|
||
# "low", "minimal", "none" (empty = inherit parent's level)
|
||
"max_concurrent_children": 3, # max parallel children per batch; floor of 1 enforced, no ceiling
|
||
# Orchestrator role controls (see tools/delegate_tool.py:_get_max_spawn_depth
|
||
# and _get_orchestrator_enabled). Values are clamped to [1, 3] with a
|
||
# warning log if out of range.
|
||
"max_spawn_depth": 1, # depth cap (1 = flat [default], 2 = orchestrator→leaf, 3 = three-level)
|
||
"orchestrator_enabled": True, # kill switch for role="orchestrator"
|
||
},
|
||
|
||
# Ephemeral prefill messages file — JSON list of {role, content} dicts
|
||
# injected at the start of every API call for few-shot priming.
|
||
# Never saved to sessions, logs, or trajectories.
|
||
"prefill_messages_file": "",
|
||
|
||
# Skills — external skill directories for sharing skills across tools/agents.
|
||
# Each path is expanded (~, ${VAR}) and resolved. Read-only — skill creation
|
||
# always goes to ~/.hermes/skills/.
|
||
"skills": {
|
||
"external_dirs": [], # e.g. ["~/.agents/skills", "/shared/team-skills"]
|
||
# Substitute ${HERMES_SKILL_DIR} and ${HERMES_SESSION_ID} in SKILL.md
|
||
# content with the absolute skill directory and the active session id
|
||
# before the agent sees it. Lets skill authors reference bundled
|
||
# scripts without the agent having to join paths.
|
||
"template_vars": True,
|
||
# Pre-execute inline shell snippets written as !`cmd` in SKILL.md
|
||
# body. Their stdout is inlined into the skill message before the
|
||
# agent reads it, so skills can inject dynamic context (dates, git
|
||
# state, detected tool versions, …). Off by default because any
|
||
# content from the skill author runs on the host without approval;
|
||
# only enable for skill sources you trust.
|
||
"inline_shell": False,
|
||
# Timeout (seconds) for each !`cmd` snippet when inline_shell is on.
|
||
"inline_shell_timeout": 10,
|
||
},
|
||
|
||
# Honcho AI-native memory -- reads ~/.honcho/config.json as single source of truth.
|
||
# This section is only needed for hermes-specific overrides; everything else
|
||
# (apiKey, workspace, peerName, sessions, enabled) comes from the global config.
|
||
"honcho": {},
|
||
|
||
# IANA timezone (e.g. "Asia/Kolkata", "America/New_York").
|
||
# Empty string means use server-local time.
|
||
"timezone": "",
|
||
|
||
# Discord platform settings (gateway mode)
|
||
"discord": {
|
||
"require_mention": True, # Require @mention to respond in server channels
|
||
"free_response_channels": "", # Comma-separated channel IDs where bot responds without mention
|
||
"allowed_channels": "", # If set, bot ONLY responds in these channel IDs (whitelist)
|
||
"auto_thread": True, # Auto-create threads on @mention in channels (like Slack)
|
||
"reactions": True, # Add 👀/✅/❌ reactions to messages during processing
|
||
"channel_prompts": {}, # Per-channel ephemeral system prompts (forum parents apply to child threads)
|
||
# discord_server tool: restrict which actions the agent may call.
|
||
# Default (empty) = all actions allowed (subject to bot privileged intents).
|
||
# Accepts comma-separated string ("list_guilds,list_channels,fetch_messages")
|
||
# or YAML list. Unknown names are dropped with a warning at load time.
|
||
# Actions: list_guilds, server_info, list_channels, channel_info,
|
||
# list_roles, member_info, search_members, fetch_messages, list_pins,
|
||
# pin_message, unpin_message, create_thread, add_role, remove_role.
|
||
"server_actions": "",
|
||
},
|
||
|
||
# WhatsApp platform settings (gateway mode)
|
||
"whatsapp": {
|
||
# Reply prefix prepended to every outgoing WhatsApp message.
|
||
# Default (None) uses the built-in "⚕ *Hermes Agent*" header.
|
||
# Set to "" (empty string) to disable the header entirely.
|
||
# Supports \n for newlines, e.g. "🤖 *My Bot*\n──────\n"
|
||
},
|
||
|
||
# Telegram platform settings (gateway mode)
|
||
"telegram": {
|
||
"channel_prompts": {}, # Per-chat/topic ephemeral system prompts (topics inherit from parent group)
|
||
},
|
||
|
||
# Slack platform settings (gateway mode)
|
||
"slack": {
|
||
"channel_prompts": {}, # Per-channel ephemeral system prompts
|
||
},
|
||
|
||
# Mattermost platform settings (gateway mode)
|
||
"mattermost": {
|
||
"channel_prompts": {}, # Per-channel ephemeral system prompts
|
||
},
|
||
|
||
# Approval mode for dangerous commands:
|
||
# manual — always prompt the user (default)
|
||
# smart — use auxiliary LLM to auto-approve low-risk commands, prompt for high-risk
|
||
# off — skip all approval prompts (equivalent to --yolo)
|
||
#
|
||
# cron_mode — what to do when a cron job hits a dangerous command:
|
||
# deny — block the command and let the agent find another way (default, safe)
|
||
# approve — auto-approve all dangerous commands in cron jobs
|
||
"approvals": {
|
||
"mode": "manual",
|
||
"timeout": 60,
|
||
"cron_mode": "deny",
|
||
},
|
||
|
||
# Permanently allowed dangerous command patterns (added via "always" approval)
|
||
"command_allowlist": [],
|
||
# User-defined quick commands that bypass the agent loop (type: exec only)
|
||
"quick_commands": {},
|
||
|
||
# Shell-script hooks — declarative bridge that invokes shell scripts
|
||
# on plugin-hook events (pre_tool_call, post_tool_call, pre_llm_call,
|
||
# subagent_stop, etc.). Each entry maps an event name to a list of
|
||
# {matcher, command, timeout} dicts. First registration of a new
|
||
# command prompts the user for consent; subsequent runs reuse the
|
||
# stored approval from ~/.hermes/shell-hooks-allowlist.json.
|
||
# See `website/docs/user-guide/features/hooks.md` for schema + examples.
|
||
"hooks": {},
|
||
|
||
# Auto-accept shell-hook registrations without a TTY prompt. Also
|
||
# toggleable per-invocation via --accept-hooks or HERMES_ACCEPT_HOOKS=1.
|
||
# Gateway / cron / non-interactive runs need this (or one of the other
|
||
# channels) to pick up newly-added hooks.
|
||
"hooks_auto_accept": False,
|
||
# Custom personalities — add your own entries here
|
||
# Supports string format: {"name": "system prompt"}
|
||
# Or dict format: {"name": {"description": "...", "system_prompt": "...", "tone": "...", "style": "..."}}
|
||
"personalities": {},
|
||
|
||
# Pre-exec security scanning via tirith
|
||
"security": {
|
||
"redact_secrets": True,
|
||
"tirith_enabled": True,
|
||
"tirith_path": "tirith",
|
||
"tirith_timeout": 5,
|
||
"tirith_fail_open": True,
|
||
"website_blocklist": {
|
||
"enabled": False,
|
||
"domains": [],
|
||
"shared_files": [],
|
||
},
|
||
},
|
||
|
||
"cron": {
|
||
# Wrap delivered cron responses with a header (task name) and footer
|
||
# ("The agent cannot see this message"). Set to false for clean output.
|
||
"wrap_response": True,
|
||
# Maximum number of due jobs to run in parallel per tick.
|
||
# null/0 = unbounded (limited only by thread count).
|
||
# 1 = serial (pre-v0.9 behaviour).
|
||
# Also overridable via HERMES_CRON_MAX_PARALLEL env var.
|
||
"max_parallel_jobs": None,
|
||
},
|
||
|
||
# execute_code settings — controls the tool used for programmatic tool calls.
|
||
"code_execution": {
|
||
# Execution mode:
|
||
# project (default) — scripts run in the session's working directory
|
||
# with the active virtualenv/conda env's python, so project deps
|
||
# (pandas, torch, project packages) and relative paths resolve.
|
||
# strict — scripts run in an isolated temp directory with
|
||
# hermes-agent's own python (sys.executable). Maximum isolation
|
||
# and reproducibility; project deps and relative paths won't work.
|
||
# Env scrubbing (strips *_API_KEY, *_TOKEN, *_SECRET, ...) and the
|
||
# tool whitelist apply identically in both modes.
|
||
"mode": "project",
|
||
},
|
||
|
||
# Logging — controls file logging to ~/.hermes/logs/.
|
||
# agent.log captures INFO+ (all agent activity); errors.log captures WARNING+.
|
||
"logging": {
|
||
"level": "INFO", # Minimum level for agent.log: DEBUG, INFO, WARNING
|
||
"max_size_mb": 5, # Max size per log file before rotation
|
||
"backup_count": 3, # Number of rotated backup files to keep
|
||
},
|
||
|
||
# Network settings — workarounds for connectivity issues.
|
||
"network": {
|
||
# Force IPv4 connections. On servers with broken or unreachable IPv6,
|
||
# Python tries AAAA records first and hangs for the full TCP timeout
|
||
# before falling back to IPv4. Set to true to skip IPv6 entirely.
|
||
"force_ipv4": False,
|
||
},
|
||
|
||
# Config schema version - bump this when adding new required fields
|
||
"_config_version": 22,
|
||
}


# =============================================================================
# Config Migration System
# =============================================================================


# Track which env vars were introduced in each config version.
# Migration only mentions vars new since the user's previous version.
# Maps config schema version -> env vars first introduced at that version.
# Consulted during config migration so upgrade prompts only surface the
# variables a user has not been asked about before.
ENV_VARS_BY_VERSION: Dict[int, List[str]] = {
    3: [
        "FIRECRAWL_API_KEY",
        "BROWSERBASE_API_KEY",
        "BROWSERBASE_PROJECT_ID",
        "FAL_KEY",
    ],
    4: [
        "VOICE_TOOLS_OPENAI_KEY",
        "ELEVENLABS_API_KEY",
    ],
    5: [
        "WHATSAPP_ENABLED",
        "WHATSAPP_MODE",
        "WHATSAPP_ALLOWED_USERS",
        "SLACK_BOT_TOKEN",
        "SLACK_APP_TOKEN",
        "SLACK_ALLOWED_USERS",
    ],
    10: [
        "TAVILY_API_KEY",
    ],
    11: [
        "TERMINAL_MODAL_MODE",
    ],
}


# Required environment variables with metadata for migration prompts.
# LLM provider is required but handled in the setup wizard's provider
# selection step (Nous Portal / OpenRouter / Custom endpoint), so this
# dict is intentionally empty — no single env var is universally required.
# Intentionally empty (see comment above); annotated to match the value
# type used by OPTIONAL_ENV_VARS so both tables share one schema.
# Forward-reference string avoids depending on _EnvVarInfo's definition order.
REQUIRED_ENV_VARS: Dict[str, "_EnvVarInfo"] = {}


# Optional environment variables that enhance functionality
OPTIONAL_ENV_VARS: Dict[str, _EnvVarInfo] = {
|
||
# ── Provider (handled in provider selection, not shown in checklists) ──
|
||
"NOUS_BASE_URL": {
|
||
"description": "Nous Portal base URL override",
|
||
"prompt": "Nous Portal base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OPENROUTER_API_KEY": {
|
||
"description": "OpenRouter API key (for vision, web scraping helpers, and MoA)",
|
||
"prompt": "OpenRouter API key",
|
||
"url": "https://openrouter.ai/keys",
|
||
"password": True,
|
||
"tools": ["vision_analyze", "mixture_of_agents"],
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"GOOGLE_API_KEY": {
|
||
"description": "Google AI Studio API key (also recognized as GEMINI_API_KEY)",
|
||
"prompt": "Google AI Studio API key",
|
||
"url": "https://aistudio.google.com/app/apikey",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"GEMINI_API_KEY": {
|
||
"description": "Google AI Studio API key (alias for GOOGLE_API_KEY)",
|
||
"prompt": "Gemini API key",
|
||
"url": "https://aistudio.google.com/app/apikey",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"GEMINI_BASE_URL": {
|
||
"description": "Google AI Studio base URL override",
|
||
"prompt": "Gemini base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"XAI_API_KEY": {
|
||
"description": "xAI API key",
|
||
"prompt": "xAI API key",
|
||
"url": "https://console.x.ai/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"XAI_BASE_URL": {
|
||
"description": "xAI base URL override",
|
||
"prompt": "xAI base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"NVIDIA_API_KEY": {
|
||
"description": "NVIDIA NIM API key (build.nvidia.com or local NIM endpoint)",
|
||
"prompt": "NVIDIA NIM API key",
|
||
"url": "https://build.nvidia.com/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"NVIDIA_BASE_URL": {
|
||
"description": "NVIDIA NIM base URL override (e.g. http://localhost:8000/v1 for local NIM)",
|
||
"prompt": "NVIDIA NIM base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"GLM_API_KEY": {
|
||
"description": "Z.AI / GLM API key (also recognized as ZAI_API_KEY / Z_AI_API_KEY)",
|
||
"prompt": "Z.AI / GLM API key",
|
||
"url": "https://z.ai/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"ZAI_API_KEY": {
|
||
"description": "Z.AI API key (alias for GLM_API_KEY)",
|
||
"prompt": "Z.AI API key",
|
||
"url": "https://z.ai/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"Z_AI_API_KEY": {
|
||
"description": "Z.AI API key (alias for GLM_API_KEY)",
|
||
"prompt": "Z.AI API key",
|
||
"url": "https://z.ai/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"GLM_BASE_URL": {
|
||
"description": "Z.AI / GLM base URL override",
|
||
"prompt": "Z.AI / GLM base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"KIMI_API_KEY": {
|
||
"description": "Kimi / Moonshot API key",
|
||
"prompt": "Kimi API key",
|
||
"url": "https://platform.moonshot.cn/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"KIMI_BASE_URL": {
|
||
"description": "Kimi / Moonshot base URL override",
|
||
"prompt": "Kimi base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"KIMI_CN_API_KEY": {
|
||
"description": "Kimi / Moonshot China API key",
|
||
"prompt": "Kimi (China) API key",
|
||
"url": "https://platform.moonshot.cn/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"ARCEEAI_API_KEY": {
|
||
"description": "Arcee AI API key",
|
||
"prompt": "Arcee AI API key",
|
||
"url": "https://chat.arcee.ai/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"ARCEE_BASE_URL": {
|
||
"description": "Arcee AI base URL override",
|
||
"prompt": "Arcee base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"MINIMAX_API_KEY": {
|
||
"description": "MiniMax API key (international)",
|
||
"prompt": "MiniMax API key",
|
||
"url": "https://www.minimax.io/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"MINIMAX_BASE_URL": {
|
||
"description": "MiniMax base URL override",
|
||
"prompt": "MiniMax base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"MINIMAX_CN_API_KEY": {
|
||
"description": "MiniMax API key (China endpoint)",
|
||
"prompt": "MiniMax (China) API key",
|
||
"url": "https://www.minimaxi.com/",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"MINIMAX_CN_BASE_URL": {
|
||
"description": "MiniMax (China) base URL override",
|
||
"prompt": "MiniMax (China) base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"DEEPSEEK_API_KEY": {
|
||
"description": "DeepSeek API key for direct DeepSeek access",
|
||
"prompt": "DeepSeek API Key",
|
||
"url": "https://platform.deepseek.com/api_keys",
|
||
"password": True,
|
||
"category": "provider",
|
||
},
|
||
"DEEPSEEK_BASE_URL": {
|
||
"description": "Custom DeepSeek API base URL (advanced)",
|
||
"prompt": "DeepSeek Base URL",
|
||
"url": "",
|
||
"password": False,
|
||
"category": "provider",
|
||
},
|
||
"DASHSCOPE_API_KEY": {
|
||
"description": "Alibaba Cloud DashScope API key (Qwen + multi-provider models)",
|
||
"prompt": "DashScope API Key",
|
||
"url": "https://modelstudio.console.alibabacloud.com/",
|
||
"password": True,
|
||
"category": "provider",
|
||
},
|
||
"DASHSCOPE_BASE_URL": {
|
||
"description": "Custom DashScope base URL (default: coding-intl OpenAI-compat endpoint)",
|
||
"prompt": "DashScope Base URL",
|
||
"url": "",
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"HERMES_QWEN_BASE_URL": {
|
||
"description": "Qwen Portal base URL override (default: https://portal.qwen.ai/v1)",
|
||
"prompt": "Qwen Portal base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"HERMES_GEMINI_CLIENT_ID": {
|
||
"description": "Google OAuth client ID for google-gemini-cli (optional; defaults to Google's public gemini-cli client)",
|
||
"prompt": "Google OAuth client ID (optional — leave empty to use the public default)",
|
||
"url": "https://console.cloud.google.com/apis/credentials",
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"HERMES_GEMINI_CLIENT_SECRET": {
|
||
"description": "Google OAuth client secret for google-gemini-cli (optional)",
|
||
"prompt": "Google OAuth client secret (optional)",
|
||
"url": "https://console.cloud.google.com/apis/credentials",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"HERMES_GEMINI_PROJECT_ID": {
|
||
"description": "GCP project ID for paid Gemini tiers (free tier auto-provisions)",
|
||
"prompt": "GCP project ID for Gemini OAuth (leave empty for free tier)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OPENCODE_ZEN_API_KEY": {
|
||
"description": "OpenCode Zen API key (pay-as-you-go access to curated models)",
|
||
"prompt": "OpenCode Zen API key",
|
||
"url": "https://opencode.ai/auth",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OPENCODE_ZEN_BASE_URL": {
|
||
"description": "OpenCode Zen base URL override",
|
||
"prompt": "OpenCode Zen base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OPENCODE_GO_API_KEY": {
|
||
"description": "OpenCode Go API key ($10/month subscription for open models)",
|
||
"prompt": "OpenCode Go API key",
|
||
"url": "https://opencode.ai/auth",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OPENCODE_GO_BASE_URL": {
|
||
"description": "OpenCode Go base URL override",
|
||
"prompt": "OpenCode Go base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"HF_TOKEN": {
|
||
"description": "Hugging Face token for Inference Providers (20+ open models via router.huggingface.co)",
|
||
"prompt": "Hugging Face Token",
|
||
"url": "https://huggingface.co/settings/tokens",
|
||
"password": True,
|
||
"category": "provider",
|
||
},
|
||
"HF_BASE_URL": {
|
||
"description": "Hugging Face Inference Providers base URL override",
|
||
"prompt": "HF base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OLLAMA_API_KEY": {
|
||
"description": "Ollama Cloud API key (ollama.com — cloud-hosted open models)",
|
||
"prompt": "Ollama Cloud API key",
|
||
"url": "https://ollama.com/settings",
|
||
"password": True,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"OLLAMA_BASE_URL": {
|
||
"description": "Ollama Cloud base URL override (default: https://ollama.com/v1)",
|
||
"prompt": "Ollama base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"XIAOMI_API_KEY": {
|
||
"description": "Xiaomi MiMo API key for MiMo models (mimo-v2-pro, mimo-v2-omni, mimo-v2-flash)",
|
||
"prompt": "Xiaomi MiMo API Key",
|
||
"url": "https://platform.xiaomimimo.com",
|
||
"password": True,
|
||
"category": "provider",
|
||
},
|
||
"XIAOMI_BASE_URL": {
|
||
"description": "Xiaomi MiMo base URL override (default: https://api.xiaomimimo.com/v1)",
|
||
"prompt": "Xiaomi base URL (leave empty for default)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"AWS_REGION": {
|
||
"description": "AWS region for Bedrock API calls (e.g. us-east-1, eu-central-1)",
|
||
"prompt": "AWS Region",
|
||
"url": "https://docs.aws.amazon.com/bedrock/latest/userguide/bedrock-regions.html",
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
"AWS_PROFILE": {
|
||
"description": "AWS named profile for Bedrock authentication (from ~/.aws/credentials)",
|
||
"prompt": "AWS Profile",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "provider",
|
||
"advanced": True,
|
||
},
|
||
|
||
# ── Tool API keys ──
|
||
"EXA_API_KEY": {
|
||
"description": "Exa API key for AI-native web search and contents",
|
||
"prompt": "Exa API key",
|
||
"url": "https://exa.ai/",
|
||
"tools": ["web_search", "web_extract"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"PARALLEL_API_KEY": {
|
||
"description": "Parallel API key for AI-native web search and extract",
|
||
"prompt": "Parallel API key",
|
||
"url": "https://parallel.ai/",
|
||
"tools": ["web_search", "web_extract"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"FIRECRAWL_API_KEY": {
|
||
"description": "Firecrawl API key for web search and scraping",
|
||
"prompt": "Firecrawl API key",
|
||
"url": "https://firecrawl.dev/",
|
||
"tools": ["web_search", "web_extract"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"FIRECRAWL_API_URL": {
|
||
"description": "Firecrawl API URL for self-hosted instances (optional)",
|
||
"prompt": "Firecrawl API URL (leave empty for cloud)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "tool",
|
||
"advanced": True,
|
||
},
|
||
"FIRECRAWL_GATEWAY_URL": {
|
||
"description": "Exact Firecrawl tool-gateway origin override for Nous Subscribers only (optional)",
|
||
"prompt": "Firecrawl gateway URL (leave empty to derive from domain)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "tool",
|
||
"advanced": True,
|
||
},
|
||
"TOOL_GATEWAY_DOMAIN": {
|
||
"description": "Shared tool-gateway domain suffix for Nous Subscribers only, used to derive vendor hosts, e.g. nousresearch.com -> firecrawl-gateway.nousresearch.com",
|
||
"prompt": "Tool-gateway domain suffix",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "tool",
|
||
"advanced": True,
|
||
},
|
||
"TOOL_GATEWAY_SCHEME": {
|
||
"description": "Shared tool-gateway URL scheme for Nous Subscribers only, used to derive vendor hosts (`https` by default, set `http` for local gateway testing)",
|
||
"prompt": "Tool-gateway URL scheme",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "tool",
|
||
"advanced": True,
|
||
},
|
||
"TOOL_GATEWAY_USER_TOKEN": {
|
||
"description": "Explicit Nous Subscriber access token for tool-gateway requests (optional; otherwise read from the Hermes auth store)",
|
||
"prompt": "Tool-gateway user token",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "tool",
|
||
"advanced": True,
|
||
},
|
||
"TAVILY_API_KEY": {
|
||
"description": "Tavily API key for AI-native web search, extract, and crawl",
|
||
"prompt": "Tavily API key",
|
||
"url": "https://app.tavily.com/home",
|
||
"tools": ["web_search", "web_extract", "web_crawl"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"BROWSERBASE_API_KEY": {
|
||
"description": "Browserbase API key for cloud browser (optional — local browser works without this)",
|
||
"prompt": "Browserbase API key",
|
||
"url": "https://browserbase.com/",
|
||
"tools": ["browser_navigate", "browser_click"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"BROWSERBASE_PROJECT_ID": {
|
||
"description": "Browserbase project ID (optional — only needed for cloud browser)",
|
||
"prompt": "Browserbase project ID",
|
||
"url": "https://browserbase.com/",
|
||
"tools": ["browser_navigate", "browser_click"],
|
||
"password": False,
|
||
"category": "tool",
|
||
},
|
||
"BROWSER_USE_API_KEY": {
|
||
"description": "Browser Use API key for cloud browser (optional — local browser works without this)",
|
||
"prompt": "Browser Use API key",
|
||
"url": "https://browser-use.com/",
|
||
"tools": ["browser_navigate", "browser_click"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"FIRECRAWL_BROWSER_TTL": {
|
||
"description": "Firecrawl browser session TTL in seconds (optional, default 300)",
|
||
"prompt": "Browser session TTL (seconds)",
|
||
"tools": ["browser_navigate", "browser_click"],
|
||
"password": False,
|
||
"category": "tool",
|
||
},
|
||
"CAMOFOX_URL": {
|
||
"description": "Camofox browser server URL for local anti-detection browsing (e.g. http://localhost:9377)",
|
||
"prompt": "Camofox server URL",
|
||
"url": "https://github.com/jo-inc/camofox-browser",
|
||
"tools": ["browser_navigate", "browser_click"],
|
||
"password": False,
|
||
"category": "tool",
|
||
},
|
||
"FAL_KEY": {
|
||
"description": "FAL API key for image generation",
|
||
"prompt": "FAL API key",
|
||
"url": "https://fal.ai/",
|
||
"tools": ["image_generate"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"TINKER_API_KEY": {
|
||
"description": "Tinker API key for RL training",
|
||
"prompt": "Tinker API key",
|
||
"url": "https://tinker-console.thinkingmachines.ai/keys",
|
||
"tools": ["rl_start_training", "rl_check_status", "rl_stop_training"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"WANDB_API_KEY": {
|
||
"description": "Weights & Biases API key for experiment tracking",
|
||
"prompt": "WandB API key",
|
||
"url": "https://wandb.ai/authorize",
|
||
"tools": ["rl_get_results", "rl_check_status"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"VOICE_TOOLS_OPENAI_KEY": {
|
||
"description": "OpenAI API key for voice transcription (Whisper) and OpenAI TTS",
|
||
"prompt": "OpenAI API Key (for Whisper STT + TTS)",
|
||
"url": "https://platform.openai.com/api-keys",
|
||
"tools": ["voice_transcription", "openai_tts"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"ELEVENLABS_API_KEY": {
|
||
"description": "ElevenLabs API key for premium text-to-speech voices",
|
||
"prompt": "ElevenLabs API key",
|
||
"url": "https://elevenlabs.io/",
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"MISTRAL_API_KEY": {
|
||
"description": "Mistral API key for Voxtral TTS and transcription (STT)",
|
||
"prompt": "Mistral API key",
|
||
"url": "https://console.mistral.ai/",
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"GITHUB_TOKEN": {
|
||
"description": "GitHub token for Skills Hub (higher API rate limits, skill publish)",
|
||
"prompt": "GitHub Token",
|
||
"url": "https://github.com/settings/tokens",
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
|
||
# ── Honcho ──
|
||
"HONCHO_API_KEY": {
|
||
"description": "Honcho API key for AI-native persistent memory",
|
||
"prompt": "Honcho API key",
|
||
"url": "https://app.honcho.dev",
|
||
"tools": ["honcho_context"],
|
||
"password": True,
|
||
"category": "tool",
|
||
},
|
||
"HONCHO_BASE_URL": {
|
||
"description": "Base URL for self-hosted Honcho instances (no API key needed)",
|
||
"prompt": "Honcho base URL (e.g. http://localhost:8000)",
|
||
"category": "tool",
|
||
},
|
||
|
||
# ── Messaging platforms ──
|
||
"TELEGRAM_BOT_TOKEN": {
|
||
"description": "Telegram bot token from @BotFather",
|
||
"prompt": "Telegram bot token",
|
||
"url": "https://t.me/BotFather",
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"TELEGRAM_ALLOWED_USERS": {
|
||
"description": "Comma-separated Telegram user IDs allowed to use the bot (get ID from @userinfobot)",
|
||
"prompt": "Allowed Telegram user IDs (comma-separated)",
|
||
"url": "https://t.me/userinfobot",
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"TELEGRAM_PROXY": {
|
||
"description": "Proxy URL for Telegram connections (overrides HTTPS_PROXY). Supports http://, https://, socks5://",
|
||
"prompt": "Telegram proxy URL (optional)",
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"DISCORD_BOT_TOKEN": {
|
||
"description": "Discord bot token from Developer Portal",
|
||
"prompt": "Discord bot token",
|
||
"url": "https://discord.com/developers/applications",
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"DISCORD_ALLOWED_USERS": {
|
||
"description": "Comma-separated Discord user IDs allowed to use the bot",
|
||
"prompt": "Allowed Discord user IDs (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"DISCORD_REPLY_TO_MODE": {
|
||
"description": "Discord reply threading mode: 'off' (no reply references), 'first' (reply on first message only, default), 'all' (reply on every chunk)",
|
||
"prompt": "Discord reply mode (off/first/all)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"SLACK_BOT_TOKEN": {
|
||
"description": "Slack bot token (xoxb-). Get from OAuth & Permissions after installing your app. "
|
||
"Required scopes: chat:write, app_mentions:read, channels:history, groups:history, "
|
||
"im:history, im:read, im:write, users:read, files:read, files:write",
|
||
"prompt": "Slack Bot Token (xoxb-...)",
|
||
"url": "https://api.slack.com/apps",
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"SLACK_APP_TOKEN": {
|
||
"description": "Slack app-level token (xapp-) for Socket Mode. Get from Basic Information → "
|
||
"App-Level Tokens. Also ensure Event Subscriptions include: message.im, "
|
||
"message.channels, message.groups, app_mention",
|
||
"prompt": "Slack App Token (xapp-...)",
|
||
"url": "https://api.slack.com/apps",
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"MATTERMOST_URL": {
|
||
"description": "Mattermost server URL (e.g. https://mm.example.com)",
|
||
"prompt": "Mattermost server URL",
|
||
"url": "https://mattermost.com/deploy/",
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATTERMOST_TOKEN": {
|
||
"description": "Mattermost bot token or personal access token",
|
||
"prompt": "Mattermost bot token",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"MATTERMOST_ALLOWED_USERS": {
|
||
"description": "Comma-separated Mattermost user IDs allowed to use the bot",
|
||
"prompt": "Allowed Mattermost user IDs (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATTERMOST_REQUIRE_MENTION": {
|
||
"description": "Require @mention in Mattermost channels (default: true). Set to false to respond to all messages.",
|
||
"prompt": "Require @mention in channels",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATTERMOST_FREE_RESPONSE_CHANNELS": {
|
||
"description": "Comma-separated Mattermost channel IDs where bot responds without @mention",
|
||
"prompt": "Free-response channel IDs (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATRIX_HOMESERVER": {
|
||
"description": "Matrix homeserver URL (e.g. https://matrix.example.org)",
|
||
"prompt": "Matrix homeserver URL",
|
||
"url": "https://matrix.org/ecosystem/servers/",
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATRIX_ACCESS_TOKEN": {
|
||
"description": "Matrix access token (preferred over password login)",
|
||
"prompt": "Matrix access token",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"MATRIX_USER_ID": {
|
||
"description": "Matrix user ID (e.g. @hermes:example.org)",
|
||
"prompt": "Matrix user ID (@user:server)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATRIX_ALLOWED_USERS": {
|
||
"description": "Comma-separated Matrix user IDs allowed to use the bot (@user:server format)",
|
||
"prompt": "Allowed Matrix user IDs (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"MATRIX_REQUIRE_MENTION": {
|
||
"description": "Require @mention in Matrix rooms (default: true). Set to false to respond to all messages.",
|
||
"prompt": "Require @mention in rooms (true/false)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"MATRIX_FREE_RESPONSE_ROOMS": {
|
||
"description": "Comma-separated Matrix room IDs where bot responds without @mention",
|
||
"prompt": "Free-response room IDs (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"MATRIX_AUTO_THREAD": {
|
||
"description": "Auto-create threads for messages in Matrix rooms (default: true)",
|
||
"prompt": "Auto-create threads in rooms (true/false)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"MATRIX_DEVICE_ID": {
|
||
"description": "Stable Matrix device ID for E2EE persistence across restarts (e.g. HERMES_BOT)",
|
||
"prompt": "Matrix device ID (stable across restarts)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"MATRIX_RECOVERY_KEY": {
|
||
"description": "Matrix recovery key for cross-signing verification after device key rotation (from Element: Settings → Security → Recovery Key)",
|
||
"prompt": "Matrix recovery key",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"BLUEBUBBLES_SERVER_URL": {
|
||
"description": "BlueBubbles server URL for iMessage integration (e.g. http://192.168.1.10:1234)",
|
||
"prompt": "BlueBubbles server URL",
|
||
"url": "https://bluebubbles.app/",
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"BLUEBUBBLES_PASSWORD": {
|
||
"description": "BlueBubbles server password (from BlueBubbles Server → Settings → API)",
|
||
"prompt": "BlueBubbles server password",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"BLUEBUBBLES_ALLOWED_USERS": {
|
||
"description": "Comma-separated iMessage addresses (email or phone) allowed to use the bot",
|
||
"prompt": "Allowed iMessage addresses (comma-separated)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"BLUEBUBBLES_ALLOW_ALL_USERS": {
|
||
"description": "Allow all BlueBubbles users without allowlist",
|
||
"prompt": "Allow All BlueBubbles Users",
|
||
"category": "messaging",
|
||
},
|
||
"QQ_APP_ID": {
|
||
"description": "QQ Bot App ID from QQ Open Platform (q.qq.com)",
|
||
"prompt": "QQ App ID",
|
||
"url": "https://q.qq.com",
|
||
"category": "messaging",
|
||
},
|
||
"QQ_CLIENT_SECRET": {
|
||
"description": "QQ Bot Client Secret from QQ Open Platform",
|
||
"prompt": "QQ Client Secret",
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
"QQ_ALLOWED_USERS": {
|
||
"description": "Comma-separated QQ user IDs allowed to use the bot",
|
||
"prompt": "QQ Allowed Users",
|
||
"category": "messaging",
|
||
},
|
||
"QQ_GROUP_ALLOWED_USERS": {
|
||
"description": "Comma-separated QQ group IDs allowed to interact with the bot",
|
||
"prompt": "QQ Group Allowed Users",
|
||
"category": "messaging",
|
||
},
|
||
"QQ_ALLOW_ALL_USERS": {
|
||
"description": "Allow all QQ users without an allowlist (true/false)",
|
||
"prompt": "Allow All QQ Users",
|
||
"category": "messaging",
|
||
},
|
||
"QQBOT_HOME_CHANNEL": {
|
||
"description": "Default QQ channel/group for cron delivery and notifications",
|
||
"prompt": "QQ Home Channel",
|
||
"category": "messaging",
|
||
},
|
||
"QQBOT_HOME_CHANNEL_NAME": {
|
||
"description": "Display name for the QQ home channel",
|
||
"prompt": "QQ Home Channel Name",
|
||
"category": "messaging",
|
||
},
|
||
"QQ_SANDBOX": {
|
||
"description": "Enable QQ sandbox mode for development testing (true/false)",
|
||
"prompt": "QQ Sandbox Mode",
|
||
"category": "messaging",
|
||
},
|
||
"GATEWAY_ALLOW_ALL_USERS": {
|
||
"description": "Allow all users to interact with messaging bots (true/false). Default: false.",
|
||
"prompt": "Allow all users (true/false)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"API_SERVER_ENABLED": {
|
||
"description": "Enable the OpenAI-compatible API server (true/false). Allows frontends like Open WebUI, LobeChat, etc. to connect.",
|
||
"prompt": "Enable API server (true/false)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"API_SERVER_KEY": {
|
||
"description": "Bearer token for API server authentication. Required for non-loopback binding; server refuses to start without it. On loopback (127.0.0.1), all requests are allowed if empty.",
|
||
"prompt": "API server auth key (required for network access)",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"API_SERVER_PORT": {
|
||
"description": "Port for the API server (default: 8642).",
|
||
"prompt": "API server port",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"API_SERVER_HOST": {
|
||
"description": "Host/bind address for the API server (default: 127.0.0.1). Use 0.0.0.0 for network access — server refuses to start without API_SERVER_KEY.",
|
||
"prompt": "API server host",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"API_SERVER_MODEL_NAME": {
|
||
"description": "Model name advertised on /v1/models. Defaults to the profile name (or 'hermes-agent' for the default profile). Useful for multi-user setups with OpenWebUI.",
|
||
"prompt": "API server model name",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"GATEWAY_PROXY_URL": {
|
||
"description": "URL of a remote Hermes API server to forward messages to (proxy mode). When set, the gateway handles platform I/O only — all agent work is delegated to the remote server. Use for Docker E2EE containers that relay to a host agent. Also configurable via gateway.proxy_url in config.yaml.",
|
||
"prompt": "Remote Hermes API server URL (e.g. http://192.168.1.100:8642)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"GATEWAY_PROXY_KEY": {
|
||
"description": "Bearer token for authenticating with the remote Hermes API server (proxy mode). Must match the API_SERVER_KEY on the remote host.",
|
||
"prompt": "Remote API server auth key",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
"advanced": True,
|
||
},
|
||
"WEBHOOK_ENABLED": {
|
||
"description": "Enable the webhook platform adapter for receiving events from GitHub, GitLab, etc.",
|
||
"prompt": "Enable webhooks (true/false)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"WEBHOOK_PORT": {
|
||
"description": "Port for the webhook HTTP server (default: 8644).",
|
||
"prompt": "Webhook port",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "messaging",
|
||
},
|
||
"WEBHOOK_SECRET": {
|
||
"description": "Global HMAC secret for webhook signature validation (overridable per route in config.yaml).",
|
||
"prompt": "Webhook secret",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "messaging",
|
||
},
|
||
|
||
# ── Agent settings ──
|
||
# NOTE: MESSAGING_CWD was removed here — use terminal.cwd in config.yaml
|
||
# instead. The gateway reads TERMINAL_CWD (bridged from terminal.cwd).
|
||
"SUDO_PASSWORD": {
|
||
"description": "Sudo password for terminal commands requiring root access; set to an explicit empty string to try empty without prompting",
|
||
"prompt": "Sudo password",
|
||
"url": None,
|
||
"password": True,
|
||
"category": "setting",
|
||
},
|
||
"HERMES_MAX_ITERATIONS": {
|
||
"description": "Maximum tool-calling iterations per conversation (default: 90)",
|
||
"prompt": "Max iterations",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "setting",
|
||
},
|
||
# HERMES_TOOL_PROGRESS and HERMES_TOOL_PROGRESS_MODE are deprecated —
|
||
# now configured via display.tool_progress in config.yaml (off|new|all|verbose).
|
||
# Gateway falls back to these env vars for backward compatibility.
|
||
"HERMES_TOOL_PROGRESS": {
|
||
"description": "(deprecated) Use display.tool_progress in config.yaml instead",
|
||
"prompt": "Tool progress (deprecated — use config.yaml)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "setting",
|
||
},
|
||
"HERMES_TOOL_PROGRESS_MODE": {
|
||
"description": "(deprecated) Use display.tool_progress in config.yaml instead",
|
||
"prompt": "Progress mode (deprecated — use config.yaml)",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "setting",
|
||
},
|
||
"HERMES_PREFILL_MESSAGES_FILE": {
|
||
"description": "Path to JSON file with ephemeral prefill messages for few-shot priming",
|
||
"prompt": "Prefill messages file path",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "setting",
|
||
},
|
||
"HERMES_EPHEMERAL_SYSTEM_PROMPT": {
|
||
"description": "Ephemeral system prompt injected at API-call time (never persisted to sessions)",
|
||
"prompt": "Ephemeral system prompt",
|
||
"url": None,
|
||
"password": False,
|
||
"category": "setting",
|
||
},
|
||
}
|
||
|
||
# Tool Gateway env vars are always visible — they're useful for
|
||
# self-hosted / custom gateway setups regardless of subscription state.
|
||
|
||
|
||
def get_missing_env_vars(required_only: bool = False) -> List[Dict[str, Any]]:
    """Return info dicts for environment variables that are not yet set.

    Args:
        required_only: When True, only REQUIRED_ENV_VARS are checked.

    Returns:
        A list of dicts (``name`` + the var's metadata + ``is_required``),
        required vars first, preserving declaration order within each table.
    """
    tables: List[Tuple[Dict[str, Any], bool]] = [(REQUIRED_ENV_VARS, True)]
    if not required_only:
        tables.append((OPTIONAL_ENV_VARS, False))

    missing: List[Dict[str, Any]] = []
    for table, is_required in tables:
        missing.extend(
            {"name": var_name, **info, "is_required": is_required}
            for var_name, info in table.items()
            if not get_env_value(var_name)
        )
    return missing
|
||
|
||
|
||
def _set_nested(config: dict, dotted_key: str, value):
    """Set a value at an arbitrarily nested dotted key path.

    Creates intermediate dicts as needed, e.g. ``_set_nested(c, "a.b.c", 1)``
    ensures ``c["a"]["b"]["c"] == 1``. Any non-dict value found along the
    path is replaced by a fresh dict.
    """
    *parents, leaf = dotted_key.split(".")
    node = config
    for segment in parents:
        child = node.get(segment)
        if not isinstance(child, dict):
            # Missing key, or a scalar blocking the path: start a new subtree.
            child = {}
            node[segment] = child
        node = child
    node[leaf] = value
|
||
|
||
|
||
def get_missing_config_fields() -> List[Dict[str, Any]]:
    """
    Check which config fields are missing or outdated (recursive).

    Walks the DEFAULT_CONFIG tree at arbitrary depth and reports any keys
    present in defaults but absent from the user's loaded config.
    """
    config = load_config()
    missing: List[Dict[str, Any]] = []

    def _walk(defaults: Dict[str, Any], current: Dict[str, Any], prefix: str = "") -> None:
        # Depth-first, in default-declaration order, so report order is stable.
        for key, default_value in defaults.items():
            if key.startswith('_'):
                # Internal bookkeeping keys (e.g. _config_version) are never reported.
                continue
            dotted = f"{prefix}.{key}" if prefix else key
            if key not in current:
                missing.append({
                    "key": dotted,
                    "default": default_value,
                    "description": f"New config option: {dotted}",
                })
            elif isinstance(default_value, dict) and isinstance(current.get(key), dict):
                _walk(default_value, current[key], dotted)

    _walk(dict(DEFAULT_CONFIG), config)
    return missing
|
||
|
||
|
||
def get_missing_skill_config_vars() -> List[Dict[str, Any]]:
    """Return skill-declared config vars that are missing or empty in config.yaml.

    Scans all enabled skills for ``metadata.hermes.config`` entries, then checks
    which ones are absent or empty under ``skills.config.<key>`` in the user's
    config.yaml. Returns a list of dicts suitable for prompting.
    """
    try:
        from hermes_agent.agent.skill_utils import discover_all_skill_config_vars, SKILL_CONFIG_PREFIX
    except Exception:
        # Skills subsystem unavailable — nothing to prompt for.
        return []

    declared = discover_all_skill_config_vars()
    if not declared:
        return []

    config = load_config()

    def _lookup(dotted: str):
        # Walk the dotted path; None when any segment is absent.
        node: Any = config
        for segment in dotted.split("."):
            if not (isinstance(node, dict) and segment in node):
                return None
            node = node[segment]
        return node

    missing: List[Dict[str, Any]] = []
    for var in declared:
        # Skill config is stored under skills.config.<logical_key>
        value = _lookup(f"{SKILL_CONFIG_PREFIX}.{var['key']}")
        # Missing = key doesn't exist or is empty string
        if value is None or (isinstance(value, str) and not value.strip()):
            missing.append(var)
    return missing
|
||
|
||
|
||
def _normalize_custom_provider_entry(
    entry: Any,
    *,
    provider_key: str = "",
) -> Optional[Dict[str, Any]]:
    """Return a runtime-compatible custom provider entry or ``None``.

    Validates one ``providers``/``custom_providers`` config entry: maps
    camelCase aliases to snake_case, warns on unknown keys, requires a
    parseable base URL and a name (falling back to *provider_key*), and
    copies the optional fields through in canonical form.
    """
    if not isinstance(entry, dict):
        return None

    # Accept camelCase aliases commonly used in hand-written configs.
    camel_to_snake: Dict[str, str] = {
        "apiKey": "api_key",
        "baseUrl": "base_url",
        "apiMode": "api_mode",
        "keyEnv": "key_env",
        "defaultModel": "default_model",
        "contextLength": "context_length",
        "rateLimitDelay": "rate_limit_delay",
    }
    recognised = {
        "name", "api", "url", "base_url", "api_key", "key_env",
        "api_mode", "transport", "model", "default_model", "models",
        "context_length", "rate_limit_delay",
    }
    label = provider_key or "?"
    for camel_key, snake_key in camel_to_snake.items():
        if camel_key in entry and snake_key not in entry:
            logger.warning(
                "providers.%s: camelCase key '%s' auto-mapped to '%s' "
                "(use snake_case to avoid this warning)",
                label, camel_key, snake_key,
            )
            entry[snake_key] = entry[camel_key]
    stray = set(entry) - recognised - set(camel_to_snake)
    if stray:
        logger.warning(
            "providers.%s: unknown config keys ignored: %s",
            label, ", ".join(sorted(stray)),
        )

    from urllib.parse import urlparse

    # First URL-ish field that parses with both a scheme and a host wins.
    base_url = ""
    for url_field in ("base_url", "url", "api"):
        raw = entry.get(url_field)
        if not (isinstance(raw, str) and raw.strip()):
            continue
        candidate = raw.strip()
        parsed = urlparse(candidate)
        if parsed.scheme and parsed.netloc:
            base_url = candidate
            break
        logger.warning(
            "providers.%s: '%s' value '%s' is not a valid URL "
            "(no scheme or host) — skipped",
            label, url_field, candidate,
        )
    if not base_url:
        return None

    # Explicit name takes precedence; fall back to the providers-dict key.
    raw_name = entry.get("name")
    if isinstance(raw_name, str) and raw_name.strip():
        name = raw_name.strip()
    else:
        name = provider_key.strip()
    if not name:
        return None

    normalized: Dict[str, Any] = {"name": name, "base_url": base_url}

    provider_key = provider_key.strip()
    if provider_key:
        normalized["provider_key"] = provider_key

    def _put_str(target_key: str, value: Any) -> None:
        # Copy a string field through, stripped, skipping empty/non-str.
        if isinstance(value, str) and value.strip():
            normalized[target_key] = value.strip()

    _put_str("api_key", entry.get("api_key"))
    _put_str("key_env", entry.get("key_env"))
    _put_str("api_mode", entry.get("api_mode") or entry.get("transport"))
    _put_str("model", entry.get("model") or entry.get("default_model"))

    models = entry.get("models")
    if isinstance(models, dict) and models:
        normalized["models"] = models

    ctx_len = entry.get("context_length")
    if isinstance(ctx_len, int) and ctx_len > 0:
        normalized["context_length"] = ctx_len

    delay = entry.get("rate_limit_delay")
    if isinstance(delay, (int, float)) and delay >= 0:
        normalized["rate_limit_delay"] = delay

    return normalized
|
||
|
||
|
||
def providers_dict_to_custom_providers(providers_dict: Any) -> List[Dict[str, Any]]:
    """Normalize ``providers`` config entries into the legacy custom-provider shape."""
    if not isinstance(providers_dict, dict):
        return []
    normalized_entries = (
        _normalize_custom_provider_entry(entry, provider_key=str(key))
        for key, entry in providers_dict.items()
    )
    # Entries that fail validation normalize to None and are dropped.
    return [e for e in normalized_entries if e is not None]
|
||
|
||
|
||
def get_compatible_custom_providers(
    config: Optional[Dict[str, Any]] = None,
) -> List[Dict[str, Any]]:
    """Return a deduplicated custom-provider view across legacy and v12+ config.

    ``custom_providers`` remains the on-disk legacy format, while ``providers``
    is the newer keyed schema. Runtime and picker flows still need a single
    list-shaped view, but we should not materialise that compatibility layer
    back into config.yaml because it duplicates entries in UIs.
    """
    if config is None:
        config = load_config()

    merged: List[Dict[str, Any]] = []
    known_keys: set = set()
    known_signatures: set = set()

    def _absorb(candidate: Optional[Dict[str, Any]]) -> None:
        # Append unless this provider was already seen by key or by
        # (name, base_url, model) signature.
        if candidate is None:
            return
        key = str(candidate.get("provider_key", "") or "").strip().lower()
        name = str(candidate.get("name", "") or "").strip().lower()
        url = str(candidate.get("base_url", "") or "").strip().rstrip("/").lower()
        model = str(candidate.get("model", "") or "").strip().lower()
        signature = (name, url, model)

        if key and key in known_keys:
            return
        if name and url and signature in known_signatures:
            return

        merged.append(candidate)
        if key:
            known_keys.add(key)
        if name and url:
            known_signatures.add(signature)

    legacy = config.get("custom_providers")
    if legacy is not None:
        if not isinstance(legacy, list):
            # Malformed legacy section — refuse to guess.
            return []
        for raw in legacy:
            _absorb(_normalize_custom_provider_entry(raw))

    for entry in providers_dict_to_custom_providers(config.get("providers")):
        _absorb(entry)

    return merged
|
||
|
||
|
||
def check_config_version() -> Tuple[int, int]:
    """
    Check config version.

    Returns (current_version, latest_version).
    """
    # Configs written before versioning existed default to 0; defaults to 1.
    current_version = int(load_config().get("_config_version", 0))
    latest_version = int(DEFAULT_CONFIG.get("_config_version", 1))
    return current_version, latest_version
|
||
|
||
|
||
# =============================================================================
|
||
# Config structure validation
|
||
# =============================================================================
|
||
|
||
# Fields that are valid at root level of config.yaml
|
||
_KNOWN_ROOT_KEYS = {
    "_config_version", "model", "providers", "fallback_model",
    # "toolsets" is the config.yaml root key for enabled tool groups.
    # (A previous import-rewriter pass had mangled it into
    # "hermes_agent.tools.toolsets", which is a module path, not a config key.)
    "fallback_providers", "credential_pool_strategies", "toolsets",
    "agent", "terminal", "display", "compression", "delegation",
    "auxiliary", "custom_providers", "context", "memory", "gateway",
}

# Valid fields inside a custom_providers list entry
_VALID_CUSTOM_PROVIDER_FIELDS = {
    "name", "base_url", "api_key", "api_mode", "model", "models",
    "context_length", "rate_limit_delay",
}

# Fields that look like they should be inside custom_providers, not at root
_CUSTOM_PROVIDER_LIKE_FIELDS = {"base_url", "api_key", "rate_limit_delay", "api_mode"}
|
||
|
||
|
||
@dataclass
class ConfigIssue:
    """A detected config structure problem.

    Produced by ``validate_config_structure`` and rendered to the user by
    ``print_config_warnings`` / 'hermes doctor'.
    """

    # "error" for structurally broken config, "warning" for likely mistakes.
    severity: str  # "error", "warning"
    # One-line human-readable description of the problem.
    message: str
    # Suggested fix; may be multi-line (e.g. an example YAML snippet).
    hint: str
||
|
||
def validate_config_structure(config: Optional[Dict[str, Any]] = None) -> List["ConfigIssue"]:
    """Validate config.yaml structure and return a list of detected issues.

    Catches common YAML formatting mistakes that produce confusing runtime
    errors (like "Unknown provider") instead of clear diagnostics.

    Can be called with a pre-loaded config dict, or will load from disk.

    Args:
        config: Parsed config dict; loaded via ``load_config()`` when None.

    Returns:
        List of ``ConfigIssue`` (empty when the structure looks healthy).
        Never raises — an unloadable config is itself reported as an issue.
    """
    if config is None:
        try:
            config = load_config()
        except Exception:
            # Can't even parse the file — report that as the single issue.
            return [ConfigIssue("error", "Could not load config.yaml", "Run 'hermes setup' to create a valid config")]

    issues: List[ConfigIssue] = []

    # ── custom_providers must be a list, not a dict ──────────────────────
    cp = config.get("custom_providers")
    if cp is not None:
        if isinstance(cp, dict):
            # A dict here usually means the user forgot the '-' list markers.
            issues.append(ConfigIssue(
                "error",
                "custom_providers is a dict — it must be a YAML list (items prefixed with '-')",
                "Change to:\n"
                "  custom_providers:\n"
                "    - name: my-provider\n"
                "      base_url: https://...\n"
                "      api_key: ...",
            ))
            # Check if dict keys look like they should be list-entry fields
            cp_keys = set(cp.keys()) if isinstance(cp, dict) else set()
            suspicious = cp_keys & _CUSTOM_PROVIDER_LIKE_FIELDS
            if suspicious:
                issues.append(ConfigIssue(
                    "warning",
                    f"Root-level keys {sorted(suspicious)} look like custom_providers entry fields",
                    "These should be indented under a '- name: ...' list entry, not at root level",
                ))
        elif isinstance(cp, list):
            # Validate each entry in the list
            for i, entry in enumerate(cp):
                if not isinstance(entry, dict):
                    issues.append(ConfigIssue(
                        "warning",
                        f"custom_providers[{i}] is not a dict (got {type(entry).__name__})",
                        "Each entry should have at minimum: name, base_url",
                    ))
                    continue
                if not entry.get("name"):
                    issues.append(ConfigIssue(
                        "warning",
                        f"custom_providers[{i}] is missing 'name' field",
                        "Add a name, e.g.: name: my-provider",
                    ))
                if not entry.get("base_url"):
                    issues.append(ConfigIssue(
                        "warning",
                        f"custom_providers[{i}] is missing 'base_url' field",
                        "Add the API endpoint URL, e.g.: base_url: https://api.example.com/v1",
                    ))

    # ── fallback_model must be a top-level dict with provider + model ────
    fb = config.get("fallback_model")
    if fb is not None:
        if not isinstance(fb, dict):
            issues.append(ConfigIssue(
                "error",
                f"fallback_model should be a dict with 'provider' and 'model', got {type(fb).__name__}",
                "Change to:\n"
                "  fallback_model:\n"
                "    provider: openrouter\n"
                "    model: anthropic/claude-sonnet-4",
            ))
        elif fb:
            # Dict is present and non-empty: both sub-fields are required
            # for fallback to actually engage at runtime.
            if not fb.get("provider"):
                issues.append(ConfigIssue(
                    "warning",
                    "fallback_model is missing 'provider' field — fallback will be disabled",
                    "Add: provider: openrouter (or another provider)",
                ))
            if not fb.get("model"):
                issues.append(ConfigIssue(
                    "warning",
                    "fallback_model is missing 'model' field — fallback will be disabled",
                    "Add: model: anthropic/claude-sonnet-4 (or another model)",
                ))

    # ── Check for fallback_model accidentally nested inside custom_providers ──
    # (a common over-indentation mistake when editing YAML by hand)
    if isinstance(cp, dict) and "fallback_model" not in config and "fallback_model" in (cp or {}):
        issues.append(ConfigIssue(
            "error",
            "fallback_model appears inside custom_providers instead of at root level",
            "Move fallback_model to the top level of config.yaml (no indentation)",
        ))

    # ── model section: should exist when custom_providers is configured ──
    model_cfg = config.get("model")
    if cp and not model_cfg:
        issues.append(ConfigIssue(
            "warning",
            "custom_providers defined but no 'model' section — Hermes won't know which provider to use",
            "Add a model section:\n"
            "  model:\n"
            "    provider: custom\n"
            "    default: your-model-name\n"
            "    base_url: https://...",
        ))

    # ── Root-level keys that look misplaced ──────────────────────────────
    for key in config:
        if key.startswith("_"):
            # Internal keys (e.g. _config_version) are always allowed.
            continue
        if key not in _KNOWN_ROOT_KEYS and key in _CUSTOM_PROVIDER_LIKE_FIELDS:
            issues.append(ConfigIssue(
                "warning",
                f"Root-level key '{key}' looks misplaced — should it be under 'model:' or inside a 'custom_providers' entry?",
                f"Move '{key}' under the appropriate section",
            ))

    return issues
|
||
|
||
|
||
def print_config_warnings(config: Optional[Dict[str, Any]] = None) -> None:
    """Print config structure warnings to stderr at startup.

    Called early in CLI and gateway init so users see problems before
    they hit cryptic "Unknown provider" errors. Prints nothing if
    config is healthy.
    """
    try:
        issues = validate_config_structure(config)
    except Exception:
        # Diagnostics must never break startup.
        return
    if not issues:
        return

    report = ["\033[33m⚠ Config issues detected in config.yaml:\033[0m"]
    for issue in issues:
        bullet = "\033[31m✗\033[0m" if issue.severity == "error" else "\033[33m⚠\033[0m"
        report.append(f"  {bullet} {issue.message}")
    report.append("  \033[2mRun 'hermes doctor' for fix suggestions.\033[0m")
    sys.stderr.write("\n".join(report) + "\n\n")
|
||
|
||
|
||
def warn_deprecated_cwd_env_vars(config: Optional[Dict[str, Any]] = None) -> None:
    """Warn if MESSAGING_CWD or TERMINAL_CWD is set in .env instead of config.yaml.

    These env vars are deprecated — the canonical setting is terminal.cwd
    in config.yaml. Prints a migration hint to stderr.
    """
    legacy_messaging = os.environ.get("MESSAGING_CWD")
    legacy_terminal = os.environ.get("TERMINAL_CWD")

    if config is None:
        try:
            config = load_config()
        except Exception:
            return

    term_section = config.get("terminal", {})
    if isinstance(term_section, dict):
        cwd_setting = term_section.get("cwd", ".")
    else:
        cwd_setting = "."
    # Only warn if config.yaml doesn't have an explicit path
    explicit_cwd = cwd_setting not in (".", "auto", "cwd", "")

    warnings: list[str] = []
    if legacy_messaging:
        warnings.append(
            f"  \033[33m⚠\033[0m MESSAGING_CWD={legacy_messaging} found in .env — "
            "this is deprecated."
        )
    if legacy_terminal and not explicit_cwd:
        # TERMINAL_CWD in env but not from config bridge — likely from .env
        warnings.append(
            f"  \033[33m⚠\033[0m TERMINAL_CWD={legacy_terminal} found in .env — "
            "this is deprecated."
        )
    if not warnings:
        return

    home_hint = os.environ.get("HERMES_HOME", "~/.hermes")
    warnings.insert(0, "\033[33m⚠ Deprecated .env settings detected:\033[0m")
    # NOTE: the literal "\n" in the hint below is intentional — it shows the
    # YAML snippet on one line.
    warnings.append(
        "  \033[2mMove to config.yaml instead: "
        "terminal:\\n  cwd: /your/project/path\033[0m"
    )
    warnings.append(
        f"  \033[2mThen remove the old entries from {home_hint}/.env\033[0m"
    )
    sys.stderr.write("\n".join(warnings) + "\n\n")
||
|
||
def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, Any]:
    """
    Migrate config to latest version, prompting for new required fields.

    Runs every versioned migration step whose version is newer than the
    stored ``_config_version``, then fills in missing env vars and config
    fields. Each step is written to be idempotent: it checks whether the
    target key already exists before touching anything.

    Args:
        interactive: If True, prompt user for missing values
        quiet: If True, suppress output

    Returns:
        Dict with migration results: {"env_added": [...], "config_added": [...], "warnings": [...]}
    """
    results: Dict[str, List[str]] = {"env_added": [], "config_added": [], "warnings": []}

    # ── Always: sanitize .env (split concatenated keys) ──
    try:
        fixes = sanitize_env_file()
        if fixes and not quiet:
            print(f"  ✓ Repaired .env file ({fixes} corrupted entries fixed)")
    except Exception:
        pass  # best-effort; don't block migration on sanitize failure

    # Check config version (stored version on disk vs. code's latest)
    current_ver, latest_ver = check_config_version()

    # ── Version 3 → 4: migrate tool progress from .env to config.yaml ──
    if current_ver < 4:
        config = load_config()
        display = config.get("display", {})
        if not isinstance(display, dict):
            display = {}
        if "tool_progress" not in display:
            old_enabled = get_env_value("HERMES_TOOL_PROGRESS")
            old_mode = get_env_value("HERMES_TOOL_PROGRESS_MODE")
            # Precedence: explicit "off" beats an old mode, which beats the default.
            if old_enabled and old_enabled.lower() in ("false", "0", "no"):
                display["tool_progress"] = "off"
                results["config_added"].append("display.tool_progress=off (from HERMES_TOOL_PROGRESS=false)")
            elif old_mode and old_mode.lower() in ("new", "all"):
                display["tool_progress"] = old_mode.lower()
                results["config_added"].append(f"display.tool_progress={old_mode.lower()} (from HERMES_TOOL_PROGRESS_MODE)")
            else:
                display["tool_progress"] = "all"
                results["config_added"].append("display.tool_progress=all (default)")
            config["display"] = display
            save_config(config)
            if not quiet:
                print(f"  ✓ Migrated tool progress to config.yaml: {display['tool_progress']}")

    # ── Version 4 → 5: add timezone field ──
    if current_ver < 5:
        config = load_config()
        if "timezone" not in config:
            old_tz = os.getenv("HERMES_TIMEZONE", "")
            if old_tz and old_tz.strip():
                config["timezone"] = old_tz.strip()
                results["config_added"].append(f"timezone={old_tz.strip()} (from HERMES_TIMEZONE)")
            else:
                config["timezone"] = ""
                results["config_added"].append("timezone= (empty, uses server-local)")
            save_config(config)
            if not quiet:
                tz_display = config["timezone"] or "(server-local)"
                print(f"  ✓ Added timezone to config.yaml: {tz_display}")

    # ── Version 8 → 9: clear ANTHROPIC_TOKEN from .env ──
    # The new Anthropic auth flow no longer uses this env var.
    if current_ver < 9:
        try:
            old_token = get_env_value("ANTHROPIC_TOKEN")
            if old_token:
                save_env_value("ANTHROPIC_TOKEN", "")
                if not quiet:
                    print("  ✓ Cleared ANTHROPIC_TOKEN from .env (no longer used)")
        except Exception:
            pass

    # ── Version 11 → 12: migrate custom_providers list → providers dict ──
    if current_ver < 12:
        config = load_config()
        custom_list = config.get("custom_providers")
        if isinstance(custom_list, list) and custom_list:
            providers_dict = config.get("providers", {})
            if not isinstance(providers_dict, dict):
                providers_dict = {}
            migrated_count = 0
            for entry in custom_list:
                if not isinstance(entry, dict):
                    continue
                old_name = entry.get("name", "")
                old_url = entry.get("base_url", "") or entry.get("url", "") or ""
                old_key = entry.get("api_key", "")
                if not old_url:
                    continue  # skip entries with no URL

                # Generate a kebab-case key from the display name
                key = old_name.strip().lower().replace(" ", "-").replace("(", "").replace(")", "")
                # Remove consecutive hyphens and trailing hyphens
                while "--" in key:
                    key = key.replace("--", "-")
                key = key.strip("-")
                if not key:
                    # Fallback: derive from URL hostname
                    try:
                        from urllib.parse import urlparse
                        parsed = urlparse(old_url)
                        key = (parsed.hostname or "endpoint").replace(".", "-")
                    except Exception:
                        key = f"endpoint-{migrated_count}"

                # Don't overwrite existing entries
                if key in providers_dict:
                    key = f"{key}-{migrated_count}"

                new_entry = {"api": old_url}
                if old_name:
                    new_entry["name"] = old_name
                if old_key and old_key not in ("no-key", "no-key-required", ""):
                    new_entry["api_key"] = old_key

                # Carry over model and api_mode if present
                if entry.get("model"):
                    new_entry["default_model"] = entry["model"]
                if entry.get("api_mode"):
                    new_entry["transport"] = entry["api_mode"]

                providers_dict[key] = new_entry
                migrated_count += 1

            if migrated_count > 0:
                config["providers"] = providers_dict
                # Remove the old list — runtime reads via get_compatible_custom_providers()
                config.pop("custom_providers", None)
                save_config(config)
                if not quiet:
                    print(f"  ✓ Migrated {migrated_count} custom provider(s) to providers: section")
                    for key in list(providers_dict.keys())[-migrated_count:]:
                        ep = providers_dict[key]
                        print(f"    → {key}: {ep.get('api', '')}")

    # ── Version 12 → 13: clear dead LLM_MODEL / OPENAI_MODEL from .env ──
    # These env vars were written by the old setup wizard but nothing reads
    # them anymore (config.yaml is the sole source of truth since March 2026).
    # Stale entries cause user confusion — see issue report.
    if current_ver < 13:
        for dead_var in ("LLM_MODEL", "OPENAI_MODEL"):
            try:
                old_val = get_env_value(dead_var)
                if old_val:
                    save_env_value(dead_var, "")
                    if not quiet:
                        print(f"  ✓ Cleared {dead_var} from .env (no longer used — config.yaml is source of truth)")
            except Exception:
                pass

    # ── Version 13 → 14: migrate legacy flat stt.model to provider section ──
    # Old configs (and cli-config.yaml.example) had a flat `stt.model` key
    # that was provider-agnostic. When the provider was "local" this caused
    # OpenAI model names (e.g. "whisper-1") to be fed to faster-whisper,
    # crashing with "Invalid model size". Move the value into the correct
    # provider-specific section and remove the flat key.
    if current_ver < 14:
        # Read raw config (no defaults merged) to check what the user actually
        # wrote, then apply changes to the merged config for saving.
        raw = read_raw_config()
        raw_stt = raw.get("stt", {})
        if isinstance(raw_stt, dict) and "model" in raw_stt:
            legacy_model = raw_stt["model"]
            provider = raw_stt.get("provider", "local")
            config = load_config()
            stt = config.get("stt", {})
            # Remove the legacy flat key
            stt.pop("model", None)
            # Place it in the appropriate provider section only if the
            # user didn't already set a model there
            if provider in ("local", "local_command"):
                # Don't migrate an OpenAI model name into the local section
                _local_models = {
                    "tiny.en", "tiny", "base.en", "base", "small.en", "small",
                    "medium.en", "medium", "large-v1", "large-v2", "large-v3",
                    "large", "distil-large-v2", "distil-medium.en",
                    "distil-small.en", "distil-large-v3", "distil-large-v3.5",
                    "large-v3-turbo", "turbo",
                }
                if legacy_model in _local_models:
                    # Check raw config — only set if user didn't already
                    # have a nested local.model
                    raw_local = raw_stt.get("local", {})
                    if not isinstance(raw_local, dict) or "model" not in raw_local:
                        local_cfg = stt.setdefault("local", {})
                        local_cfg["model"] = legacy_model
                # else: drop it — it was an OpenAI model name, local section
                # already defaults to "base" via DEFAULT_CONFIG
            else:
                # Cloud provider — put it in that provider's section only
                # if user didn't already set a nested model
                raw_provider = raw_stt.get(provider, {})
                if not isinstance(raw_provider, dict) or "model" not in raw_provider:
                    provider_cfg = stt.setdefault(provider, {})
                    provider_cfg["model"] = legacy_model
            config["stt"] = stt
            save_config(config)
            if not quiet:
                print(f"  ✓ Migrated legacy stt.model to provider-specific config")

    # ── Version 14 → 15: add explicit gateway interim-message gate ──
    if current_ver < 15:
        config = read_raw_config()
        display = config.get("display", {})
        if not isinstance(display, dict):
            display = {}
        if "interim_assistant_messages" not in display:
            display["interim_assistant_messages"] = True
            config["display"] = display
            results["config_added"].append("display.interim_assistant_messages=true (default)")
            save_config(config)
            if not quiet:
                print("  ✓ Added display.interim_assistant_messages=true")

    # ── Version 15 → 16: migrate tool_progress_overrides into display.platforms ──
    if current_ver < 16:
        config = read_raw_config()
        display = config.get("display", {})
        if not isinstance(display, dict):
            display = {}
        old_overrides = display.get("tool_progress_overrides")
        if isinstance(old_overrides, dict) and old_overrides:
            platforms = display.get("platforms", {})
            if not isinstance(platforms, dict):
                platforms = {}
            for plat, mode in old_overrides.items():
                if plat not in platforms:
                    platforms[plat] = {}
                # Per-platform settings the user already wrote win over the override.
                if "tool_progress" not in platforms[plat]:
                    platforms[plat]["tool_progress"] = mode
            display["platforms"] = platforms
            config["display"] = display
            save_config(config)
            if not quiet:
                migrated = ", ".join(f"{p}={m}" for p, m in old_overrides.items())
                print(f"  ✓ Migrated tool_progress_overrides → display.platforms: {migrated}")
            results["config_added"].append("display.platforms (migrated from tool_progress_overrides)")

    # ── Version 16 → 17: remove legacy compression.summary_* keys ──
    if current_ver < 17:
        config = read_raw_config()
        comp = config.get("compression", {})
        if isinstance(comp, dict):
            # pop() both removes the legacy key and captures its value.
            s_model = comp.pop("summary_model", None)
            s_provider = comp.pop("summary_provider", None)
            s_base_url = comp.pop("summary_base_url", None)
            migrated_keys = []
            # Migrate non-empty, non-default values to auxiliary.compression
            if s_model and str(s_model).strip():
                aux = config.setdefault("auxiliary", {})
                aux_comp = aux.setdefault("compression", {})
                if not aux_comp.get("model"):
                    aux_comp["model"] = str(s_model).strip()
                    migrated_keys.append(f"model={s_model}")
            if s_provider and str(s_provider).strip() not in ("", "auto"):
                aux = config.setdefault("auxiliary", {})
                aux_comp = aux.setdefault("compression", {})
                if not aux_comp.get("provider") or aux_comp.get("provider") == "auto":
                    aux_comp["provider"] = str(s_provider).strip()
                    migrated_keys.append(f"provider={s_provider}")
            if s_base_url and str(s_base_url).strip():
                aux = config.setdefault("auxiliary", {})
                aux_comp = aux.setdefault("compression", {})
                if not aux_comp.get("base_url"):
                    aux_comp["base_url"] = str(s_base_url).strip()
                    migrated_keys.append(f"base_url={s_base_url}")
            # Save if anything was migrated OR any legacy key existed (even empty),
            # so the stale keys are removed from disk either way.
            if migrated_keys or s_model is not None or s_provider is not None or s_base_url is not None:
                config["compression"] = comp
                save_config(config)
                if not quiet:
                    if migrated_keys:
                        print(f"  ✓ Migrated compression.summary_* → auxiliary.compression: {', '.join(migrated_keys)}")
                    else:
                        print("  ✓ Removed unused compression.summary_* keys")

    # ── Version 20 → 21: plugins are now opt-in; grandfather existing user plugins ──
    # The loader now requires plugins to appear in ``plugins.enabled`` before
    # loading. Existing installs had all discovered plugins loading by default
    # (minus anything in ``plugins.disabled``). To avoid silently breaking
    # those setups on upgrade, populate ``plugins.enabled`` with the set of
    # currently-installed user plugins that aren't already disabled.
    #
    # Bundled plugins (shipped in the repo itself) are NOT grandfathered —
    # they ship off for everyone, including existing users, so any user who
    # wants one has to opt in explicitly.
    if current_ver < 21:
        config = read_raw_config()
        plugins_cfg = config.get("plugins")
        if not isinstance(plugins_cfg, dict):
            plugins_cfg = {}
        # Only migrate if the enabled allow-list hasn't been set yet.
        if "enabled" not in plugins_cfg:
            disabled = plugins_cfg.get("disabled", []) or []
            if not isinstance(disabled, list):
                disabled = []
            disabled_set = set(disabled)

            # Scan ``$HERMES_HOME/plugins/`` for currently installed user plugins.
            grandfathered: List[str] = []
            try:
                user_plugins_dir = get_hermes_home() / "plugins"
                if user_plugins_dir.is_dir():
                    for child in sorted(user_plugins_dir.iterdir()):
                        if not child.is_dir():
                            continue
                        manifest_file = child / "plugin.yaml"
                        if not manifest_file.exists():
                            manifest_file = child / "plugin.yml"
                        if not manifest_file.exists():
                            continue
                        try:
                            with open(manifest_file) as _mf:
                                manifest = yaml.safe_load(_mf) or {}
                        except Exception:
                            manifest = {}
                        # Manifest name wins; directory name is the fallback.
                        name = manifest.get("name") or child.name
                        if name in disabled_set:
                            continue
                        grandfathered.append(name)
            except Exception:
                grandfathered = []

            plugins_cfg["enabled"] = grandfathered
            config["plugins"] = plugins_cfg
            save_config(config)
            results["config_added"].append(
                f"plugins.enabled (opt-in allow-list, {len(grandfathered)} grandfathered)"
            )
            if not quiet:
                if grandfathered:
                    print(
                        f"  ✓ Plugins now opt-in: grandfathered "
                        f"{len(grandfathered)} existing plugin(s) into plugins.enabled"
                    )
                else:
                    print(
                        "  ✓ Plugins now opt-in: no existing plugins to grandfather. "
                        "Use `hermes plugins enable <name>` to activate."
                    )

    if current_ver < latest_ver and not quiet:
        print(f"Config version: {current_ver} → {latest_ver}")

    # Check for missing required env vars
    missing_env = get_missing_env_vars(required_only=True)

    if missing_env and not quiet:
        print("\n⚠️  Missing required environment variables:")
        for var in missing_env:
            print(f"  • {var['name']}: {var['description']}")

    if interactive and missing_env:
        print("\nLet's configure them now:\n")
        for var in missing_env:
            if var.get("url"):
                print(f"  Get your key at: {var['url']}")

            # Secrets use getpass so the value isn't echoed to the terminal.
            if var.get("password"):
                import getpass
                value = getpass.getpass(f"  {var['prompt']}: ")
            else:
                value = input(f"  {var['prompt']}: ").strip()

            if value:
                save_env_value(var["name"], value)
                results["env_added"].append(var["name"])
                print(f"  ✓ Saved {var['name']}")
            else:
                results["warnings"].append(f"Skipped {var['name']} - some features may not work")
            print()

    # Check for missing optional env vars and offer to configure interactively
    # Skip "advanced" vars (like OPENAI_BASE_URL) -- those are for power users
    missing_optional = get_missing_env_vars(required_only=False)
    required_names = {v["name"] for v in missing_env} if missing_env else set()
    missing_optional = [
        v for v in missing_optional
        if v["name"] not in required_names and not v.get("advanced")
    ]

    # Only offer to configure env vars that are NEW since the user's previous version
    new_var_names = set()
    for ver in range(current_ver + 1, latest_ver + 1):
        new_var_names.update(ENV_VARS_BY_VERSION.get(ver, []))

    if new_var_names and interactive and not quiet:
        new_and_unset = [
            (name, OPTIONAL_ENV_VARS[name])
            for name in sorted(new_var_names)
            if not get_env_value(name) and name in OPTIONAL_ENV_VARS
        ]
        if new_and_unset:
            print(f"\n  {len(new_and_unset)} new optional key(s) in this update:")
            for name, info in new_and_unset:
                print(f"    • {name} — {info.get('description', '')}")
            print()
            try:
                answer = input("  Configure new keys? [y/N]: ").strip().lower()
            except (EOFError, KeyboardInterrupt):
                answer = "n"

            if answer in ("y", "yes"):
                print()
                for name, info in new_and_unset:
                    if info.get("url"):
                        print(f"  {info.get('description', name)}")
                        print(f"  Get your key at: {info['url']}")
                    else:
                        print(f"  {info.get('description', name)}")
                    if info.get("password"):
                        import getpass
                        value = getpass.getpass(f"  {info.get('prompt', name)} (Enter to skip): ")
                    else:
                        value = input(f"  {info.get('prompt', name)} (Enter to skip): ").strip()
                    if value:
                        save_env_value(name, value)
                        results["env_added"].append(name)
                        print(f"  ✓ Saved {name}")
                    print()
            else:
                print("  Set later with: hermes config set <key> <value>")

    # Check for missing config fields
    missing_config = get_missing_config_fields()

    if missing_config:
        config = load_config()

        for field in missing_config:
            key = field["key"]
            default = field["default"]

            _set_nested(config, key, default)
            results["config_added"].append(key)
            if not quiet:
                print(f"  ✓ Added {key} = {default}")

        # Update version and save
        config["_config_version"] = latest_ver
        save_config(config)
    elif current_ver < latest_ver:
        # Just update version
        config = load_config()
        config["_config_version"] = latest_ver
        save_config(config)

    # ── Skill-declared config vars ──────────────────────────────────────
    # Skills can declare config.yaml settings they need via
    # metadata.hermes.config in their SKILL.md frontmatter.
    # Prompt for any that are missing/empty.
    missing_skill_config = get_missing_skill_config_vars()
    if missing_skill_config and interactive and not quiet:
        print(f"\n  {len(missing_skill_config)} skill setting(s) not configured:")
        for var in missing_skill_config:
            skill_name = var.get("skill", "unknown")
            print(f"    • {var['key']} — {var['description']} (from skill: {skill_name})")
        print()
        try:
            answer = input("  Configure skill settings? [y/N]: ").strip().lower()
        except (EOFError, KeyboardInterrupt):
            answer = "n"

        if answer in ("y", "yes"):
            print()
            config = load_config()
            try:
                from hermes_agent.agent.skill_utils import SKILL_CONFIG_PREFIX
            except Exception:
                SKILL_CONFIG_PREFIX = "skills.config"
            for var in missing_skill_config:
                default = var.get("default", "")
                default_hint = f" (default: {default})" if default else ""
                value = input(f"  {var['prompt']}{default_hint}: ").strip()
                if not value and default:
                    value = str(default)
                if value:
                    storage_key = f"{SKILL_CONFIG_PREFIX}.{var['key']}"
                    _set_nested(config, storage_key, value)
                    results["config_added"].append(var["key"])
                    print(f"  ✓ Saved {var['key']} = {value}")
                else:
                    results["warnings"].append(
                        f"Skipped {var['key']} — skill '{var.get('skill', '?')}' may ask for it later"
                    )
                print()
            save_config(config)
        else:
            print("  Set later with: hermes config set <key> <value>")

    return results
||
|
||
def _deep_merge(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
|
||
"""Recursively merge *override* into *base*, preserving nested defaults.
|
||
|
||
Keys in *override* take precedence. If both values are dicts the merge
|
||
recurses, so a user who overrides only ``tts.elevenlabs.voice_id`` will
|
||
keep the default ``tts.elevenlabs.model_id`` intact.
|
||
"""
|
||
result = base.copy()
|
||
for key, value in override.items():
|
||
if (
|
||
key in result
|
||
and isinstance(result[key], dict)
|
||
and isinstance(value, dict)
|
||
):
|
||
result[key] = _deep_merge(result[key], value)
|
||
else:
|
||
result[key] = value
|
||
return result
|
||
|
||
|
||
def _expand_env_vars(obj):
|
||
"""Recursively expand ``${VAR}`` references in config values.
|
||
|
||
Only string values are processed; dict keys, numbers, booleans, and
|
||
None are left untouched. Unresolved references (variable not in
|
||
``os.environ``) are kept verbatim so callers can detect them.
|
||
"""
|
||
if isinstance(obj, str):
|
||
return re.sub(
|
||
r"\${([^}]+)}",
|
||
lambda m: os.environ.get(m.group(1), m.group(0)),
|
||
obj,
|
||
)
|
||
if isinstance(obj, dict):
|
||
return {k: _expand_env_vars(v) for k, v in obj.items()}
|
||
if isinstance(obj, list):
|
||
return [_expand_env_vars(item) for item in obj]
|
||
return obj
|
||
|
||
|
||
def _items_by_unique_name(items):
|
||
"""Return a name-indexed dict only when all items have unique string names."""
|
||
if not isinstance(items, list):
|
||
return None
|
||
indexed = {}
|
||
for item in items:
|
||
if not isinstance(item, dict) or not isinstance(item.get("name"), str):
|
||
return None
|
||
name = item["name"]
|
||
if name in indexed:
|
||
return None
|
||
indexed[name] = item
|
||
return indexed
|
||
|
||
|
||
def _preserve_env_ref_templates(current, raw, loaded_expanded=None):
    """Restore raw ``${VAR}`` templates when a value is otherwise unchanged.

    ``load_config()`` expands env refs for runtime use. When a caller later
    persists that config after modifying some unrelated setting, keep the
    original on-disk template instead of writing the expanded plaintext
    secret back to ``config.yaml``.

    Prefer preserving the raw template when ``current`` still matches either
    the value previously returned by ``load_config()`` for this config path or
    the current environment expansion of ``raw``. This handles env-var
    rotation between load and save while still treating mixed literal/template
    string edits as caller-owned once their rendered value diverges.
    """
    # String case: only relevant when the on-disk value actually contains a
    # ${VAR} reference — otherwise there is no template to preserve.
    if isinstance(current, str) and isinstance(raw, str) and re.search(r"\${[^}]+}", raw):
        # Caller already holds the template verbatim.
        if current == raw:
            return raw
        # Matches what load_config() handed out for this path → untouched.
        if isinstance(loaded_expanded, str) and current == loaded_expanded:
            return raw
        # Matches today's expansion of the template (env var may have rotated).
        if _expand_env_vars(raw) == current:
            return raw
        # Rendered value diverged — the caller edited it; keep the literal.
        return current

    # Dict case: recurse per key, pairing against the same key in raw and
    # in the previously-loaded expansion.
    if isinstance(current, dict) and isinstance(raw, dict):
        return {
            key: _preserve_env_ref_templates(
                value,
                raw.get(key),
                loaded_expanded.get(key) if isinstance(loaded_expanded, dict) else None,
            )
            for key, value in current.items()
        }

    if isinstance(current, list) and isinstance(raw, list):
        # Prefer matching named config objects (e.g. custom_providers) by name
        # so harmless reordering doesn't drop the original template. If names
        # are duplicated, fall back to positional matching instead of silently
        # shadowing one entry.
        current_by_name = _items_by_unique_name(current)
        raw_by_name = _items_by_unique_name(raw)
        loaded_by_name = _items_by_unique_name(loaded_expanded)
        if current_by_name is not None and raw_by_name is not None:
            return [
                _preserve_env_ref_templates(
                    item,
                    raw_by_name.get(item.get("name")),
                    loaded_by_name.get(item.get("name")) if loaded_by_name is not None else None,
                )
                for item in current
            ]
        # Positional fallback: raw/loaded entries past the end pair with None.
        return [
            _preserve_env_ref_templates(
                item,
                raw[index] if index < len(raw) else None,
                loaded_expanded[index]
                if isinstance(loaded_expanded, list) and index < len(loaded_expanded)
                else None,
            )
            for index, item in enumerate(current)
        ]

    # Mismatched or scalar types: the caller's value wins unchanged.
    return current
||
|
||
def _normalize_root_model_keys(config: Dict[str, Any]) -> Dict[str, Any]:
|
||
"""Move stale root-level provider/base_url into model section.
|
||
|
||
Some users (or older code) placed ``provider:`` and ``base_url:`` at the
|
||
config root instead of inside ``model:``. These root-level keys are only
|
||
used as a fallback when the corresponding ``model.*`` key is empty — they
|
||
never override an existing ``model.provider`` or ``model.base_url``.
|
||
After migration the root-level keys are removed so they can't cause
|
||
confusion on subsequent loads.
|
||
"""
|
||
# Only act if there are root-level keys to migrate
|
||
has_root = any(config.get(k) for k in ("provider", "base_url"))
|
||
if not has_root:
|
||
return config
|
||
|
||
config = dict(config)
|
||
model = config.get("model")
|
||
if not isinstance(model, dict):
|
||
model = {"default": model} if model else {}
|
||
config["model"] = model
|
||
|
||
for key in ("provider", "base_url"):
|
||
root_val = config.get(key)
|
||
if root_val and not model.get(key):
|
||
model[key] = root_val
|
||
config.pop(key, None)
|
||
|
||
return config
|
||
|
||
|
||
def _normalize_max_turns_config(config: Dict[str, Any]) -> Dict[str, Any]:
|
||
"""Normalize legacy root-level max_turns into agent.max_turns."""
|
||
config = dict(config)
|
||
agent_config = dict(config.get("agent") or {})
|
||
|
||
if "max_turns" in config and "max_turns" not in agent_config:
|
||
agent_config["max_turns"] = config["max_turns"]
|
||
|
||
if "max_turns" not in agent_config:
|
||
agent_config["max_turns"] = DEFAULT_CONFIG["agent"]["max_turns"]
|
||
|
||
config["agent"] = agent_config
|
||
config.pop("max_turns", None)
|
||
return config
|
||
|
||
|
||
|
||
def read_raw_config() -> Dict[str, Any]:
    """Read ~/.hermes/config.yaml as-is, without merging defaults or migrating.

    Returns the raw YAML dict, or ``{}`` if the file doesn't exist or can't
    be parsed. Use this for lightweight config reads where you just need a
    single value and don't want the overhead of ``load_config()``'s deep-merge
    + migration pipeline.
    """
    try:
        path = get_config_path()
        if not path.exists():
            return {}
        with open(path, encoding="utf-8") as fh:
            parsed = yaml.safe_load(fh)
        return parsed or {}
    except Exception:
        # Unreadable/unparsable file degrades to an empty config.
        return {}
||
|
||
def load_config() -> Dict[str, Any]:
    """Load configuration from ~/.hermes/config.yaml."""
    ensure_hermes_home()
    path = get_config_path()

    # Start from a private copy of the defaults so user overrides never
    # mutate DEFAULT_CONFIG itself.
    merged: Dict[str, Any] = copy.deepcopy(DEFAULT_CONFIG)

    if path.exists():
        try:
            with open(path, encoding="utf-8") as fh:
                on_disk = yaml.safe_load(fh) or {}

            # Legacy root-level max_turns → agent.max_turns, applied before
            # the merge so the user's value wins over the default agent block.
            if "max_turns" in on_disk:
                agent_section = dict(on_disk.get("agent") or {})
                if agent_section.get("max_turns") is None:
                    agent_section["max_turns"] = on_disk["max_turns"]
                on_disk["agent"] = agent_section
                on_disk.pop("max_turns", None)

            merged = _deep_merge(merged, on_disk)
        except Exception as e:
            print(f"Warning: Failed to load config: {e}")

    shaped = _normalize_root_model_keys(_normalize_max_turns_config(merged))
    expanded = _expand_env_vars(shaped)
    # Remember the expanded form so save_config() can restore ${VAR}
    # templates for values the caller didn't touch.
    _LAST_EXPANDED_CONFIG_BY_PATH[str(path)] = copy.deepcopy(expanded)
    return expanded
|
||
# Commented-out config.yaml sections appended by save_config() below when the
# corresponding feature is unset, so users discover the options directly in
# their config file.
_SECURITY_COMMENT = """
# ── Security ──────────────────────────────────────────────────────────
# API keys, tokens, and passwords are redacted from tool output by default.
# Set to false to see full values (useful for debugging auth issues).
# tirith pre-exec scanning is enabled by default when the tirith binary
# is available. Configure via security.tirith_* keys or env vars
# (TIRITH_ENABLED, TIRITH_BIN, TIRITH_TIMEOUT, TIRITH_FAIL_OPEN).
#
# security:
#   redact_secrets: false
#   tirith_enabled: true
#   tirith_path: "tirith"
#   tirith_timeout: 5
#   tirith_fail_open: true
"""

_FALLBACK_COMMENT = """
# ── Fallback Model ────────────────────────────────────────────────────
# Automatic provider failover when primary is unavailable.
# Uncomment and configure to enable. Triggers on rate limits (429),
# overload (529), service errors (503), or connection failures.
#
# Supported providers:
#   openrouter      (OPENROUTER_API_KEY)      — routes to any model
#   openai-codex    (OAuth — hermes auth)     — OpenAI Codex
#   nous            (OAuth — hermes auth)     — Nous Portal
#   zai             (ZAI_API_KEY)             — Z.AI / GLM
#   kimi-coding     (KIMI_API_KEY)            — Kimi / Moonshot
#   kimi-coding-cn  (KIMI_CN_API_KEY)         — Kimi / Moonshot (China)
#   minimax         (MINIMAX_API_KEY)         — MiniMax
#   minimax-cn      (MINIMAX_CN_API_KEY)      — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
# fallback_model:
#   provider: openrouter
#   model: anthropic/claude-sonnet-4
"""


# NOTE(review): combined security+fallback variant of the two constants above.
# Not referenced by save_config() in this file — presumably kept for older
# callers; confirm before removing.
_COMMENTED_SECTIONS = """
# ── Security ──────────────────────────────────────────────────────────
# API keys, tokens, and passwords are redacted from tool output by default.
# Set to false to see full values (useful for debugging auth issues).
#
# security:
#   redact_secrets: false

# ── Fallback Model ────────────────────────────────────────────────────
# Automatic provider failover when primary is unavailable.
# Uncomment and configure to enable. Triggers on rate limits (429),
# overload (529), service errors (503), or connection failures.
#
# Supported providers:
#   openrouter      (OPENROUTER_API_KEY)      — routes to any model
#   openai-codex    (OAuth — hermes auth)     — OpenAI Codex
#   nous            (OAuth — hermes auth)     — Nous Portal
#   zai             (ZAI_API_KEY)             — Z.AI / GLM
#   kimi-coding     (KIMI_API_KEY)            — Kimi / Moonshot
#   kimi-coding-cn  (KIMI_CN_API_KEY)         — Kimi / Moonshot (China)
#   minimax         (MINIMAX_API_KEY)         — MiniMax
#   minimax-cn      (MINIMAX_CN_API_KEY)      — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
# fallback_model:
#   provider: openrouter
#   model: anthropic/claude-sonnet-4
"""
|
||
def save_config(config: Dict[str, Any]):
    """Save configuration to ~/.hermes/config.yaml.

    Normalizes legacy key layouts, restores ``${VAR}`` templates for values
    the caller didn't change (so expanded secrets aren't written back to
    disk), appends helpful commented-out sections for unset features, and
    writes atomically with restrictive file permissions.
    """
    # Managed deployments are read-only — refuse to write.
    if is_managed():
        managed_error("save configuration")
        return
    # Local import avoids a circular import at module load time —
    # TODO confirm; hermes_agent.utils is not visible from here.
    from hermes_agent.utils import atomic_yaml_write

    ensure_hermes_home()
    config_path = get_config_path()
    current_normalized = _normalize_root_model_keys(_normalize_max_turns_config(config))
    normalized = current_normalized
    # Normalize the on-disk file the same way so template matching compares
    # like with like.
    raw_existing = _normalize_root_model_keys(_normalize_max_turns_config(read_raw_config()))
    if raw_existing:
        normalized = _preserve_env_ref_templates(
            normalized,
            raw_existing,
            _LAST_EXPANDED_CONFIG_BY_PATH.get(str(config_path)),
        )

    # Build optional commented-out sections for features that are off by
    # default or only relevant when explicitly configured.
    parts = []
    sec = normalized.get("security", {})
    if not sec or sec.get("redact_secrets") is None:
        parts.append(_SECURITY_COMMENT)
    fb = normalized.get("fallback_model", {})
    if not fb or not (fb.get("provider") and fb.get("model")):
        parts.append(_FALLBACK_COMMENT)

    atomic_yaml_write(
        config_path,
        normalized,
        extra_content="".join(parts) if parts else None,
    )
    # Restrict permissions — the file may contain secrets.
    _secure_file(config_path)
    # Cache the expanded snapshot (pre-template-restoration) so the next
    # save can again distinguish untouched values from caller edits.
    _LAST_EXPANDED_CONFIG_BY_PATH[str(config_path)] = copy.deepcopy(current_normalized)
||
|
||
def load_env() -> Dict[str, str]:
    """Load environment variables from ~/.hermes/.env.

    Sanitizes lines before parsing so that corrupted files (e.g.
    concatenated KEY=VALUE pairs on a single line) are handled
    gracefully instead of producing mangled values such as duplicated
    bot tokens. See #8908.
    """
    path = get_env_path()
    if not path.exists():
        return {}

    # On Windows, open() defaults to the system locale (cp1252) which can
    # fail on UTF-8 .env files. Use explicit UTF-8 only on Windows.
    open_kwargs = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {}
    with open(path, **open_kwargs) as fh:
        file_lines = fh.readlines()

    parsed: Dict[str, str] = {}
    # Sanitize before parsing: split concatenated lines & drop stale
    # placeholders so corrupted .env files don't produce invalid tokens.
    for entry in _sanitize_env_lines(file_lines):
        entry = entry.strip()
        if not entry or entry.startswith('#') or '=' not in entry:
            continue
        name, _, raw_value = entry.partition('=')
        parsed[name.strip()] = raw_value.strip().strip('"\'')
    return parsed
|
||
|
||
|
||
def _sanitize_env_lines(lines: list) -> list:
    """Fix corrupted .env lines before reading or writing.

    Handles two known corruption patterns:
    1. Concatenated KEY=VALUE pairs on a single line (missing newline between
       entries, e.g. ``ANTHROPIC_API_KEY=sk-...OPENAI_BASE_URL=https://...``).
    2. Stale ``KEY=***`` placeholder entries left by incomplete setup runs.

    Uses a known-keys set (OPTIONAL_ENV_VARS + _EXTRA_ENV_KEYS) so we only
    split on real Hermes env var names, avoiding false positives from values
    that happen to contain uppercase text with ``=``.

    Args:
        lines: Raw lines as read from the .env file.

    Returns:
        Sanitized lines, each terminated with a newline.
    """
    # Build the known keys set lazily from OPTIONAL_ENV_VARS + extras.
    # Done inside the function so OPTIONAL_ENV_VARS is guaranteed to be defined.
    known_keys = set(OPTIONAL_ENV_VARS.keys()) | _EXTRA_ENV_KEYS

    def _is_stale_placeholder(entry: str) -> bool:
        # Pattern 2: drop "KEY=***" placeholders left by incomplete setup
        # runs. Restricted to known Hermes keys so foreign entries are
        # never touched; tolerates quoted placeholders ("***" / '***').
        name, sep, val = entry.partition("=")
        return bool(sep) and name.strip() in known_keys and val.strip().strip('"\'') == "***"

    sanitized: list[str] = []
    for line in lines:
        raw = line.rstrip("\r\n")
        stripped = raw.strip()

        # Preserve blank lines and comments
        if not stripped or stripped.startswith("#"):
            sanitized.append(raw + "\n")
            continue

        # Detect concatenated KEY=VALUE pairs on one line (pattern 1).
        # Search for known KEY= patterns at any position in the line.
        split_positions = []
        for key_name in known_keys:
            needle = key_name + "="
            idx = stripped.find(needle)
            while idx >= 0:
                split_positions.append(idx)
                idx = stripped.find(needle, idx + len(needle))

        if len(split_positions) > 1:
            # Deduplicate (shouldn't happen, but be safe) and order by position.
            split_positions = sorted(set(split_positions))
            for i, pos in enumerate(split_positions):
                end = split_positions[i + 1] if i + 1 < len(split_positions) else len(stripped)
                part = stripped[pos:end].strip()
                if part and not _is_stale_placeholder(part):
                    sanitized.append(part + "\n")
        elif not _is_stale_placeholder(stripped):
            sanitized.append(stripped + "\n")

    return sanitized
|
||
|
||
|
||
def sanitize_env_file() -> int:
    """Read, sanitize, and rewrite ~/.hermes/.env in place.

    Returns the number of lines that were fixed (concatenation splits +
    placeholder removals). Returns 0 when no changes are needed.
    """
    env_path = get_env_path()
    if not env_path.exists():
        return 0

    # Explicit UTF-8 on Windows only (system locale may be cp1252).
    read_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {}
    write_kw = {"encoding": "utf-8"} if _IS_WINDOWS else {}

    with open(env_path, **read_kw) as fh:
        before = fh.readlines()

    after = _sanitize_env_lines(before)
    if after == before:
        return 0

    # Count fixes: line-count delta (from splits/removals); when the count
    # is unchanged, fall back to counting content-level line differences.
    delta = abs(len(after) - len(before))
    fix_count = delta if delta else sum(
        1 for old, new in zip(before, after) if old != new
    )

    fd, tmp = tempfile.mkstemp(dir=str(env_path.parent), suffix=".tmp", prefix=".env_")
    try:
        # Write to a temp file in the same directory, fsync, then atomically
        # replace the original so a crash never leaves a half-written .env.
        with os.fdopen(fd, "w", **write_kw) as fh:
            fh.writelines(after)
            fh.flush()
            os.fsync(fh.fileno())
        os.replace(tmp, env_path)
    except BaseException:
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise
    _secure_file(env_path)
    return fix_count
|
||
|
||
|
||
def _check_non_ascii_credential(key: str, value: str) -> str:
|
||
"""Warn and strip non-ASCII characters from credential values.
|
||
|
||
API keys and tokens must be pure ASCII — they are sent as HTTP header
|
||
values which httpx/httpcore encode as ASCII. Non-ASCII characters
|
||
(commonly introduced by copy-pasting from rich-text editors or PDFs
|
||
that substitute lookalike Unicode glyphs for ASCII letters) cause
|
||
``UnicodeEncodeError: 'ascii' codec can't encode character`` at
|
||
request time.
|
||
|
||
Returns the sanitized (ASCII-only) value. Prints a warning if any
|
||
non-ASCII characters were found and removed.
|
||
"""
|
||
try:
|
||
value.encode("ascii")
|
||
return value # all ASCII — nothing to do
|
||
except UnicodeEncodeError:
|
||
pass
|
||
|
||
# Build a readable list of the offending characters
|
||
bad_chars: list[str] = []
|
||
for i, ch in enumerate(value):
|
||
if ord(ch) > 127:
|
||
bad_chars.append(f" position {i}: {ch!r} (U+{ord(ch):04X})")
|
||
sanitized = value.encode("ascii", errors="ignore").decode("ascii")
|
||
|
||
print(
|
||
f"\n Warning: {key} contains non-ASCII characters that will break API requests.\n"
|
||
f" This usually happens when copy-pasting from a PDF, rich-text editor,\n"
|
||
f" or web page that substitutes lookalike Unicode glyphs for ASCII letters.\n"
|
||
f"\n"
|
||
+ "\n".join(f" {line}" for line in bad_chars[:5])
|
||
+ ("\n ... and more" if len(bad_chars) > 5 else "")
|
||
+ f"\n\n The non-ASCII characters have been stripped automatically.\n"
|
||
f" If authentication fails, re-copy the key from the provider's dashboard.\n",
|
||
file=sys.stderr,
|
||
)
|
||
return sanitized
|
||
|
||
|
||
def save_env_value(key: str, value: str):
    """Save or update a value in ~/.hermes/.env.

    Validates the variable name, strips newlines and non-ASCII characters
    from the value, rewrites the file atomically (temp file + fsync +
    os.replace) while preserving the original file permissions, and
    mirrors the value into os.environ. No-op under managed mode.

    Raises:
        ValueError: If ``key`` is not a valid environment variable name.
    """
    if is_managed():
        managed_error(f"set {key}")
        return
    if not _ENV_VAR_NAME_RE.match(key):
        raise ValueError(f"Invalid environment variable name: {key!r}")
    # Newlines in a value would corrupt the line-oriented .env format.
    value = value.replace("\n", "").replace("\r", "")
    # API keys / tokens must be ASCII — strip non-ASCII with a warning.
    value = _check_non_ascii_credential(key, value)
    ensure_hermes_home()
    env_path = get_env_path()

    # On Windows, open() defaults to the system locale (cp1252) which can
    # cause OSError errno 22 on UTF-8 .env files.
    read_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {}
    write_kw = {"encoding": "utf-8"} if _IS_WINDOWS else {}

    lines = []
    if env_path.exists():
        with open(env_path, **read_kw) as f:
            lines = f.readlines()
        # Sanitize on every read: split concatenated keys, drop stale placeholders
        lines = _sanitize_env_lines(lines)

    # Find and update or append
    found = False
    for i, line in enumerate(lines):
        if line.strip().startswith(f"{key}="):
            lines[i] = f"{key}={value}\n"
            found = True
            break

    if not found:
        # Ensure there's a newline at the end of the file before appending
        if lines and not lines[-1].endswith("\n"):
            lines[-1] += "\n"
        lines.append(f"{key}={value}\n")

    # Atomic rewrite: write a temp file in the same directory so os.replace
    # is an atomic rename on the same filesystem.
    fd, tmp_path = tempfile.mkstemp(dir=str(env_path.parent), suffix='.tmp', prefix='.env_')
    # Preserve original permissions so Docker volume mounts aren't clobbered.
    original_mode = None
    if env_path.exists():
        try:
            original_mode = stat.S_IMODE(env_path.stat().st_mode)
        except OSError:
            pass
    try:
        with os.fdopen(fd, 'w', **write_kw) as f:
            f.writelines(lines)
            f.flush()
            os.fsync(f.fileno())
        os.replace(tmp_path, env_path)
        # Restore original permissions before _secure_file may tighten them.
        if original_mode is not None:
            try:
                os.chmod(env_path, original_mode)
            except OSError:
                pass
    except BaseException:
        # Best-effort cleanup of the temp file on any failure, then re-raise.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise
    _secure_file(env_path)

    # Keep the running process in sync with the file.
    os.environ[key] = value
|
||
|
||
|
||
def remove_env_value(key: str) -> bool:
    """Remove a key from ~/.hermes/.env and os.environ.

    Returns True if the key was found and removed, False otherwise.

    The file is rewritten atomically (temp file + fsync + os.replace)
    and the original permissions are preserved. os.environ is cleaned
    up in every case, even when the file does not contain the key.

    Raises:
        ValueError: If ``key`` is not a valid environment variable name.
    """
    if is_managed():
        managed_error(f"remove {key}")
        return False
    if not _ENV_VAR_NAME_RE.match(key):
        raise ValueError(f"Invalid environment variable name: {key!r}")
    env_path = get_env_path()
    if not env_path.exists():
        os.environ.pop(key, None)
        return False

    # Explicit UTF-8 on Windows only (system locale may be cp1252).
    read_kw = {"encoding": "utf-8", "errors": "replace"} if _IS_WINDOWS else {}
    write_kw = {"encoding": "utf-8"} if _IS_WINDOWS else {}

    with open(env_path, **read_kw) as f:
        lines = f.readlines()
    lines = _sanitize_env_lines(lines)

    new_lines = [line for line in lines if not line.strip().startswith(f"{key}=")]
    found = len(new_lines) < len(lines)

    if found:
        fd, tmp_path = tempfile.mkstemp(dir=str(env_path.parent), suffix='.tmp', prefix='.env_')
        # Preserve original permissions so Docker volume mounts aren't clobbered.
        original_mode = None
        try:
            original_mode = stat.S_IMODE(env_path.stat().st_mode)
        except OSError:
            pass
        try:
            with os.fdopen(fd, 'w', **write_kw) as f:
                f.writelines(new_lines)
                f.flush()
                os.fsync(f.fileno())
            os.replace(tmp_path, env_path)
            # Restore original permissions before _secure_file may tighten them.
            if original_mode is not None:
                try:
                    os.chmod(env_path, original_mode)
                except OSError:
                    pass
        except BaseException:
            # Best-effort cleanup of the temp file on any failure, then re-raise.
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
            raise
        _secure_file(env_path)

    os.environ.pop(key, None)
    return found
|
||
|
||
|
||
def save_anthropic_oauth_token(value: str, save_fn=None):
    """Persist an Anthropic OAuth/setup token and clear the API-key slot.

    Writes go through *save_fn* when given, otherwise through
    save_env_value. The token and API-key slots are mutually
    exclusive, so the API-key entry is blanked.
    """
    persist = save_fn if save_fn else save_env_value
    for slot, content in (("ANTHROPIC_TOKEN", value), ("ANTHROPIC_API_KEY", "")):
        persist(slot, content)
|
||
|
||
|
||
def use_anthropic_claude_code_credentials(save_fn=None):
    """Use Claude Code's own credential files instead of persisting env tokens.

    Blanks both the OAuth/setup-token and API-key slots, routing writes
    through *save_fn* when given (default: save_env_value).
    """
    persist = save_fn if save_fn else save_env_value
    for slot in ("ANTHROPIC_TOKEN", "ANTHROPIC_API_KEY"):
        persist(slot, "")
|
||
|
||
|
||
def save_anthropic_api_key(value: str, save_fn=None):
    """Persist an Anthropic API key and clear the OAuth/setup-token slot.

    Writes go through *save_fn* when given, otherwise through
    save_env_value. The two slots are mutually exclusive, so the
    token entry is blanked.
    """
    persist = save_fn if save_fn else save_env_value
    for slot, content in (("ANTHROPIC_API_KEY", value), ("ANTHROPIC_TOKEN", "")):
        persist(slot, content)
|
||
|
||
|
||
def save_env_value_secure(key: str, value: str) -> Dict[str, Any]:
    """Persist *key* via save_env_value and report the storage outcome.

    Returns a result dict: success flag, the env-var name the value was
    stored under, and a ``validated`` flag (always False — no remote
    validation is performed here).
    """
    save_env_value(key, value)
    outcome: Dict[str, Any] = {"success": True}
    outcome["stored_as"] = key
    outcome["validated"] = False
    return outcome
|
||
|
||
|
||
|
||
def reload_env() -> int:
    """Re-read ~/.hermes/.env into os.environ. Returns count of vars updated.

    Adds/updates vars that changed and removes vars that were deleted from
    the .env file (but only vars known to Hermes — OPTIONAL_ENV_VARS and
    _EXTRA_ENV_KEYS — to avoid clobbering unrelated environment).
    """
    file_vars = load_env()
    changed = 0

    # Apply additions/updates from the file.
    for name, val in file_vars.items():
        if os.environ.get(name) != val:
            os.environ[name] = val
            changed += 1

    # Drop known Hermes vars that disappeared from the file; never touch
    # variables Hermes doesn't own.
    hermes_keys = set(OPTIONAL_ENV_VARS.keys()) | _EXTRA_ENV_KEYS
    for name in hermes_keys:
        if name in os.environ and name not in file_vars:
            del os.environ[name]
            changed += 1
    return changed
|
||
|
||
|
||
def get_env_value(key: str) -> Optional[str]:
    """Get a value from ~/.hermes/.env or environment.

    The live process environment takes precedence; the .env file is only
    consulted when the variable is not already set.
    """
    try:
        return os.environ[key]
    except KeyError:
        return load_env().get(key)
|
||
|
||
|
||
# =============================================================================
|
||
# Config display
|
||
# =============================================================================
|
||
|
||
def redact_key(key: str) -> str:
    """Redact an API key for display.

    Empty/missing keys render as a dimmed "(not set)"; short keys are
    fully masked; longer keys show only the first and last four chars.
    """
    if not key:
        return color("(not set)", Colors.DIM)
    return "***" if len(key) < 12 else f"{key[:4]}...{key[-4:]}"
|
||
|
||
|
||
def show_config():
    """Display current configuration.

    Prints a human-readable summary of paths, API keys (redacted),
    model/display/terminal settings, timezone, compression, auxiliary
    model overrides, messaging platforms, and (best-effort) skill
    settings. Read-only: never modifies config or env files.
    """
    config = load_config()

    print()
    print(color("┌─────────────────────────────────────────────────────────┐", Colors.CYAN))
    print(color("│ ⚕ Hermes Configuration │", Colors.CYAN))
    print(color("└─────────────────────────────────────────────────────────┘", Colors.CYAN))

    # Paths
    print()
    print(color("◆ Paths", Colors.CYAN, Colors.BOLD))
    print(f" Config: {get_config_path()}")
    print(f" Secrets: {get_env_path()}")
    print(f" Install: {get_project_root()}")

    # API Keys (always shown redacted — see redact_key)
    print()
    print(color("◆ API Keys", Colors.CYAN, Colors.BOLD))

    keys = [
        ("OPENROUTER_API_KEY", "OpenRouter"),
        ("VOICE_TOOLS_OPENAI_KEY", "OpenAI (STT/TTS)"),
        ("EXA_API_KEY", "Exa"),
        ("PARALLEL_API_KEY", "Parallel"),
        ("FIRECRAWL_API_KEY", "Firecrawl"),
        ("TAVILY_API_KEY", "Tavily"),
        ("BROWSERBASE_API_KEY", "Browserbase"),
        ("BROWSER_USE_API_KEY", "Browser Use"),
        ("FAL_KEY", "FAL"),
    ]

    for env_key, name in keys:
        value = get_env_value(env_key)
        print(f" {name:<14} {redact_key(value)}")
    # Anthropic credentials may live in either the API-key or OAuth slot;
    # get_anthropic_key resolves whichever is active.
    from hermes_agent.cli.auth.auth import get_anthropic_key
    anthropic_value = get_anthropic_key()
    print(f" {'Anthropic':<14} {redact_key(anthropic_value)}")

    # Model settings
    print()
    print(color("◆ Model", Colors.CYAN, Colors.BOLD))
    print(f" Model: {config.get('model', 'not set')}")
    print(f" Max turns: {config.get('agent', {}).get('max_turns', DEFAULT_CONFIG['agent']['max_turns'])}")

    # Display
    print()
    print(color("◆ Display", Colors.CYAN, Colors.BOLD))
    display = config.get('display', {})
    print(f" Personality: {display.get('personality', 'kawaii')}")
    print(f" Reasoning: {'on' if display.get('show_reasoning', False) else 'off'}")
    print(f" Bell: {'on' if display.get('bell_on_complete', False) else 'off'}")
    # Guard against a non-dict user_message_preview value in the YAML.
    ump = display.get('user_message_preview', {}) if isinstance(display.get('user_message_preview', {}), dict) else {}
    ump_first = ump.get('first_lines', 2)
    ump_last = ump.get('last_lines', 2)
    print(f" User preview: first {ump_first} line(s), last {ump_last} line(s)")

    # Terminal
    print()
    print(color("◆ Terminal", Colors.CYAN, Colors.BOLD))
    terminal = config.get('terminal', {})
    print(f" Backend: {terminal.get('backend', 'local')}")
    print(f" Working dir: {terminal.get('cwd', '.')}")
    print(f" Timeout: {terminal.get('timeout', 60)}s")

    # Backend-specific details (image name / credentials status).
    if terminal.get('backend') == 'docker':
        print(f" Docker image: {terminal.get('docker_image', 'nikolaik/python-nodejs:python3.11-nodejs20')}")
    elif terminal.get('backend') == 'singularity':
        print(f" Image: {terminal.get('singularity_image', 'docker://nikolaik/python-nodejs:python3.11-nodejs20')}")
    elif terminal.get('backend') == 'modal':
        print(f" Modal image: {terminal.get('modal_image', 'nikolaik/python-nodejs:python3.11-nodejs20')}")
        modal_token = get_env_value('MODAL_TOKEN_ID')
        print(f" Modal token: {'configured' if modal_token else '(not set)'}")
    elif terminal.get('backend') == 'daytona':
        print(f" Daytona image: {terminal.get('daytona_image', 'nikolaik/python-nodejs:python3.11-nodejs20')}")
        daytona_key = get_env_value('DAYTONA_API_KEY')
        print(f" API key: {'configured' if daytona_key else '(not set)'}")
    elif terminal.get('backend') == 'ssh':
        ssh_host = get_env_value('TERMINAL_SSH_HOST')
        ssh_user = get_env_value('TERMINAL_SSH_USER')
        print(f" SSH host: {ssh_host or '(not set)'}")
        print(f" SSH user: {ssh_user or '(not set)'}")

    # Timezone
    print()
    print(color("◆ Timezone", Colors.CYAN, Colors.BOLD))
    tz = config.get('timezone', '')
    if tz:
        print(f" Timezone: {tz}")
    else:
        print(f" Timezone: {color('(server-local)', Colors.DIM)}")

    # Compression
    print()
    print(color("◆ Context Compression", Colors.CYAN, Colors.BOLD))
    compression = config.get('compression', {})
    enabled = compression.get('enabled', True)
    print(f" Enabled: {'yes' if enabled else 'no'}")
    if enabled:
        print(f" Threshold: {compression.get('threshold', 0.50) * 100:.0f}%")
        print(f" Target ratio: {compression.get('target_ratio', 0.20) * 100:.0f}% of threshold preserved")
        print(f" Protect last: {compression.get('protect_last_n', 20)} messages")
        _aux_comp = config.get('auxiliary', {}).get('compression', {})
        _sm = _aux_comp.get('model', '') or '(auto)'
        print(f" Model: {_sm}")
        comp_provider = _aux_comp.get('provider', 'auto')
        if comp_provider and comp_provider != 'auto':
            print(f" Provider: {comp_provider}")

    # Auxiliary models — only shown when at least one task overrides defaults.
    auxiliary = config.get('auxiliary', {})
    aux_tasks = {
        "Vision": auxiliary.get('vision', {}),
        "Web extract": auxiliary.get('web_extract', {}),
    }
    has_overrides = any(
        t.get('provider', 'auto') != 'auto' or t.get('model', '')
        for t in aux_tasks.values()
    )
    if has_overrides:
        print()
        print(color("◆ Auxiliary Models (overrides)", Colors.CYAN, Colors.BOLD))
        for label, task_cfg in aux_tasks.items():
            prov = task_cfg.get('provider', 'auto')
            mdl = task_cfg.get('model', '')
            if prov != 'auto' or mdl:
                parts = [f"provider={prov}"]
                if mdl:
                    parts.append(f"model={mdl}")
                print(f" {label:12s} {', '.join(parts)}")

    # Messaging
    print()
    print(color("◆ Messaging Platforms", Colors.CYAN, Colors.BOLD))

    telegram_token = get_env_value('TELEGRAM_BOT_TOKEN')
    discord_token = get_env_value('DISCORD_BOT_TOKEN')

    print(f" Telegram: {'configured' if telegram_token else color('not configured', Colors.DIM)}")
    print(f" Discord: {'configured' if discord_token else color('not configured', Colors.DIM)}")

    # Skill config — best-effort: skills are optional, so any failure here
    # (import error, bad skill metadata) is deliberately swallowed.
    try:
        from hermes_agent.agent.skill_utils import discover_all_skill_config_vars, resolve_skill_config_values
        skill_vars = discover_all_skill_config_vars()
        if skill_vars:
            resolved = resolve_skill_config_values(skill_vars)
            print()
            print(color("◆ Skill Settings", Colors.CYAN, Colors.BOLD))
            for var in skill_vars:
                key = var["key"]
                value = resolved.get(key, "")
                skill_name = var.get("skill", "")
                display_val = str(value) if value else color("(not set)", Colors.DIM)
                print(f" {key:<20s} {display_val} {color(f'[{skill_name}]', Colors.DIM)}")
    except Exception:
        pass

    print()
    print(color("─" * 60, Colors.DIM))
    print(color(" hermes config edit # Edit config file", Colors.DIM))
    print(color(" hermes config set <key> <value>", Colors.DIM))
    print(color(" hermes setup # Run setup wizard", Colors.DIM))
    print()
|
||
|
||
|
||
def edit_config():
    """Open config file in user's editor.

    Creates ~/.hermes/config.yaml with defaults if it does not exist yet,
    then launches $EDITOR / $VISUAL, falling back to the first common
    editor found on PATH. Prints the file path when no editor is
    available. No-op under managed mode.
    """
    if is_managed():
        managed_error("edit configuration")
        return
    config_path = get_config_path()

    # Ensure config exists
    if not config_path.exists():
        save_config(dict(DEFAULT_CONFIG))
        print(f"Created {config_path}")

    # Find editor: explicit env vars take precedence.
    editor = os.getenv('EDITOR') or os.getenv('VISUAL')

    if not editor:
        # Try common editors. Import hoisted out of the loop — the original
        # re-ran `import shutil` on every iteration.
        import shutil
        for cmd in ['nano', 'vim', 'vi', 'code', 'notepad']:
            if shutil.which(cmd):
                editor = cmd
                break

    if not editor:
        print("No editor found. Config file is at:")
        print(f" {config_path}")
        return

    print(f"Opening {config_path} in {editor}...")
    # List form (shell=False) — editor name and path are never shell-parsed.
    subprocess.run([editor, str(config_path)])
|
||
|
||
|
||
def set_config_value(key: str, value: str):
    """Set a configuration value.

    Routing:
    - Secret-looking keys (known API-key names, ``*_API_KEY``/``*_TOKEN``
      suffixes, ``TERMINAL_SSH*`` prefixes) go to ~/.hermes/.env.
    - Everything else goes to config.yaml; dotted keys create nested
      mappings (e.g. "terminal.backend") and string values are coerced
      to bool/int/float when they look like one.
    Certain terminal.* keys are mirrored into .env because terminal_tool
    reads them from env vars. No-op under managed mode.
    """
    if is_managed():
        managed_error("set configuration values")
        return
    # Check if it's an API key (goes to .env)
    api_keys = [
        'OPENROUTER_API_KEY', 'OPENAI_API_KEY', 'ANTHROPIC_API_KEY', 'VOICE_TOOLS_OPENAI_KEY',
        'EXA_API_KEY', 'PARALLEL_API_KEY', 'FIRECRAWL_API_KEY', 'FIRECRAWL_API_URL',
        'FIRECRAWL_GATEWAY_URL', 'TOOL_GATEWAY_DOMAIN', 'TOOL_GATEWAY_SCHEME',
        'TOOL_GATEWAY_USER_TOKEN', 'TAVILY_API_KEY',
        'BROWSERBASE_API_KEY', 'BROWSERBASE_PROJECT_ID', 'BROWSER_USE_API_KEY',
        'FAL_KEY', 'TELEGRAM_BOT_TOKEN', 'DISCORD_BOT_TOKEN',
        'TERMINAL_SSH_HOST', 'TERMINAL_SSH_USER', 'TERMINAL_SSH_KEY',
        'SUDO_PASSWORD', 'SLACK_BOT_TOKEN', 'SLACK_APP_TOKEN',
        'GITHUB_TOKEN', 'HONCHO_API_KEY', 'WANDB_API_KEY',
        'TINKER_API_KEY',
    ]

    if key.upper() in api_keys or key.upper().endswith(('_API_KEY', '_TOKEN')) or key.upper().startswith('TERMINAL_SSH'):
        save_env_value(key.upper(), value)
        print(f"✓ Set {key} in {get_env_path()}")
        return

    # Otherwise it goes to config.yaml
    # Read the raw user config (not merged with defaults) to avoid
    # dumping all default values back to the file
    config_path = get_config_path()
    user_config = {}
    if config_path.exists():
        try:
            with open(config_path, encoding="utf-8") as f:
                user_config = yaml.safe_load(f) or {}
        except Exception:
            # Unreadable/invalid YAML: fall back to an empty user config.
            user_config = {}

    # Handle nested keys (e.g., "tts.provider")
    parts = key.split('.')
    current = user_config

    for part in parts[:-1]:
        # Overwrite non-dict intermediates so the dotted path can be created.
        if part not in current or not isinstance(current.get(part), dict):
            current[part] = {}
        current = current[part]

    # Convert value to appropriate type
    if value.lower() in ('true', 'yes', 'on'):
        value = True
    elif value.lower() in ('false', 'no', 'off'):
        value = False
    elif value.isdigit():
        value = int(value)
    elif value.replace('.', '', 1).isdigit():
        # e.g. "1.5" — note negative numbers stay strings here.
        value = float(value)

    current[parts[-1]] = value

    # Write only user config back (not the full merged defaults)
    ensure_hermes_home()
    from hermes_agent.utils import atomic_yaml_write
    atomic_yaml_write(config_path, user_config, sort_keys=False)

    # Keep .env in sync for keys that terminal_tool reads directly from env vars.
    # config.yaml is authoritative, but terminal_tool only reads TERMINAL_ENV etc.
    _config_to_env_sync = {
        "terminal.backend": "TERMINAL_ENV",
        "terminal.modal_mode": "TERMINAL_MODAL_MODE",
        "terminal.docker_image": "TERMINAL_DOCKER_IMAGE",
        "terminal.singularity_image": "TERMINAL_SINGULARITY_IMAGE",
        "terminal.modal_image": "TERMINAL_MODAL_IMAGE",
        "terminal.daytona_image": "TERMINAL_DAYTONA_IMAGE",
        "terminal.docker_mount_cwd_to_workspace": "TERMINAL_DOCKER_MOUNT_CWD_TO_WORKSPACE",
        "terminal.cwd": "TERMINAL_CWD",
        "terminal.timeout": "TERMINAL_TIMEOUT",
        "terminal.sandbox_dir": "TERMINAL_SANDBOX_DIR",
        "terminal.persistent_shell": "TERMINAL_PERSISTENT_SHELL",
        "terminal.container_cpu": "TERMINAL_CONTAINER_CPU",
        "terminal.container_memory": "TERMINAL_CONTAINER_MEMORY",
        "terminal.container_disk": "TERMINAL_CONTAINER_DISK",
        "terminal.container_persistent": "TERMINAL_CONTAINER_PERSISTENT",
    }
    if key in _config_to_env_sync:
        save_env_value(_config_to_env_sync[key], str(value))

    print(f"✓ Set {key} = {value} in {config_path}")
|
||
|
||
|
||
# =============================================================================
|
||
# Command handler
|
||
# =============================================================================
|
||
|
||
def config_command(args):
    """Handle config subcommands.

    Dispatches on ``args.config_command``: show (default), edit, set,
    path, env-path, migrate (interactive update), and check
    (non-interactive status). Unknown subcommands print usage and
    exit(1); `set` without both key and value also exits(1).
    """
    subcmd = getattr(args, 'config_command', None)

    if subcmd is None or subcmd == "show":
        show_config()

    elif subcmd == "edit":
        edit_config()

    elif subcmd == "set":
        key = getattr(args, 'key', None)
        value = getattr(args, 'value', None)
        if not key or value is None:
            print("Usage: hermes config set <key> <value>")
            print()
            print("Examples:")
            print(" hermes config set model anthropic/claude-sonnet-4")
            print(" hermes config set terminal.backend docker")
            print(" hermes config set OPENROUTER_API_KEY sk-or-...")
            sys.exit(1)
        set_config_value(key, value)

    elif subcmd == "path":
        print(get_config_path())

    elif subcmd == "env-path":
        print(get_env_path())

    elif subcmd == "migrate":
        print()
        print(color("🔄 Checking configuration for updates...", Colors.CYAN, Colors.BOLD))
        print()

        # Check what's missing
        missing_env = get_missing_env_vars(required_only=False)
        missing_config = get_missing_config_fields()
        current_ver, latest_ver = check_config_version()

        if not missing_env and not missing_config and current_ver >= latest_ver:
            print(color("✓ Configuration is up to date!", Colors.GREEN))
            print()
            return

        # Show what needs to be updated
        if current_ver < latest_ver:
            print(f" Config version: {current_ver} → {latest_ver}")

        if missing_config:
            print(f"\n {len(missing_config)} new config option(s) will be added with defaults")

        # Advanced optional vars are intentionally hidden from this summary.
        required_missing = [v for v in missing_env if v.get("is_required")]
        optional_missing = [
            v for v in missing_env
            if not v.get("is_required") and not v.get("advanced")
        ]

        if required_missing:
            print(f"\n ⚠️ {len(required_missing)} required API key(s) missing:")
            for var in required_missing:
                print(f" • {var['name']}")

        if optional_missing:
            print(f"\n ℹ️ {len(optional_missing)} optional API key(s) not configured:")
            for var in optional_missing:
                tools = var.get("tools", [])
                tools_str = f" (enables: {', '.join(tools[:2])})" if tools else ""
                print(f" • {var['name']}{tools_str}")

        print()

        # Run migration (interactive — may prompt for missing keys)
        results = migrate_config(interactive=True, quiet=False)

        print()
        if results["env_added"] or results["config_added"]:
            print(color("✓ Configuration updated!", Colors.GREEN))

        if results["warnings"]:
            print()
            for warning in results["warnings"]:
                print(color(f" ⚠️ {warning}", Colors.YELLOW))

        print()

    elif subcmd == "check":
        # Non-interactive check for what's missing
        print()
        print(color("📋 Configuration Status", Colors.CYAN, Colors.BOLD))
        print()

        current_ver, latest_ver = check_config_version()
        if current_ver >= latest_ver:
            print(f" Config version: {current_ver} ✓")
        else:
            print(color(f" Config version: {current_ver} → {latest_ver} (update available)", Colors.YELLOW))

        print()
        print(color(" Required:", Colors.BOLD))
        for var_name in REQUIRED_ENV_VARS:
            if get_env_value(var_name):
                print(f" ✓ {var_name}")
            else:
                print(color(f" ✗ {var_name} (missing)", Colors.RED))

        print()
        print(color(" Optional:", Colors.BOLD))
        for var_name, info in OPTIONAL_ENV_VARS.items():
            if get_env_value(var_name):
                print(f" ✓ {var_name}")
            else:
                tools = info.get("tools", [])
                tools_str = f" → {', '.join(tools[:2])}" if tools else ""
                print(color(f" ○ {var_name}{tools_str}", Colors.DIM))

        missing_config = get_missing_config_fields()
        if missing_config:
            print()
            print(color(f" {len(missing_config)} new config option(s) available", Colors.YELLOW))
            print(" Run 'hermes config migrate' to add them")

        print()

    else:
        print(f"Unknown config command: {subcmd}")
        print()
        print("Available commands:")
        print(" hermes config Show current configuration")
        print(" hermes config edit Open config in editor")
        print(" hermes config set <key> <value> Set a config value")
        print(" hermes config check Check for missing/outdated config")
        print(" hermes config migrate Update config with new options")
        print(" hermes config path Show config file path")
        print(" hermes config env-path Show .env file path")
        sys.exit(1)
|