Merge origin/main and resolve conflict in nix/tui.nix

Co-authored-by: austinpickett <260188+austinpickett@users.noreply.github.com>
This commit is contained in:
copilot-swe-agent[bot] 2026-05-07 22:56:19 +00:00 committed by GitHub
commit 901eccc88e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
587 changed files with 72570 additions and 4537 deletions

View file

@ -14,8 +14,8 @@ Provides subcommands for:
import os
import sys
__version__ = "0.12.0"
__release_date__ = "2026.4.30"
__version__ = "0.13.0"
__release_date__ = "2026.5.7"
def _ensure_utf8():

View file

@ -70,6 +70,9 @@ Examples:
hermes logs --since 1h Lines from the last hour
hermes debug share Upload debug report for support
hermes update Update to latest version
hermes dashboard Start web UI dashboard (port 9119)
hermes dashboard --stop Stop running dashboard processes
hermes dashboard --status List running dashboard processes
For more help on a command:
hermes <command> --help

View file

@ -416,6 +416,40 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
),
}
# Auto-extend PROVIDER_REGISTRY with any api-key provider registered in
# providers/ that is not already declared above. New providers only need a
# plugins/model-providers/<name>/ plugin — no edits to this file required.
try:
from providers import list_providers as _list_providers_for_registry
for _pp in _list_providers_for_registry():
if _pp.name in PROVIDER_REGISTRY:
continue
if _pp.auth_type != "api_key" or not _pp.env_vars:
continue
# Skip providers that need custom token resolution or are special-cased
# in resolve_provider() (copilot/kimi/zai have bespoke token refresh;
# openrouter/custom are aggregator/user-supplied and handled outside
# the registry — adding them here breaks runtime_provider resolution
# that relies on `openrouter not in PROVIDER_REGISTRY`).
if _pp.name in {"copilot", "kimi-coding", "kimi-coding-cn", "zai", "openrouter", "custom"}:
continue
_api_key_vars = tuple(v for v in _pp.env_vars if not v.endswith("_BASE_URL") and not v.endswith("_URL"))
_base_url_var = next((v for v in _pp.env_vars if v.endswith("_BASE_URL") or v.endswith("_URL")), None)
PROVIDER_REGISTRY[_pp.name] = ProviderConfig(
id=_pp.name,
name=_pp.display_name or _pp.name,
auth_type="api_key",
inference_base_url=_pp.base_url,
api_key_env_vars=_api_key_vars or _pp.env_vars,
base_url_env_var=_base_url_var or "",
)
# Also register aliases so resolve_provider() resolves them
for _alias in _pp.aliases:
if _alias not in PROVIDER_REGISTRY:
PROVIDER_REGISTRY[_alias] = PROVIDER_REGISTRY[_pp.name]
except Exception:
pass
# =============================================================================
# Anthropic Key Helper
@ -746,42 +780,121 @@ def _auth_file_path() -> Path:
return path
def _global_auth_file_path() -> Optional[Path]:
"""Return the global-root auth.json when the process is in profile mode.
Returns ``None`` when the profile and global root resolve to the same
directory (classic mode, or custom HERMES_HOME that is not a profile).
Used by read-only fallback paths so providers authed at the root are
visible to profile processes that haven't configured them locally.
See issue #18594 follow-up (credential_pool shadowing).
"""
try:
from hermes_constants import get_default_hermes_root
global_root = get_default_hermes_root()
except Exception:
return None
profile_home = get_hermes_home()
try:
if profile_home.resolve(strict=False) == global_root.resolve(strict=False):
return None
except Exception:
if profile_home == global_root:
return None
# No pytest seat belt here: this is a pure read-only path, and
# ``_load_global_auth_store()`` wraps the read in a try/except so an
# unreadable global file can never break the profile process. The
# write-side seat belt still lives on ``_auth_file_path()`` where it
# belongs (that's what protects the real user's auth store from being
# corrupted by a mis-configured test).
return global_root / "auth.json"
def _load_global_auth_store() -> Dict[str, Any]:
    """Read the global-root auth store as a read-only fallback.

    Returns ``{}`` when no global fallback exists (classic mode, the global
    ``auth.json`` is absent, or the file cannot be parsed). Never raises for
    a missing or malformed file.

    Seat belt: while pytest is running, refuse to read the real user's
    ``~/.hermes/auth.json`` even when HERMES_HOME is set to a profile path.
    The hermetic conftest does not redirect ``HOME``, so
    ``get_default_hermes_root()`` for a profile-shaped HERMES_HOME can still
    resolve to the real user's home on a dev machine, which would leak real
    credentials into tests. The guard intentionally uses the raw ``HOME``
    env var (what ``os.path.expanduser('~')`` would see), not
    ``Path.home()``, because fixtures sometimes monkeypatch ``Path.home``
    to relocate the global root to a tmp path.
    """
    path = _global_auth_file_path()
    if path is None or not path.exists():
        return {}
    if os.environ.get("PYTEST_CURRENT_TEST"):
        home_env = os.environ.get("HOME", "")
        if home_env:
            user_store = Path(home_env) / ".hermes" / "auth.json"
            try:
                if path.resolve(strict=False) == user_store.resolve(strict=False):
                    return {}
            except Exception:
                pass
    try:
        return _load_auth_store(path)
    except Exception:
        # A malformed global store must not break profile reads; the
        # profile's own auth store remains authoritative.
        return {}
def _auth_lock_path() -> Path:
    """Path of the advisory lock file that guards the profile's auth.json."""
    auth_file = _auth_file_path()
    return auth_file.with_suffix(".lock")
_auth_lock_holder = threading.local()
@contextmanager
def _auth_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
"""Cross-process advisory lock for auth.json reads+writes. Reentrant."""
# Reentrant: if this thread already holds the lock, just yield.
if getattr(_auth_lock_holder, "depth", 0) > 0:
_auth_lock_holder.depth += 1
def _file_lock(
lock_path: Path,
holder: threading.local,
timeout_seconds: float,
timeout_message: str,
):
"""Cross-process advisory flock helper.
Reentrant per-thread via ``holder.depth``. Falls back to a depth-only
guard when neither ``fcntl`` nor ``msvcrt`` is available (rare).
Callers supply their own ``threading.local`` so independent locks
(e.g. profile auth.json vs shared Nous store) don't share reentrancy
state that would let one lock's reentrant acquisition silently skip
the other's kernel-level flock.
"""
if getattr(holder, "depth", 0) > 0:
holder.depth += 1
try:
yield
finally:
_auth_lock_holder.depth -= 1
holder.depth -= 1
return
lock_path = _auth_lock_path()
lock_path.parent.mkdir(parents=True, exist_ok=True)
if fcntl is None and msvcrt is None:
_auth_lock_holder.depth = 1
holder.depth = 1
try:
yield
finally:
_auth_lock_holder.depth = 0
holder.depth = 0
return
# On Windows, msvcrt.locking needs the file to have content and the
# file pointer at position 0. Ensure the lock file has at least 1 byte.
# file pointer at position 0. Ensure the lock file has at least 1 byte.
if msvcrt and (not lock_path.exists() or lock_path.stat().st_size == 0):
lock_path.write_text(" ", encoding="utf-8")
with lock_path.open("r+" if msvcrt else "a+") as lock_file:
deadline = time.time() + max(1.0, timeout_seconds)
deadline = time.monotonic() + max(1.0, timeout_seconds)
while True:
try:
if fcntl:
@ -791,15 +904,15 @@ def _auth_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
msvcrt.locking(lock_file.fileno(), msvcrt.LK_NBLCK, 1)
break
except (BlockingIOError, OSError, PermissionError):
if time.time() >= deadline:
raise TimeoutError("Timed out waiting for auth store lock")
if time.monotonic() >= deadline:
raise TimeoutError(timeout_message)
time.sleep(0.05)
_auth_lock_holder.depth = 1
holder.depth = 1
try:
yield
finally:
_auth_lock_holder.depth = 0
holder.depth = 0
if fcntl:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
elif msvcrt:
@ -810,6 +923,25 @@ def _auth_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
pass
@contextmanager
def _auth_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
    """Reentrant cross-process advisory lock around auth.json reads+writes.

    Lock ordering invariant: when held together with
    ``_nous_shared_store_lock``, this lock is the OUTER one — acquire
    ``_auth_store_lock`` first and the shared Nous lock second. All runtime
    refresh paths follow that order; violating it risks deadlocking against
    a concurrent import on the shared store.
    """
    lock_cm = _file_lock(
        _auth_lock_path(),
        _auth_lock_holder,
        timeout_seconds,
        "Timed out waiting for auth store lock",
    )
    with lock_cm:
        yield
def _load_auth_store(auth_file: Optional[Path] = None) -> Dict[str, Any]:
auth_file = auth_file or _auth_file_path()
if not auth_file.exists():
@ -853,12 +985,27 @@ def _load_auth_store(auth_file: Optional[Path] = None) -> Dict[str, Any]:
def _save_auth_store(auth_store: Dict[str, Any]) -> Path:
auth_file = _auth_file_path()
auth_file.parent.mkdir(parents=True, exist_ok=True)
# Tighten parent dir to 0o700 so siblings can't traverse to creds.
# No-op on Windows (POSIX mode bits not enforced); ignore failures.
try:
os.chmod(auth_file.parent, 0o700)
except OSError:
pass
auth_store["version"] = AUTH_STORE_VERSION
auth_store["updated_at"] = datetime.now(timezone.utc).isoformat()
payload = json.dumps(auth_store, indent=2) + "\n"
tmp_path = auth_file.with_name(f"{auth_file.name}.tmp.{os.getpid()}.{uuid.uuid4().hex}")
try:
with tmp_path.open("w", encoding="utf-8") as handle:
# Create with 0o600 atomically via os.open(O_EXCL) + fdopen to close
# the TOCTOU window where default umask (often 0o644) briefly exposed
# OAuth tokens to other local users between open() and chmod().
# Mirrors agent/google_oauth.py (#19673) and tools/mcp_oauth.py (#21148).
fd = os.open(
str(tmp_path),
os.O_WRONLY | os.O_CREAT | os.O_EXCL,
stat.S_IRUSR | stat.S_IWUSR,
)
with os.fdopen(fd, "w", encoding="utf-8") as handle:
handle.write(payload)
handle.flush()
os.fsync(handle.fileno())
@ -932,15 +1079,50 @@ def get_auth_provider_display_name(provider_id: str) -> str:
def read_credential_pool(provider_id: Optional[str] = None) -> Dict[str, Any]:
"""Return the persisted credential pool, or one provider slice."""
"""Return the persisted credential pool, or one provider slice.
In profile mode, the profile's credential pool is authoritative. If a
provider has no entries in the profile, entries from the global-root
``auth.json`` are used as a read-only fallback so workers spawned in a
profile can see providers that were only authenticated at global scope.
Profile entries always win: the global fallback only applies per-provider
when the profile has zero entries for that provider. Once the user runs
``hermes auth add <provider>`` inside the profile, profile entries
fully shadow global for that provider on the next read.
Writes always go to the profile (``write_credential_pool`` is unchanged).
See issue #18594 follow-up.
"""
auth_store = _load_auth_store()
pool = auth_store.get("credential_pool")
if not isinstance(pool, dict):
pool = {}
global_pool: Dict[str, Any] = {}
global_store = _load_global_auth_store()
maybe_global_pool = global_store.get("credential_pool") if global_store else None
if isinstance(maybe_global_pool, dict):
global_pool = maybe_global_pool
if provider_id is None:
return dict(pool)
merged = dict(pool)
for gp_key, gp_entries in global_pool.items():
if not isinstance(gp_entries, list) or not gp_entries:
continue
# Per-provider shadowing: profile wins whenever it has ANY entries.
existing = merged.get(gp_key)
if isinstance(existing, list) and existing:
continue
merged[gp_key] = list(gp_entries)
return merged
provider_entries = pool.get(provider_id)
return list(provider_entries) if isinstance(provider_entries, list) else []
if isinstance(provider_entries, list) and provider_entries:
return list(provider_entries)
# Profile has no entries for this provider — fall back to global.
global_entries = global_pool.get(provider_id)
return list(global_entries) if isinstance(global_entries, list) else []
def write_credential_pool(provider_id: str, entries: List[Dict[str, Any]]) -> Path:
@ -999,9 +1181,25 @@ def unsuppress_credential_source(provider_id: str, source: str) -> bool:
def get_provider_auth_state(provider_id: str) -> Optional[Dict[str, Any]]:
"""Return persisted auth state for a provider, or None."""
"""Return persisted auth state for a provider, or None.
In profile mode, falls back to the global-root ``auth.json`` when the
profile has no state for this provider. Profile state always wins when
present. Writes (``_save_auth_store`` / ``persist_*_credentials``) are
unchanged — they still target the profile only. This mirrors
``read_credential_pool``'s per-provider shadowing semantics so that
``_seed_from_singletons`` can reseed a profile's credential pool from
global-scope provider state (e.g. a globally-authenticated Anthropic
OAuth or Nous device-code session). See issue #18594 follow-up.
"""
auth_store = _load_auth_store()
return _load_provider_state(auth_store, provider_id)
state = _load_provider_state(auth_store, provider_id)
if state is not None:
return state
global_store = _load_global_auth_store()
if not global_store:
return None
return _load_provider_state(global_store, provider_id)
def get_active_provider() -> Optional[str]:
@ -1195,6 +1393,17 @@ def resolve_provider(
"vllm": "custom", "llamacpp": "custom",
"llama.cpp": "custom", "llama-cpp": "custom",
}
# Extend with aliases declared in plugins/model-providers/<name>/ that aren't already mapped.
# This keeps providers/ as the single source for new aliases while the
# hardcoded dict above remains authoritative for existing ones.
try:
from providers import list_providers as _lp
for _pp in _lp():
for _alias in _pp.aliases:
if _alias not in _PROVIDER_ALIASES:
_PROVIDER_ALIASES[_alias] = _pp.name
except Exception:
pass
normalized = _PROVIDER_ALIASES.get(normalized, normalized)
if normalized == "openrouter":
@ -1360,10 +1569,33 @@ def _read_qwen_cli_tokens() -> Dict[str, Any]:
def _save_qwen_cli_tokens(tokens: Dict[str, Any]) -> Path:
auth_path = _qwen_cli_auth_path()
auth_path.parent.mkdir(parents=True, exist_ok=True)
tmp_path = auth_path.with_suffix(".tmp")
tmp_path.write_text(json.dumps(tokens, indent=2, sort_keys=True) + "\n", encoding="utf-8")
os.chmod(tmp_path, stat.S_IRUSR | stat.S_IWUSR)
tmp_path.replace(auth_path)
try:
os.chmod(auth_path.parent, 0o700)
except OSError:
pass
# Per-process random temp suffix avoids collisions between concurrent
# writers and stale leftovers from a crashed prior write.
tmp_path = auth_path.with_name(f"{auth_path.name}.tmp.{os.getpid()}.{uuid.uuid4().hex}")
# Create with 0o600 atomically via os.open(O_EXCL) — closes the TOCTOU
# window where write_text() + post-write chmod briefly exposed tokens
# at process umask (typically 0o644). See #19673, #21148.
fd = os.open(
str(tmp_path),
os.O_WRONLY | os.O_CREAT | os.O_EXCL,
stat.S_IRUSR | stat.S_IWUSR,
)
try:
with os.fdopen(fd, "w", encoding="utf-8") as fh:
fh.write(json.dumps(tokens, indent=2, sort_keys=True) + "\n")
fh.flush()
os.fsync(fh.fileno())
atomic_replace(tmp_path, auth_path)
finally:
try:
if tmp_path.exists():
tmp_path.unlink()
except OSError:
pass
return auth_path
@ -1780,9 +2012,9 @@ def _spotify_wait_for_callback(
thread = threading.Thread(target=server.serve_forever, kwargs={"poll_interval": 0.1}, daemon=True)
thread.start()
deadline = time.time() + max(5.0, timeout_seconds)
deadline = time.monotonic() + max(5.0, timeout_seconds)
try:
while time.time() < deadline:
while time.monotonic() < deadline:
if result["code"] or result["error"]:
return result
time.sleep(0.1)
@ -2545,10 +2777,10 @@ def _poll_for_token(
poll_interval: int,
) -> Dict[str, Any]:
"""Poll the token endpoint until the user approves or the code expires."""
deadline = time.time() + max(1, expires_in)
deadline = time.monotonic() + max(1, expires_in)
current_interval = max(1, min(poll_interval, DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS))
while time.time() < deadline:
while time.monotonic() < deadline:
response = client.post(
f"{portal_base_url}/api/oauth/token",
data={
@ -2606,6 +2838,7 @@ def _poll_for_token(
# -----------------------------------------------------------------------------
NOUS_SHARED_STORE_FILENAME = "nous_auth.json"
_nous_shared_lock_holder = threading.local()
def _nous_shared_auth_dir() -> Path:
@ -2645,6 +2878,69 @@ def _nous_shared_store_path() -> Path:
return path
@contextmanager
def _nous_shared_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
    """Cross-profile advisory lock for the shared Nous OAuth store.

    Lock ordering invariant: if both this lock and ``_auth_store_lock`` must
    be held, take ``_auth_store_lock`` FIRST. Every runtime refresh path
    follows that order. The one exception is
    ``_try_import_shared_nous_state``, which holds this lock alone for the
    whole refresh+mint cycle so concurrent imports on sibling profiles
    cannot race on the single-use shared refresh token; that helper must
    NOT be called while ``_auth_store_lock`` is already held.
    """
    try:
        store_path = _nous_shared_store_path()
    except RuntimeError:
        # No HERMES_HOME yet (pre-setup): proceed without any locking.
        yield
        return
    with _file_lock(
        store_path.with_suffix(".lock"),
        _nous_shared_lock_holder,
        timeout_seconds,
        "Timed out waiting for shared Nous auth lock",
    ):
        yield
def _merge_shared_nous_oauth_state(state: Dict[str, Any]) -> bool:
    """Copy fresher shared OAuth tokens into a profile-local Nous state.

    Returns ``True`` when *state* was updated from the shared store (either a
    rotated refresh token or a later access-token expiry), ``False`` when the
    shared store is absent, carries no usable refresh token, or holds nothing
    newer than the local state. Mutates *state* in place.
    """
    shared = _read_shared_nous_state()
    if not shared:
        return False
    shared_refresh = shared.get("refresh_token")
    if not isinstance(shared_refresh, str) or not shared_refresh.strip():
        return False
    local_refresh = str(state.get("refresh_token") or "").strip()
    # "Fresher" means a rotated refresh token OR a later access expiry.
    rotated = shared_refresh.strip() != local_refresh
    shared_expiry = _parse_iso_timestamp(shared.get("expires_at")) or 0.0
    local_expiry = _parse_iso_timestamp(state.get("expires_at")) or 0.0
    if not rotated and shared_expiry <= local_expiry:
        return False
    sync_keys = (
        "access_token",
        "refresh_token",
        "token_type",
        "scope",
        "client_id",
        "portal_base_url",
        "inference_base_url",
        "obtained_at",
        "expires_at",
    )
    # Copy only populated fields so an empty shared value never clobbers a
    # present local one.
    for field in sync_keys:
        candidate = shared.get(field)
        if candidate not in (None, ""):
            state[field] = candidate
    return True
def _write_shared_nous_state(state: Dict[str, Any]) -> None:
"""Persist a minimal copy of the Nous OAuth state to the shared store.
@ -2677,15 +2973,34 @@ def _write_shared_nous_state(state: Dict[str, Any]) -> None:
"updated_at": datetime.now(timezone.utc).isoformat(),
}
try:
path = _nous_shared_store_path()
path.parent.mkdir(parents=True, exist_ok=True)
tmp = path.with_suffix(path.suffix + ".tmp")
tmp.write_text(json.dumps(shared, indent=2, sort_keys=True))
try:
os.chmod(tmp, 0o600)
except OSError:
pass
os.replace(tmp, path)
with _nous_shared_store_lock():
path = _nous_shared_store_path()
path.parent.mkdir(parents=True, exist_ok=True)
try:
os.chmod(path.parent, 0o700)
except OSError:
pass
tmp = path.with_name(f"{path.name}.tmp.{os.getpid()}.{uuid.uuid4().hex}")
# Create with 0o600 atomically via os.open(O_EXCL) — closes the TOCTOU
# window where write_text() + post-write chmod briefly exposed Nous
# refresh_token at process umask. See #19673, #21148.
fd = os.open(
str(tmp),
os.O_WRONLY | os.O_CREAT | os.O_EXCL,
stat.S_IRUSR | stat.S_IWUSR,
)
try:
with os.fdopen(fd, "w", encoding="utf-8") as fh:
fh.write(json.dumps(shared, indent=2, sort_keys=True))
fh.flush()
os.fsync(fh.fileno())
os.replace(tmp, path)
finally:
try:
if tmp.exists():
tmp.unlink()
except OSError:
pass
_oauth_trace(
"nous_shared_store_written",
path=str(path),
@ -2742,36 +3057,38 @@ def _try_import_shared_nous_state(
etc.) caller should then fall through to the normal device-code
flow.
"""
shared = _read_shared_nous_state()
if not shared:
return None
# Build a full state dict so refresh_nous_oauth_from_state has every
# field it needs. force_refresh=True gets us a fresh access_token
# for this profile; force_mint=True gets us a fresh agent_key.
state: Dict[str, Any] = {
"access_token": shared.get("access_token"),
"refresh_token": shared.get("refresh_token"),
"client_id": shared.get("client_id") or DEFAULT_NOUS_CLIENT_ID,
"portal_base_url": shared.get("portal_base_url") or DEFAULT_NOUS_PORTAL_URL,
"inference_base_url": shared.get("inference_base_url") or DEFAULT_NOUS_INFERENCE_URL,
"token_type": shared.get("token_type") or "Bearer",
"scope": shared.get("scope") or DEFAULT_NOUS_SCOPE,
"obtained_at": shared.get("obtained_at"),
"expires_at": shared.get("expires_at"),
"agent_key": None,
"agent_key_expires_at": None,
"tls": {"insecure": False, "ca_bundle": None},
}
try:
refreshed = refresh_nous_oauth_from_state(
state,
min_key_ttl_seconds=min_key_ttl_seconds,
timeout_seconds=timeout_seconds,
force_refresh=True,
force_mint=True,
)
with _nous_shared_store_lock(timeout_seconds=max(timeout_seconds + 5.0, AUTH_LOCK_TIMEOUT_SECONDS)):
shared = _read_shared_nous_state()
if not shared:
return None
# Build a full state dict so refresh_nous_oauth_from_state has every
# field it needs. force_refresh=True gets us a fresh access_token
# for this profile; force_mint=True gets us a fresh agent_key.
state: Dict[str, Any] = {
"access_token": shared.get("access_token"),
"refresh_token": shared.get("refresh_token"),
"client_id": shared.get("client_id") or DEFAULT_NOUS_CLIENT_ID,
"portal_base_url": shared.get("portal_base_url") or DEFAULT_NOUS_PORTAL_URL,
"inference_base_url": shared.get("inference_base_url") or DEFAULT_NOUS_INFERENCE_URL,
"token_type": shared.get("token_type") or "Bearer",
"scope": shared.get("scope") or DEFAULT_NOUS_SCOPE,
"obtained_at": shared.get("obtained_at"),
"expires_at": shared.get("expires_at"),
"agent_key": None,
"agent_key_expires_at": None,
"tls": {"insecure": False, "ca_bundle": None},
}
refreshed = refresh_nous_oauth_from_state(
state,
min_key_ttl_seconds=min_key_ttl_seconds,
timeout_seconds=timeout_seconds,
force_refresh=True,
force_mint=True,
)
_write_shared_nous_state(refreshed)
except AuthError as exc:
_oauth_trace(
"nous_shared_import_failed",
@ -2973,59 +3290,65 @@ def resolve_nous_access_token(
client_id = str(state.get("client_id") or DEFAULT_NOUS_CLIENT_ID)
verify = _resolve_verify(insecure=insecure, ca_bundle=ca_bundle, auth_state=state)
access_token = state.get("access_token")
refresh_token = state.get("refresh_token")
if not isinstance(access_token, str) or not access_token:
raise AuthError(
"No access token found for Nous Portal login.",
provider="nous",
relogin_required=True,
)
with _nous_shared_store_lock(timeout_seconds=max(timeout_seconds + 5.0, AUTH_LOCK_TIMEOUT_SECONDS)):
merged_shared = _merge_shared_nous_oauth_state(state)
access_token = state.get("access_token")
refresh_token = state.get("refresh_token")
if not isinstance(access_token, str) or not access_token:
raise AuthError(
"No access token found for Nous Portal login.",
provider="nous",
relogin_required=True,
)
if not _is_expiring(state.get("expires_at"), refresh_skew_seconds):
return access_token
if not _is_expiring(state.get("expires_at"), refresh_skew_seconds):
if merged_shared:
_save_provider_state(auth_store, "nous", state)
_save_auth_store(auth_store)
return access_token
if not isinstance(refresh_token, str) or not refresh_token:
raise AuthError(
"Session expired and no refresh token is available.",
provider="nous",
relogin_required=True,
)
if not isinstance(refresh_token, str) or not refresh_token:
raise AuthError(
"Session expired and no refresh token is available.",
provider="nous",
relogin_required=True,
)
timeout = httpx.Timeout(timeout_seconds if timeout_seconds else 15.0)
with httpx.Client(
timeout=timeout,
headers={"Accept": "application/json"},
verify=verify,
) as client:
refreshed = _refresh_access_token(
client=client,
portal_base_url=portal_base_url,
client_id=client_id,
refresh_token=refresh_token,
)
timeout = httpx.Timeout(timeout_seconds if timeout_seconds else 15.0)
with httpx.Client(
timeout=timeout,
headers={"Accept": "application/json"},
verify=verify,
) as client:
refreshed = _refresh_access_token(
client=client,
portal_base_url=portal_base_url,
client_id=client_id,
refresh_token=refresh_token,
)
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl,
tz=timezone.utc,
).isoformat()
state["portal_base_url"] = portal_base_url
state["client_id"] = client_id
state["tls"] = {
"insecure": verify is False,
"ca_bundle": verify if isinstance(verify, str) else None,
}
_save_provider_state(auth_store, "nous", state)
_save_auth_store(auth_store)
return state["access_token"]
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl,
tz=timezone.utc,
).isoformat()
state["portal_base_url"] = portal_base_url
state["client_id"] = client_id
state["tls"] = {
"insecure": verify is False,
"ca_bundle": verify if isinstance(verify, str) else None,
}
_save_provider_state(auth_store, "nous", state)
_save_auth_store(auth_store)
_write_shared_nous_state(state)
return state["access_token"]
def refresh_nous_oauth_pure(
@ -3293,46 +3616,53 @@ def resolve_nous_runtime_credentials(
# Step 1: refresh access token if expiring
if _is_expiring(state.get("expires_at"), ACCESS_TOKEN_REFRESH_SKEW_SECONDS):
if not isinstance(refresh_token, str) or not refresh_token:
raise AuthError("Session expired and no refresh token is available.",
provider="nous", relogin_required=True)
with _nous_shared_store_lock(timeout_seconds=max(timeout_seconds + 5.0, AUTH_LOCK_TIMEOUT_SECONDS)):
if _merge_shared_nous_oauth_state(state):
access_token = state.get("access_token")
refresh_token = state.get("refresh_token")
_persist_state("post_shared_merge_access_expiring")
_oauth_trace(
"refresh_start",
sequence_id=sequence_id,
reason="access_expiring",
refresh_token_fp=_token_fingerprint(refresh_token),
)
refreshed = _refresh_access_token(
client=client, portal_base_url=portal_base_url,
client_id=client_id, refresh_token=refresh_token,
)
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
previous_refresh_token = refresh_token
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
refreshed_url = _optional_base_url(refreshed.get("inference_base_url"))
if refreshed_url:
inference_base_url = refreshed_url
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl, tz=timezone.utc
).isoformat()
access_token = state["access_token"]
refresh_token = state["refresh_token"]
_oauth_trace(
"refresh_success",
sequence_id=sequence_id,
reason="access_expiring",
previous_refresh_token_fp=_token_fingerprint(previous_refresh_token),
new_refresh_token_fp=_token_fingerprint(refresh_token),
)
# Persist immediately so downstream mint failures cannot drop rotated refresh tokens.
_persist_state("post_refresh_access_expiring")
if _is_expiring(state.get("expires_at"), ACCESS_TOKEN_REFRESH_SKEW_SECONDS):
if not isinstance(refresh_token, str) or not refresh_token:
raise AuthError("Session expired and no refresh token is available.",
provider="nous", relogin_required=True)
_oauth_trace(
"refresh_start",
sequence_id=sequence_id,
reason="access_expiring",
refresh_token_fp=_token_fingerprint(refresh_token),
)
refreshed = _refresh_access_token(
client=client, portal_base_url=portal_base_url,
client_id=client_id, refresh_token=refresh_token,
)
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
previous_refresh_token = refresh_token
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
refreshed_url = _optional_base_url(refreshed.get("inference_base_url"))
if refreshed_url:
inference_base_url = refreshed_url
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl, tz=timezone.utc
).isoformat()
access_token = state["access_token"]
refresh_token = state["refresh_token"]
_oauth_trace(
"refresh_success",
sequence_id=sequence_id,
reason="access_expiring",
previous_refresh_token_fp=_token_fingerprint(previous_refresh_token),
new_refresh_token_fp=_token_fingerprint(refresh_token),
)
# Persist immediately so downstream mint failures cannot drop rotated refresh tokens.
_persist_state("post_refresh_access_expiring")
# Step 2: mint agent key if missing/expiring
used_cached_key = False
@ -3365,41 +3695,47 @@ def resolve_nous_runtime_credentials(
and isinstance(latest_refresh_token, str)
and latest_refresh_token
):
_oauth_trace(
"refresh_start",
sequence_id=sequence_id,
reason="mint_retry_after_invalid_token",
refresh_token_fp=_token_fingerprint(latest_refresh_token),
)
refreshed = _refresh_access_token(
client=client, portal_base_url=portal_base_url,
client_id=client_id, refresh_token=latest_refresh_token,
)
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or latest_refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
refreshed_url = _optional_base_url(refreshed.get("inference_base_url"))
if refreshed_url:
inference_base_url = refreshed_url
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl, tz=timezone.utc
).isoformat()
access_token = state["access_token"]
refresh_token = state["refresh_token"]
_oauth_trace(
"refresh_success",
sequence_id=sequence_id,
reason="mint_retry_after_invalid_token",
previous_refresh_token_fp=_token_fingerprint(latest_refresh_token),
new_refresh_token_fp=_token_fingerprint(refresh_token),
)
# Persist retry refresh immediately for crash safety and cross-process visibility.
_persist_state("post_refresh_mint_retry")
with _nous_shared_store_lock(timeout_seconds=max(timeout_seconds + 5.0, AUTH_LOCK_TIMEOUT_SECONDS)):
if _merge_shared_nous_oauth_state(state):
access_token = state.get("access_token")
latest_refresh_token = state.get("refresh_token")
_persist_state("post_shared_merge_mint_retry")
else:
_oauth_trace(
"refresh_start",
sequence_id=sequence_id,
reason="mint_retry_after_invalid_token",
refresh_token_fp=_token_fingerprint(latest_refresh_token),
)
refreshed = _refresh_access_token(
client=client, portal_base_url=portal_base_url,
client_id=client_id, refresh_token=latest_refresh_token,
)
now = datetime.now(timezone.utc)
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
state["access_token"] = refreshed["access_token"]
state["refresh_token"] = refreshed.get("refresh_token") or latest_refresh_token
state["token_type"] = refreshed.get("token_type") or state.get("token_type") or "Bearer"
state["scope"] = refreshed.get("scope") or state.get("scope")
refreshed_url = _optional_base_url(refreshed.get("inference_base_url"))
if refreshed_url:
inference_base_url = refreshed_url
state["obtained_at"] = now.isoformat()
state["expires_in"] = access_ttl
state["expires_at"] = datetime.fromtimestamp(
now.timestamp() + access_ttl, tz=timezone.utc
).isoformat()
access_token = state["access_token"]
refresh_token = state["refresh_token"]
_oauth_trace(
"refresh_success",
sequence_id=sequence_id,
reason="mint_retry_after_invalid_token",
previous_refresh_token_fp=_token_fingerprint(latest_refresh_token),
new_refresh_token_fp=_token_fingerprint(refresh_token),
)
# Persist retry refresh immediately for crash safety and cross-process visibility.
_persist_state("post_refresh_mint_retry")
mint_payload = _mint_agent_key(
client=client, portal_base_url=portal_base_url,
@ -3895,6 +4231,14 @@ def _config_provider_matches(provider_id: Optional[str]) -> bool:
return _get_config_provider() == provider_id.strip().lower()
def _should_reset_config_provider_on_logout(provider_id: Optional[str]) -> bool:
"""Return True when logout should reset the model provider config."""
if not provider_id:
return False
normalized = provider_id.strip().lower()
return normalized in PROVIDER_REGISTRY and _config_provider_matches(normalized)
def _logout_default_provider_from_config() -> Optional[str]:
"""Fallback logout target when auth.json has no active provider.
@ -4980,15 +5324,18 @@ def logout_command(args) -> None:
print("No provider is currently logged in.")
return
config_matches = _config_provider_matches(target)
should_reset_config = _should_reset_config_provider_on_logout(target)
provider_name = get_auth_provider_display_name(target)
if clear_provider_auth(target) or config_matches:
_reset_config_provider()
if clear_provider_auth(target) or should_reset_config:
if should_reset_config:
_reset_config_provider()
print(f"Logged out of {provider_name}.")
if os.getenv("OPENROUTER_API_KEY"):
if should_reset_config and os.getenv("OPENROUTER_API_KEY"):
print("Hermes will use OpenRouter for inference.")
else:
elif should_reset_config:
print("Run `hermes model` or configure an API key to use Hermes.")
else:
print("Model provider configuration was unchanged.")
else:
print(f"No auth state found for {provider_name}.")

244
hermes_cli/checkpoints.py Normal file
View file

@ -0,0 +1,244 @@
"""`hermes checkpoints` CLI subcommand.
Gives users direct visibility and control over the filesystem checkpoint
store at ``~/.hermes/checkpoints/``. Actions:
hermes checkpoints # same as `status`
hermes checkpoints status # total size, project count, breakdown
hermes checkpoints list # per-project checkpoint counts + workdir
hermes checkpoints prune [opts] # force a sweep (ignores the 24h marker)
hermes checkpoints clear [-f] # nuke the entire base (asks first)
hermes checkpoints clear-legacy # delete just the legacy-* archives
Examples::
hermes checkpoints
hermes checkpoints prune --retention-days 3 --max-size-mb 200
hermes checkpoints clear -f
None of these require the agent to be running. Safe to call any time.
"""
from __future__ import annotations
import argparse
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict
def _fmt_bytes(n: int) -> str:
units = ("B", "KB", "MB", "GB", "TB")
size = float(n or 0)
for unit in units:
if size < 1024 or unit == units[-1]:
if unit == "B":
return f"{int(size)} {unit}"
return f"{size:.1f} {unit}"
size /= 1024
return f"{size:.1f} TB"
def _fmt_ts(ts: Any) -> str:
try:
return datetime.fromtimestamp(float(ts)).strftime("%Y-%m-%d %H:%M")
except (TypeError, ValueError):
return ""
def _fmt_age(ts: Any) -> str:
try:
age = time.time() - float(ts)
except (TypeError, ValueError):
return ""
if age < 0:
return "now"
if age < 60:
return f"{int(age)}s ago"
if age < 3600:
return f"{int(age / 60)}m ago"
if age < 86400:
return f"{int(age / 3600)}h ago"
return f"{int(age / 86400)}d ago"
def cmd_status(args: argparse.Namespace) -> int:
    """Print store totals, a recency-sorted project table, and legacy archives.

    Always returns 0 — status is informational and cannot fail usefully.
    """
    from tools.checkpoint_manager import store_status

    info = store_status()
    print(f"Checkpoint base: {info['base']}")
    print(f"Total size: {_fmt_bytes(info['total_size_bytes'])}")
    print(f" store/ {_fmt_bytes(info['store_size_bytes'])}")
    print(f" legacy-* {_fmt_bytes(info['legacy_size_bytes'])}")
    print(f"Projects: {info['project_count']}")

    # Most-recently-touched projects first; cap the table at --limit rows
    # (20 when the attribute is absent or falsy, e.g. from the bare parser).
    row_limit = getattr(args, "limit", None) or 20
    recent_first = sorted(
        info["projects"],
        key=lambda proj: proj.get("last_touch") or 0,
        reverse=True,
    )
    if recent_first:
        print()
        print(f" {'WORKDIR':<60} {'COMMITS':>7} {'LAST TOUCH':>12} STATE")
        for proj in recent_first[:row_limit]:
            workdir = proj.get("workdir") or "(unknown)"
            if len(workdir) > 60:
                # Keep the tail — the leaf directory is the informative part.
                workdir = workdir[-59:]
            state = "live" if proj.get("exists") else "orphan"
            commits = proj.get("commits", 0)
            touched = _fmt_age(proj.get("last_touch"))
            print(f" {workdir:<60} {commits:>7} {touched:>12} {state}")

    legacy = info.get("legacy_archives", [])
    if legacy:
        print()
        print(f"Legacy archives ({len(legacy)}):")
        for arch in sorted(legacy, key=lambda a: a.get("mtime", 0), reverse=True):
            print(f" {arch['name']:<40} {_fmt_bytes(arch['size_bytes']):>10}")
        print()
        print("Clear with: hermes checkpoints clear-legacy")
    return 0
def cmd_list(args: argparse.Namespace) -> int:
    """Alias for ``status`` — the status table already lists every project."""
    return cmd_status(args)
def cmd_prune(args: argparse.Namespace) -> int:
    """Force a sweep of the checkpoint store and report what was reclaimed.

    Ignores the 24h auto-prune marker — this runs unconditionally.
    """
    from tools.checkpoint_manager import prune_checkpoints

    delete_orphans = not args.keep_orphans
    print("Pruning checkpoint store…")
    print(f" retention_days: {args.retention_days}")
    print(f" delete_orphans: {delete_orphans}")
    print(f" max_total_size_mb: {args.max_size_mb}")
    print()
    outcome = prune_checkpoints(
        retention_days=args.retention_days,
        delete_orphans=delete_orphans,
        max_total_size_mb=args.max_size_mb,
    )
    print(f"Scanned: {outcome['scanned']}")
    print(f"Deleted orphan: {outcome['deleted_orphan']}")
    print(f"Deleted stale: {outcome['deleted_stale']}")
    print(f"Errors: {outcome['errors']}")
    print(f"Bytes reclaimed: {_fmt_bytes(outcome['bytes_freed'])}")
    return 0
def _confirm(prompt: str) -> bool:
    """Ask a yes/no question on stdin; only 'y' / 'yes' counts as yes."""
    try:
        answer = input(f"{prompt} [y/N]: ")
    except (EOFError, KeyboardInterrupt):
        # ^C / ^D is a refusal — emit a newline so the shell prompt is clean.
        print()
        return False
    return answer.strip().lower() in ("y", "yes")
def cmd_clear(args: argparse.Namespace) -> int:
    """Delete the entire checkpoint base after an explicit confirmation.

    Returns 0 on success (or nothing to do), 1 when the user aborts,
    2 when deletion fails.
    """
    from tools.checkpoint_manager import CHECKPOINT_BASE, clear_all, store_status

    info = store_status()
    if info["total_size_bytes"] == 0 and not Path(CHECKPOINT_BASE).exists():
        print("Nothing to clear — checkpoint base does not exist.")
        return 0

    print(f"This will delete the ENTIRE checkpoint base at {info['base']}")
    print(f" size: {_fmt_bytes(info['total_size_bytes'])}")
    print(f" projects: {info['project_count']}")
    print(f" legacy dirs: {len(info.get('legacy_archives', []))}")
    print()
    print("All /rollback history for every working directory will be lost.")

    if not args.force:
        # Interactive safety valve: -f/--force skips the prompt entirely.
        if not _confirm("Proceed?"):
            print("Aborted.")
            return 1

    outcome = clear_all()
    if not outcome["deleted"]:
        print("Could not clear checkpoint base (see logs).")
        return 2
    print(f"Cleared. Reclaimed {_fmt_bytes(outcome['bytes_freed'])}.")
    return 0
def cmd_clear_legacy(args: argparse.Namespace) -> int:
    """Delete only the ``legacy-*`` archives left by the v1→v2 migration.

    Returns 0 on success (or nothing to do), 1 when the user aborts.
    """
    from tools.checkpoint_manager import clear_legacy, store_status

    legacy = store_status().get("legacy_archives", [])
    if not legacy:
        print("No legacy archives to clear.")
        return 0

    total_bytes = sum(arch.get("size_bytes", 0) for arch in legacy)
    print(f"Found {len(legacy)} legacy archive(s), total {_fmt_bytes(total_bytes)}:")
    for arch in legacy:
        print(f" {arch['name']:<40} {_fmt_bytes(arch['size_bytes']):>10}")
    print()
    print("Legacy archives hold pre-v2 per-project shadow repos, moved aside")
    print("during the single-store migration. Delete when you're confident")
    print("you don't need the old /rollback history.")

    if not args.force and not _confirm("Delete all legacy archives?"):
        print("Aborted.")
        return 1

    outcome = clear_legacy()
    print(f"Deleted {outcome['deleted']} archive(s), reclaimed {_fmt_bytes(outcome['bytes_freed'])}.")
    return 0
def register_cli(parser: argparse.ArgumentParser) -> None:
    """Wire subcommands onto the ``hermes checkpoints`` parser."""
    # Bare `hermes checkpoints` falls through to the status view.
    parser.set_defaults(func=cmd_status)
    subs = parser.add_subparsers(dest="checkpoints_command", metavar="COMMAND")

    status_parser = subs.add_parser(
        "status",
        help="Show total size, project count, and per-project breakdown",
    )
    status_parser.add_argument(
        "--limit", type=int, default=20,
        help="Max projects to list (default 20)",
    )
    status_parser.set_defaults(func=cmd_status)

    list_parser = subs.add_parser("list", help="Alias for 'status'")
    list_parser.add_argument("--limit", type=int, default=20)
    list_parser.set_defaults(func=cmd_list)

    prune_parser = subs.add_parser(
        "prune",
        help="Delete orphan/stale checkpoints and GC the store",
    )
    prune_parser.add_argument(
        "--retention-days", type=int, default=7,
        help="Drop projects whose last_touch is older than N days (default 7)",
    )
    prune_parser.add_argument(
        "--max-size-mb", type=int, default=500,
        help="After orphan/stale prune, drop oldest commits "
             "per project until total size <= this (default 500)",
    )
    prune_parser.add_argument(
        "--keep-orphans", action="store_true",
        help="Skip deleting projects whose workdir no longer exists",
    )
    prune_parser.set_defaults(func=cmd_prune)

    clear_parser = subs.add_parser(
        "clear",
        help="Delete the entire checkpoint base (all /rollback history)",
    )
    clear_parser.add_argument(
        "-f", "--force", action="store_true",
        help="Skip confirmation prompt",
    )
    clear_parser.set_defaults(func=cmd_clear)

    legacy_parser = subs.add_parser(
        "clear-legacy",
        help="Delete only the legacy-<ts>/ archives from v1 migration",
    )
    legacy_parser.add_argument(
        "-f", "--force", action="store_true",
        help="Skip confirmation prompt",
    )
    legacy_parser.set_defaults(func=cmd_clear_legacy)

View file

@ -235,6 +235,9 @@ def _scan_workspace_state(source_dir: Path) -> list[tuple[Path, str]]:
"""
findings: list[tuple[Path, str]] = []
if not source_dir.exists():
return findings
# Direct state files in the root
for name in ("todo.json", "sessions", "logs"):
candidate = source_dir / name
@ -243,7 +246,12 @@ def _scan_workspace_state(source_dir: Path) -> list[tuple[Path, str]]:
findings.append((candidate, f"Root {kind}: {name}"))
# State files inside workspace directories
for child in sorted(source_dir.iterdir()):
try:
children = sorted(source_dir.iterdir())
except OSError:
return findings
for child in children:
if not child.is_dir() or child.name.startswith("."):
continue
# Check for workspace-like subdirectories

View file

@ -65,6 +65,8 @@ COMMAND_REGISTRY: list[CommandDef] = [
# Session
CommandDef("new", "Start a new session (fresh session ID + history)", "Session",
aliases=("reset",), args_hint="[name]"),
CommandDef("topic", "Enable or inspect Telegram DM topic sessions", "Session",
gateway_only=True, args_hint="[off|help|session-id]"),
CommandDef("clear", "Clear screen and start a new session", "Session",
cli_only=True),
CommandDef("redraw", "Force a full UI repaint (recovers from terminal drift)", "Session",
@ -107,6 +109,9 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("resume", "Resume a previously-named session", "Session",
args_hint="[name]"),
# Configuration
CommandDef("sessions", "Browse and resume previous sessions", "Session"),
# Configuration
CommandDef("config", "Show current configuration", "Configuration",
cli_only=True),
@ -155,9 +160,9 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("cron", "Manage scheduled tasks", "Tools & Skills",
cli_only=True, args_hint="[subcommand]",
subcommands=("list", "add", "create", "edit", "pause", "resume", "run", "remove")),
CommandDef("curator", "Background skill maintenance (status, run, pin, archive)",
CommandDef("curator", "Background skill maintenance (status, run, pin, archive, list-archived)",
"Tools & Skills", args_hint="[subcommand]",
subcommands=("status", "run", "pause", "resume", "pin", "unpin", "restore")),
subcommands=("status", "run", "pause", "resume", "pin", "unpin", "restore", "list-archived")),
CommandDef("kanban", "Multi-profile collaboration board (tasks, links, comments)",
"Tools & Skills", args_hint="[subcommand]",
subcommands=("list", "ls", "show", "create", "assign", "link", "unlink",

View file

@ -544,12 +544,25 @@ DEFAULT_CONFIG = {
# via TERMINAL_LOCAL_PERSISTENT env var.
"persistent_shell": True,
},
"web": {
"backend": "", # shared fallback — applies to both search and extract
"search_backend": "", # per-capability override for web_search (e.g. "searxng")
"extract_backend": "", # per-capability override for web_extract (e.g. "native")
},
"browser": {
"inactivity_timeout": 120,
"command_timeout": 30, # Timeout for browser commands in seconds (screenshot, navigate, etc.)
"record_sessions": False, # Auto-record browser sessions as WebM videos
"allow_private_urls": False, # Allow navigating to private/internal IPs (localhost, 192.168.x.x, etc.)
# Browser engine for local mode. Passed as ``--engine <value>`` to
# agent-browser v0.25.3+.
# "auto" — use Chrome (default, don't pass --engine at all)
# "lightpanda" — use Lightpanda (1.3-5.8x faster navigation, no screenshots)
# "chrome" — explicitly request Chrome
# Also settable via AGENT_BROWSER_ENGINE env var.
"engine": "auto",
"auto_local_for_private_urls": True, # When a cloud provider is set, auto-spawn local Chromium for LAN/localhost URLs instead of sending them to the cloud
"cdp_url": "", # Optional persistent CDP endpoint for attaching to an existing Chromium/Chrome
# CDP supervisor — dialog + frame detection via a persistent WebSocket.
@ -567,21 +580,39 @@ DEFAULT_CONFIG = {
},
# Filesystem checkpoints — automatic snapshots before destructive file ops.
# When enabled, the agent takes a snapshot of the working directory once per
# conversation turn (on first write_file/patch call). Use /rollback to restore.
# When enabled, the agent takes a snapshot of the working directory once
# per conversation turn (on first write_file/patch call). Use /rollback
# to restore.
#
# Defaults changed in v2 (single shared shadow store, real pruning):
# - enabled: True -> False (opt-in; most users never use /rollback)
# - max_snapshots: 50 -> 20 (now actually enforced via ref rewrite)
# - auto_prune: False -> True (orphans/stale pruned automatically)
# Opt in via ``hermes chat --checkpoints`` or set enabled=True here.
"checkpoints": {
"enabled": True,
"max_snapshots": 50, # Max checkpoints to keep per directory
# Auto-maintenance: shadow repos accumulate forever under
# ~/.hermes/checkpoints/ (one per cd'd working directory). Field
# reports put the typical offender at 1000+ repos / ~12 GB. When
# auto_prune is on, hermes sweeps at startup (at most once per
# min_interval_hours) and deletes:
# * orphan repos: HERMES_WORKDIR no longer exists on disk
# * stale repos: newest mtime older than retention_days
# Opt-in so users who rely on /rollback against long-ago sessions
# never lose data silently.
"auto_prune": False,
"enabled": False,
# Max checkpoints to keep per working directory. Pre-v2 this only
# limited the `/rollback` listing; v2 actually rewrites the ref and
# garbage-collects older commits.
"max_snapshots": 20,
# Hard ceiling on total ``~/.hermes/checkpoints/`` size (MB). When
# exceeded, the oldest checkpoint per project is dropped in a
# round-robin pass until total size falls under the cap.
# 0 disables the size cap.
"max_total_size_mb": 500,
# Skip any single file larger than this when staging a checkpoint.
# Prevents accidental snapshotting of datasets, model weights, and
# other large generated assets. 0 disables the filter.
"max_file_size_mb": 10,
# Auto-maintenance: hermes sweeps the checkpoint base at startup
# (at most once per ``min_interval_hours``) and:
# * deletes project entries whose workdir no longer exists (orphan)
# * deletes project entries whose last_touch is older than
# ``retention_days``
# * GCs the single shared store to reclaim unreachable objects
# * enforces ``max_total_size_mb`` across remaining projects
# * deletes ``legacy-*`` archives older than ``retention_days``
"auto_prune": True,
"retention_days": 7,
"delete_orphans": True,
"min_interval_hours": 24,
@ -749,6 +780,19 @@ DEFAULT_CONFIG = {
"timeout": 30,
"extra_body": {},
},
# Triage specifier — flesh out a rough one-liner in the Kanban
# Triage column into a concrete spec, then promote it to ``todo``.
# Invoked by ``hermes kanban specify`` (single id or --all). Set a
# cheap, capable model here (gemini-flash works well); the main
# model is overkill for short spec expansion.
"triage_specifier": {
"provider": "auto",
"model": "",
"base_url": "",
"api_key": "",
"timeout": 120,
"extra_body": {},
},
# Curator — skill-usage review fork. Timeout is generous because the
# review pass can take several minutes on reasoning models (umbrella
# building over hundreds of candidate skills). "auto" = use main chat
@ -778,9 +822,19 @@ DEFAULT_CONFIG = {
"show_reasoning": False,
"streaming": False,
"final_response_markdown": "strip", # render | strip | raw
# Preserve recent classic CLI output across Ctrl+L, /redraw, and
# terminal resize full-screen clears. Disable if a terminal emulator
# behaves badly with replayed scrollback.
"persistent_output": True,
"persistent_output_max_lines": 200,
"inline_diffs": True, # Show inline diff previews for write actions (write_file, patch, skill_manage)
"show_cost": False, # Show $ cost in the status bar (off by default)
"skin": "default",
# UI language for static user-facing messages (approval prompts, a
# handful of gateway slash-command replies). Does NOT affect agent
# responses, log lines, tool outputs, or slash-command descriptions.
# Supported: en, zh, ja, de, es, fr, tr, uk. Unknown values fall back to en.
"language": "en",
# TUI busy indicator style: kaomoji (default), emoji, unicode (braille
# spinner), or ascii. Live-swappable via `/indicator <style>`.
"tui_status_indicator": "kaomoji",
@ -809,6 +863,7 @@ DEFAULT_CONFIG = {
"enabled": False,
"fields": ["model", "context_pct", "cwd"], # Order shown; drop any to hide
},
"copy_shortcut": "auto", # "auto" (platform default) | "ctrl_c" | "ctrl_shift_c" | "disabled"
},
# Web dashboard settings
@ -1058,6 +1113,14 @@ DEFAULT_CONFIG = {
# Empty string means use server-local time.
"timezone": "",
# Slack platform settings (gateway mode)
"slack": {
"require_mention": True, # Require @mention to respond in channels
"free_response_channels": "", # Comma-separated channel IDs where bot responds without mention
"allowed_channels": "", # If set, bot ONLY responds in these channel IDs (whitelist)
"channel_prompts": {}, # Per-channel ephemeral system prompts
},
# Discord platform settings (gateway mode)
"discord": {
"require_mention": True, # Require @mention to respond in server channels
@ -1066,6 +1129,12 @@ DEFAULT_CONFIG = {
"auto_thread": True, # Auto-create threads on @mention in channels (like Slack)
"reactions": True, # Add 👀/✅/❌ reactions to messages during processing
"channel_prompts": {}, # Per-channel ephemeral system prompts (forum parents apply to child threads)
# Opt-in DM role-based auth (#12136). By default, DISCORD_ALLOWED_ROLES
# authorizes only guild messages in the role's own guild — DMs require
# DISCORD_ALLOWED_USERS. Set dm_role_auth_guild to a guild ID to also
# authorize DMs from members of that one trusted guild holding the
# allowed role. Unset / empty / 0 = secure default (DM role-auth off).
"dm_role_auth_guild": "",
# discord / discord_admin tools: restrict which actions the agent may call.
# Default (empty) = all actions allowed (subject to bot privileged intents).
# Accepts comma-separated string ("list_guilds,list_channels,fetch_messages")
@ -1088,18 +1157,24 @@ DEFAULT_CONFIG = {
"telegram": {
"reactions": False, # Add 👀/✅/❌ reactions to messages during processing
"channel_prompts": {}, # Per-chat/topic ephemeral system prompts (topics inherit from parent group)
},
# Slack platform settings (gateway mode)
"slack": {
"channel_prompts": {}, # Per-channel ephemeral system prompts
"allowed_chats": "", # If set, bot ONLY responds in these group/supergroup chat IDs (whitelist)
},
# Mattermost platform settings (gateway mode)
"mattermost": {
"require_mention": True, # Require @mention to respond in channels
"free_response_channels": "", # Comma-separated channel IDs where bot responds without mention
"allowed_channels": "", # If set, bot ONLY responds in these channel IDs (whitelist)
"channel_prompts": {}, # Per-channel ephemeral system prompts
},
# Matrix platform settings (gateway mode)
"matrix": {
"require_mention": True, # Require @mention to respond in rooms
"free_response_rooms": "", # Comma-separated room IDs where bot responds without mention
"allowed_rooms": "", # If set, bot ONLY responds in these room IDs (whitelist)
},
# Approval mode for dangerous commands:
# manual — always prompt the user (default)
# smart — use auxiliary LLM to auto-approve low-risk commands, prompt for high-risk
@ -1149,7 +1224,7 @@ DEFAULT_CONFIG = {
# Pre-exec security scanning via tirith
"security": {
"allow_private_urls": False, # Allow requests to private/internal IPs (for OpenWrt, proxies, VPNs)
"redact_secrets": False,
"redact_secrets": True,
"tirith_enabled": True,
"tirith_path": "tirith",
"tirith_timeout": 5,
@ -1188,6 +1263,10 @@ DEFAULT_CONFIG = {
# Seconds between dispatcher ticks (idle or not). Lower = snappier
# pickup of newly-ready tasks; higher = less SQL pressure.
"dispatch_interval_seconds": 60,
# Auto-block after this many consecutive non-success attempts for the
# same task/profile (spawn_failed, timed_out, or crashed). Reassignment
# resets the streak for the new profile.
"failure_limit": 2,
},
# execute_code settings — controls the tool used for programmatic tool calls.
@ -1790,6 +1869,22 @@ OPTIONAL_ENV_VARS = {
"password": True,
"category": "tool",
},
"SEARXNG_URL": {
"description": "URL of your SearXNG instance for free self-hosted web search",
"prompt": "SearXNG URL (e.g. http://localhost:8080)",
"url": "https://searxng.github.io/searxng/",
"tools": ["web_search"],
"password": False,
"category": "tool",
},
"BRAVE_SEARCH_API_KEY": {
"description": "Brave Search API subscription token (free tier: 2,000 queries/mo)",
"prompt": "Brave Search subscription token",
"url": "https://brave.com/search/api/",
"tools": ["web_search"],
"password": True,
"category": "tool",
},
"BROWSERBASE_API_KEY": {
"description": "Browserbase API key for cloud browser (optional — local browser works without this)",
"prompt": "Browserbase API key",
@ -1821,6 +1916,15 @@ OPTIONAL_ENV_VARS = {
"password": False,
"category": "tool",
},
"AGENT_BROWSER_ENGINE": {
"description": "Browser engine for local mode: auto (default Chrome), lightpanda (faster, no screenshots), chrome",
"prompt": "Browser engine (auto/lightpanda/chrome)",
"url": "https://github.com/vercel-labs/agent-browser",
"tools": ["browser_navigate", "browser_snapshot", "browser_click", "browser_vision"],
"password": False,
"category": "tool",
"advanced": True,
},
"CAMOFOX_URL": {
"description": "Camofox browser server URL for local anti-detection browsing (e.g. http://localhost:9377)",
"prompt": "Camofox server URL",
@ -1899,7 +2003,7 @@ OPTIONAL_ENV_VARS = {
"LINEAR_API_KEY": {
"description": "Linear personal API key (used by the `linear` skill)",
"prompt": "Linear API key",
"url": "https://linear.app/settings/api",
"url": "https://linear.app/settings/account/security",
"password": True,
"category": "skill",
"advanced": True,
@ -3915,10 +4019,10 @@ def load_config() -> Dict[str, Any]:
_SECURITY_COMMENT = """
# ── Security ──────────────────────────────────────────────────────────
# Secret redaction is OFF by default — tool output (terminal stdout,
# read_file results, web content) passes through unmodified. Set
# redact_secrets to true to mask strings that look like API keys, tokens,
# and passwords before they enter the model context and logs.
# Secret redaction is ON by default — strings that look like API keys,
# tokens, and passwords are masked in tool output, logs, and chat
# responses before the model or user ever sees them. Set redact_secrets
# to false to disable (e.g. when developing the redactor itself).
# tirith pre-exec scanning is enabled by default when the tirith binary
# is available. Configure via security.tirith_* keys or env vars
# (TIRITH_ENABLED, TIRITH_BIN, TIRITH_TIMEOUT, TIRITH_FAIL_OPEN).
@ -3946,6 +4050,7 @@ _FALLBACK_COMMENT = """
# kimi-coding-cn (KIMI_CN_API_KEY) — Kimi / Moonshot (China)
# minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
# bedrock (AWS IAM / boto3) — AWS Bedrock (Converse API)
#
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
@ -3957,8 +4062,8 @@ _FALLBACK_COMMENT = """
_COMMENTED_SECTIONS = """
# ── Security ──────────────────────────────────────────────────────────
# Secret redaction is OFF by default. Set to true to mask strings that
# look like API keys, tokens, and passwords in tool output and logs.
# Secret redaction is ON by default. Set to false to pass tool output,
# logs, and chat responses through unmodified (e.g. for redactor dev).
#
# security:
# redact_secrets: true
@ -3977,6 +4082,7 @@ _COMMENTED_SECTIONS = """
# kimi-coding-cn (KIMI_CN_API_KEY) — Kimi / Moonshot (China)
# minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
# bedrock (AWS IAM / boto3) — AWS Bedrock (Converse API)
#
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
@ -4834,3 +4940,142 @@ def config_command(args):
print(" hermes config path Show config file path")
print(" hermes config env-path Show .env file path")
sys.exit(1)
# ── Profile-driven env var injection ─────────────────────────────────────────
# Any provider registered in providers/ with auth_type="api_key" automatically
# gets its env_vars exposed in OPTIONAL_ENV_VARS without editing this file.
# Runs once at import time.
_profile_env_vars_injected = False
def _inject_profile_env_vars() -> None:
"""Populate OPTIONAL_ENV_VARS from provider profiles not already listed.
Called once at module load time. Idempotent repeated calls are no-ops.
"""
global _profile_env_vars_injected
if _profile_env_vars_injected:
return
_profile_env_vars_injected = True
try:
from providers import list_providers
for _pp in list_providers():
if _pp.auth_type not in ("api_key",):
continue
for _var in _pp.env_vars:
if _var in OPTIONAL_ENV_VARS:
continue
_is_key = not _var.endswith("_BASE_URL") and not _var.endswith("_URL")
OPTIONAL_ENV_VARS[_var] = {
"description": f"{_pp.display_name or _pp.name} {'API key' if _is_key else 'base URL override'}",
"prompt": f"{_pp.display_name or _pp.name} {'API key' if _is_key else 'base URL (leave empty for default)'}",
"url": _pp.signup_url or None,
"password": _is_key,
"category": "provider",
"advanced": True,
}
except Exception:
pass
# Eagerly inject so that OPTIONAL_ENV_VARS is fully populated at import time.
_inject_profile_env_vars()
# ── Platform-plugin env var injection ────────────────────────────────────────
# Bundled platform plugins under ``plugins/platforms/*/plugin.yaml`` declare
# their required env vars via ``requires_env``. This mirror of
# ``_inject_profile_env_vars`` surfaces them in ``hermes config`` UI so users
# can configure Teams / IRC / Google Chat without the core repo ever needing
# to know they exist.
#
# Each ``requires_env`` entry may be a bare string (name only) or a dict:
#
# requires_env:
# - TEAMS_CLIENT_ID # minimal
# - name: TEAMS_CLIENT_SECRET # rich
# description: "Teams bot client secret"
# url: "https://portal.azure.com/"
# password: true
# prompt: "Teams client secret"
#
# An optional ``optional_env`` block surfaces non-required vars the same way
# (e.g. allowlist, home channel).
_platform_plugin_env_vars_injected = False
def _inject_platform_plugin_env_vars() -> None:
"""Populate OPTIONAL_ENV_VARS from bundled platform plugin manifests.
Called once at module load time. Idempotent repeated calls are no-ops.
Failures are swallowed so a malformed plugin.yaml can't break CLI import.
"""
global _platform_plugin_env_vars_injected
if _platform_plugin_env_vars_injected:
return
_platform_plugin_env_vars_injected = True
try:
import yaml # type: ignore
# Resolve the bundled plugins dir from this file's location so the
# injector works regardless of CWD.
repo_root = Path(__file__).resolve().parents[1]
platforms_dir = repo_root / "plugins" / "platforms"
if not platforms_dir.is_dir():
return
for child in platforms_dir.iterdir():
if not child.is_dir():
continue
manifest_path = child / "plugin.yaml"
if not manifest_path.exists():
manifest_path = child / "plugin.yml"
if not manifest_path.exists():
continue
try:
with open(manifest_path, "r", encoding="utf-8") as f:
manifest = yaml.safe_load(f) or {}
except Exception:
continue
label = manifest.get("label") or manifest.get("name") or child.name
# Merge required + optional env var declarations.
entries = list(manifest.get("requires_env") or [])
entries.extend(manifest.get("optional_env") or [])
for entry in entries:
if isinstance(entry, str):
name = entry
meta: dict = {}
elif isinstance(entry, dict) and entry.get("name"):
name = entry["name"]
meta = entry
else:
continue
if name in OPTIONAL_ENV_VARS:
continue # hardcoded entry wins (back-compat)
# Heuristic: anything named *TOKEN, *SECRET, *KEY, *PASSWORD
# is a password field unless explicitly overridden.
name_upper = name.upper()
is_secret = bool(meta.get("password") or meta.get("secret"))
if not is_secret and not meta.get("password") is False:
is_secret = any(
name_upper.endswith(suf)
for suf in ("_TOKEN", "_SECRET", "_KEY", "_PASSWORD", "_JSON")
)
OPTIONAL_ENV_VARS[name] = {
"description": (
meta.get("description")
or f"{label} configuration"
),
"prompt": meta.get("prompt") or name,
"url": meta.get("url") or None,
"password": is_secret,
"category": meta.get("category") or "messaging",
}
except Exception:
pass
# Eagerly inject so that platform plugin env vars show up in the setup wizard.
_inject_platform_plugin_env_vars()

View file

@ -212,9 +212,9 @@ def copilot_device_code_login(
print(" Waiting for authorization...", end="", flush=True)
# Step 3: Poll for completion
deadline = time.time() + timeout_seconds
deadline = time.monotonic() + timeout_seconds
while time.time() < deadline:
while time.monotonic() < deadline:
time.sleep(interval + _DEVICE_CODE_POLL_SAFETY_MARGIN)
poll_data = urllib.parse.urlencode({

View file

@ -93,6 +93,8 @@ def cron_list(show_all: bool = False):
script = job.get("script")
if script:
print(f" Script: {script}")
if job.get("no_agent"):
print(f" Mode: {color('no-agent', Colors.DIM)} (script stdout delivered directly)")
workdir = job.get("workdir")
if workdir:
print(f" Workdir: {workdir}")
@ -172,6 +174,7 @@ def cron_create(args):
skills=_normalize_skills(getattr(args, "skill", None), getattr(args, "skills", None)),
script=getattr(args, "script", None),
workdir=getattr(args, "workdir", None),
no_agent=getattr(args, "no_agent", False) or None,
)
if not result.get("success"):
print(color(f"Failed to create job: {result.get('error', 'unknown error')}", Colors.RED))
@ -184,6 +187,8 @@ def cron_create(args):
job_data = result.get("job", {})
if job_data.get("script"):
print(f" Script: {job_data['script']}")
if job_data.get("no_agent"):
print(" Mode: no-agent (script stdout delivered directly)")
if job_data.get("workdir"):
print(f" Workdir: {job_data['workdir']}")
print(f" Next run: {result['next_run_at']}")
@ -225,6 +230,7 @@ def cron_edit(args):
skills=final_skills,
script=getattr(args, "script", None),
workdir=getattr(args, "workdir", None),
no_agent=getattr(args, "no_agent", None),
)
if not result.get("success"):
print(color(f"Failed to update job: {result.get('error', 'unknown error')}", Colors.RED))
@ -240,6 +246,8 @@ def cron_edit(args):
print(" Skills: none")
if updated.get("script"):
print(f" Script: {updated['script']}")
if updated.get("no_agent"):
print(" Mode: no-agent (script stdout delivered directly)")
if updated.get("workdir"):
print(f" Workdir: {updated['workdir']}")
return 0

View file

@ -12,6 +12,7 @@ from __future__ import annotations
import argparse
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
@ -57,7 +58,8 @@ def _cmd_status(args) -> int:
print(f" last summary: {summary}")
_report = state.get("last_report_path")
if _report:
print(f" last report: {_report}")
suffix = "" if Path(_report).exists() else " (missing)"
print(f" last report: {_report}{suffix}")
_ih = curator.get_interval_hours()
_interval_label = (
f"{_ih // 24}d" if _ih % 24 == 0 and _ih >= 24
@ -161,6 +163,8 @@ def _cmd_run(args) -> int:
return 1
dry = bool(getattr(args, "dry_run", False))
background = bool(getattr(args, "background", False))
synchronous = bool(getattr(args, "synchronous", False)) or not background
if dry:
print("curator: running DRY-RUN (report only, no mutations)...")
else:
@ -171,7 +175,7 @@ def _cmd_run(args) -> int:
result = curator.run_curator_review(
on_summary=_on_summary,
synchronous=bool(args.synchronous),
synchronous=synchronous,
dry_run=dry,
)
auto = result.get("auto_transitions", {})
@ -188,13 +192,19 @@ def _cmd_run(args) -> int:
f"archived={auto.get('archived', 0)} "
f"reactivated={auto.get('reactivated', 0)}"
)
if not args.synchronous:
if not synchronous:
print("llm pass running in background — check `hermes curator status` later")
if dry:
print(
"dry-run: no changes applied. When the report lands, read it with "
"`hermes curator status` and run `hermes curator run` (no flag) to apply."
)
if synchronous:
print(
"dry-run: no changes applied. Read the report with "
"`hermes curator status` and run `hermes curator run` (no flag) to apply."
)
else:
print(
"dry-run: no changes applied. When the report lands, read it with "
"`hermes curator status` and run `hermes curator run` (no flag) to apply."
)
return 0
@ -245,6 +255,111 @@ def _cmd_restore(args) -> int:
return 0 if ok else 1
def _cmd_archive(args) -> int:
    """Archive one agent-created skill immediately, unless it is pinned.

    Unlike the auto-curator, which archives stale skills on its own
    schedule, this verb acts right away at the user's request.
    """
    from tools import skill_usage

    name = args.skill
    record = skill_usage.get_record(name)
    if record.get("pinned"):
        print(
            f"curator: '{name}' is pinned — unpin first with "
            f"`hermes curator unpin {name}`"
        )
        return 1
    ok, msg = skill_usage.archive_skill(name)
    print(f"curator: {msg}")
    return 0 if ok else 1
def _idle_days(record: dict) -> Optional[int]:
"""Days since the skill's last activity (view / use / patch).
Falls back to ``created_at`` so a skill that was authored but never used
can still be pruned otherwise never-touched skills would be immortal.
Returns None only when both fields are missing or unparseable.
"""
ts = record.get("last_activity_at") or record.get("created_at")
if not ts:
return None
try:
dt = datetime.fromisoformat(str(ts))
except (TypeError, ValueError):
return None
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
return max(0, (datetime.now(timezone.utc) - dt).days)
def _cmd_prune(args) -> int:
    """Bulk-archive agent-created skills that have sat idle for >= N days.

    Pinned and already-archived skills are exempt. ``--days`` (default 90,
    a conservative read of the curator's own archive threshold) sets the
    idle cutoff, ``--dry-run`` previews without mutating, and ``-y/--yes``
    skips the confirmation prompt.
    """
    from tools import skill_usage

    threshold = getattr(args, "days", 90)
    if threshold < 1:
        print(f"curator: --days must be >= 1 (got {threshold})", file=sys.stderr)
        return 2
    preview_only = bool(getattr(args, "dry_run", False))
    assume_yes = bool(getattr(args, "yes", False))

    # Collect unpinned, non-archived skills past the idle threshold.
    stale: list[tuple[str, int]] = []
    for record in skill_usage.agent_created_report():
        if record.get("pinned") or record.get("state") == skill_usage.STATE_ARCHIVED:
            continue
        idle = _idle_days(record)
        if idle is not None and idle >= threshold:
            stale.append((record["name"], idle))

    if not stale:
        print(f"curator: nothing to prune (no unpinned skills idle >= {threshold}d)")
        return 0

    # Most-idle first for the listing.
    stale.sort(key=lambda item: item[1], reverse=True)
    print(f"curator: {len(stale)} skill(s) idle >= {threshold}d:")
    for skill_name, idle in stale:
        print(f" {skill_name:40s} idle {idle}d")

    if preview_only:
        print("\n(dry run — no changes made)")
        return 0

    if not assume_yes:
        try:
            answer = input(f"\nArchive {len(stale)} skill(s)? [y/N] ").strip().lower()
        except (EOFError, KeyboardInterrupt):
            print("\ncurator: aborted")
            return 1
        if answer not in ("y", "yes"):
            print("curator: aborted")
            return 1

    done = 0
    errors: list[tuple[str, str]] = []
    for skill_name, _idle in stale:
        ok, msg = skill_usage.archive_skill(skill_name)
        if ok:
            done += 1
        else:
            errors.append((skill_name, msg))

    print(f"\ncurator: archived {done}/{len(stale)}")
    if errors:
        print("failures:")
        for skill_name, msg in errors:
            print(f" {skill_name}: {msg}")
        return 1
    return 0
def _cmd_backup(args) -> int:
"""Take a manual snapshot of the skills tree. Same mechanism as the
automatic pre-run snapshot, just user-initiated."""
@ -337,6 +452,18 @@ def _cmd_rollback(args) -> int:
return 1
def _cmd_list_archived(args) -> int:
    """Print every archived (still recoverable) skill, one per line."""
    from tools import skill_usage

    archived = skill_usage.list_archived_skill_names()
    if archived:
        print("\n".join(archived))
    else:
        print("curator: no archived skills")
    return 0
# ---------------------------------------------------------------------------
# argparse wiring (called from hermes_cli.main)
# ---------------------------------------------------------------------------
@ -356,7 +483,11 @@ def register_cli(parent: argparse.ArgumentParser) -> None:
p_run = subs.add_parser("run", help="Trigger a curator review now")
p_run.add_argument(
"--sync", "--synchronous", dest="synchronous", action="store_true",
help="Wait for the LLM review pass to finish (default: background thread)",
help="Wait for the LLM review pass to finish (default for manual runs)",
)
p_run.add_argument(
"--background", dest="background", action="store_true",
help="Start the LLM review pass in a background thread and return immediately",
)
p_run.add_argument(
"--dry-run", dest="dry_run", action="store_true",
@ -383,6 +514,34 @@ def register_cli(parent: argparse.ArgumentParser) -> None:
p_restore.add_argument("skill", help="Skill name")
p_restore.set_defaults(func=_cmd_restore)
subs.add_parser("list-archived", help="List archived skills") \
.set_defaults(func=_cmd_list_archived)
p_archive = subs.add_parser(
"archive",
help="Manually archive a skill (move to .archive/, excluded from prompt)",
)
p_archive.add_argument("skill", help="Skill name")
p_archive.set_defaults(func=_cmd_archive)
p_prune = subs.add_parser(
"prune",
help="Bulk-archive agent-created skills idle for >= N days (default 90)",
)
p_prune.add_argument(
"--days", type=int, default=90,
help="Archive skills idle for at least N days (default: 90)",
)
p_prune.add_argument(
"-y", "--yes", action="store_true",
help="Skip the confirmation prompt",
)
p_prune.add_argument(
"--dry-run", dest="dry_run", action="store_true",
help="Show what would be archived without doing it",
)
p_prune.set_defaults(func=_cmd_prune)
p_backup = subs.add_parser(
"backup",
help="Take a manual tar.gz snapshot of ~/.hermes/skills/ "

View file

@ -12,6 +12,7 @@ import importlib.util
from pathlib import Path
from hermes_cli.config import get_project_root, get_hermes_home, get_env_path
from hermes_cli.env_loader import load_hermes_dotenv
from hermes_constants import display_hermes_home
PROJECT_ROOT = get_project_root()
@ -19,15 +20,8 @@ HERMES_HOME = get_hermes_home()
_DHH = display_hermes_home() # user-facing display path (e.g. ~/.hermes or ~/.hermes/profiles/coder)
# Load environment variables from ~/.hermes/.env so API key checks work
from dotenv import load_dotenv
_env_path = get_env_path()
if _env_path.exists():
try:
load_dotenv(_env_path, encoding="utf-8")
except UnicodeDecodeError:
load_dotenv(_env_path, encoding="latin-1")
# Also try project .env as dev fallback
load_dotenv(PROJECT_ROOT / ".env", override=False, encoding="utf-8")
load_hermes_dotenv(hermes_home=_env_path.parent, project_env=PROJECT_ROOT / ".env")
from hermes_cli.colors import Colors, color
from hermes_cli.models import _HERMES_USER_AGENT
@ -97,6 +91,15 @@ def _termux_browser_setup_steps(node_installed: bool) -> list[str]:
return steps
def _termux_install_all_fallback_notes() -> list[str]:
return [
"Termux install profile: use .[termux-all] for broad compatibility (installer default on Termux).",
"Matrix E2EE extra is excluded on Termux (python-olm currently fails to build).",
"Local faster-whisper extra is excluded on Termux (ctranslate2/av build path unavailable).",
"STT fallback: use Groq Whisper (set GROQ_API_KEY) or OpenAI Whisper (set VOICE_TOOLS_OPENAI_KEY).",
]
def _has_provider_env_config(content: str) -> bool:
    """Return True when ~/.hermes/.env contains provider auth/base URL settings."""
    for hint in _PROVIDER_ENV_HINTS:
        if hint in content:
            return True
    return False
@ -113,15 +116,35 @@ def _honcho_is_configured_for_doctor() -> bool:
return False
def _is_kanban_worker_env_gate(item: dict) -> bool:
"""Return True when Kanban is unavailable only because this is not a worker process."""
if item.get("name") != "kanban":
return False
if os.environ.get("HERMES_KANBAN_TASK"):
return False
tools = item.get("tools") or []
return bool(tools) and all(str(tool).startswith("kanban_") for tool in tools)
def _doctor_tool_availability_detail(toolset: str) -> str:
"""Optional explanatory suffix for toolsets whose doctor status needs context."""
if toolset == "kanban" and not os.environ.get("HERMES_KANBAN_TASK"):
return "(runtime-gated; loaded only for dispatcher-spawned workers)"
return ""
def _apply_doctor_tool_availability_overrides(available: list[str], unavailable: list[dict]) -> tuple[list[str], list[dict]]:
"""Adjust runtime-gated tool availability for doctor diagnostics."""
if not _honcho_is_configured_for_doctor():
return available, unavailable
updated_available = list(available)
updated_unavailable = []
for item in unavailable:
if item.get("name") == "honcho":
name = item.get("name")
if _is_kanban_worker_env_gate(item):
if "kanban" not in updated_available:
updated_available.append("kanban")
continue
if name == "honcho" and _honcho_is_configured_for_doctor():
if "honcho" not in updated_available:
updated_available.append("honcho")
continue
@ -175,6 +198,85 @@ def _check_gateway_service_linger(issues: list[str]) -> None:
check_warn("Could not verify systemd linger", f"({linger_detail})")
_APIKEY_PROVIDERS_CACHE: list | None = None
def _build_apikey_providers_list() -> list:
    """Build the API-key provider health-check list once and cache it.

    Tuple format: (name, env_vars, default_url, base_env, supports_models_endpoint)

    The static base list is augmented with any ProviderProfile whose
    auth_type is "api_key" and that is not already present — adding a
    plugins/model-providers/<name>/ plugin is sufficient to get a provider
    into doctor.
    """
    # Static base list. supports_models_endpoint=False means doctor skips the
    # live health check and only reports the key as configured.
    _static = [
        ("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
        ("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
        ("StepFun Step Plan", ("STEPFUN_API_KEY",), "https://api.stepfun.ai/step_plan/v1/models", "STEPFUN_BASE_URL", True),
        ("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
        ("Arcee AI", ("ARCEEAI_API_KEY",), "https://api.arcee.ai/api/v1/models", "ARCEE_BASE_URL", True),
        ("GMI Cloud", ("GMI_API_KEY",), "https://api.gmi-serving.com/v1/models", "GMI_BASE_URL", True),
        ("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
        ("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True),
        ("NVIDIA NIM", ("NVIDIA_API_KEY",), "https://integrate.api.nvidia.com/v1/models", "NVIDIA_BASE_URL", True),
        ("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True),
        # MiniMax global: /v1 endpoint supports /models.
        ("MiniMax", ("MINIMAX_API_KEY",), "https://api.minimax.io/v1/models", "MINIMAX_BASE_URL", True),
        # MiniMax CN: /v1 endpoint does NOT support /models (returns 404).
        ("MiniMax (China)", ("MINIMAX_CN_API_KEY",), "https://api.minimaxi.com/v1/models", "MINIMAX_CN_BASE_URL", False),
        ("Vercel AI Gateway", ("AI_GATEWAY_API_KEY",), "https://ai-gateway.vercel.sh/v1/models", "AI_GATEWAY_BASE_URL", True),
        ("Kilo Code", ("KILOCODE_API_KEY",), "https://api.kilo.ai/api/gateway/models", "KILOCODE_BASE_URL", True),
        ("OpenCode Zen", ("OPENCODE_ZEN_API_KEY",), "https://opencode.ai/zen/v1/models", "OPENCODE_ZEN_BASE_URL", True),
        # OpenCode Go has no shared /models endpoint; skip the health check.
        ("OpenCode Go", ("OPENCODE_GO_API_KEY",), None, "OPENCODE_GO_BASE_URL", False),
    ]
    _known_names = {t[0] for t in _static}
    # Also index by profile canonical name so profiles without display_name
    # don't create duplicate entries for providers already in the static list.
    _known_canonical: set[str] = set()
    _name_to_canonical = {
        "Z.AI / GLM": "zai", "Kimi / Moonshot": "kimi-coding",
        "StepFun Step Plan": "stepfun", "Kimi / Moonshot (China)": "kimi-coding-cn",
        "Arcee AI": "arcee", "GMI Cloud": "gmi", "DeepSeek": "deepseek",
        "Hugging Face": "huggingface", "NVIDIA NIM": "nvidia",
        "Alibaba/DashScope": "alibaba", "MiniMax": "minimax",
        "MiniMax (China)": "minimax-cn", "Vercel AI Gateway": "ai-gateway",
        "Kilo Code": "kilocode", "OpenCode Zen": "opencode-zen",
        "OpenCode Go": "opencode-go",
    }
    for _label, _canonical in _name_to_canonical.items():
        _known_canonical.add(_canonical)
    try:
        from providers import list_providers
        from providers.base import ProviderProfile as _PP

        for _pp in list_providers():
            if not isinstance(_pp, _PP) or _pp.auth_type != "api_key" or not _pp.env_vars:
                continue
            _label = _pp.display_name or _pp.name
            if _label in _known_names or _pp.name in _known_canonical:
                continue
            # Separate API-key vars from base-URL override vars — the health-check
            # loop sends the first found value as Authorization: Bearer, so a URL
            # string must never be picked.
            _key_vars = tuple(
                v for v in _pp.env_vars
                if not v.endswith("_BASE_URL") and not v.endswith("_URL")
            )
            _base_var = next(
                (v for v in _pp.env_vars if v.endswith("_BASE_URL") or v.endswith("_URL")),
                None,
            )
            if not _key_vars:
                continue
            # Prefer the profile's explicit models URL; otherwise derive
            # <base_url>/models. Providers with no base_url get no check URL.
            _models_url = (
                (_pp.models_url or (_pp.base_url.rstrip("/") + "/models"))
                if _pp.base_url else None
            )
            _static.append((_label, _key_vars, _models_url, _base_var, True))
    except Exception:
        # Provider plugins are optional; doctor must still work without them.
        pass
    return _static
def run_doctor(args):
"""Run diagnostic checks."""
should_fix = getattr(args, 'fix', False)
@ -998,6 +1100,11 @@ def run_doctor(args):
except Exception:
pass
if _is_termux():
check_info("Termux compatibility fallbacks:")
for note in _termux_install_all_fallback_notes():
check_info(note)
# =========================================================================
# Check: API connectivity
# =========================================================================
@ -1094,27 +1201,11 @@ def run_doctor(args):
# -- API-key providers --
# Tuple: (name, env_vars, default_url, base_env, supports_models_endpoint)
# If supports_models_endpoint is False, we skip the health check and just show "configured"
_apikey_providers = [
("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
("StepFun Step Plan", ("STEPFUN_API_KEY",), "https://api.stepfun.ai/step_plan/v1/models", "STEPFUN_BASE_URL", True),
("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
("Arcee AI", ("ARCEEAI_API_KEY",), "https://api.arcee.ai/api/v1/models", "ARCEE_BASE_URL", True),
("GMI Cloud", ("GMI_API_KEY",), "https://api.gmi-serving.com/v1/models", "GMI_BASE_URL", True),
("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True),
("NVIDIA NIM", ("NVIDIA_API_KEY",), "https://integrate.api.nvidia.com/v1/models", "NVIDIA_BASE_URL", True),
("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True),
# MiniMax global: /v1 endpoint supports /models.
("MiniMax", ("MINIMAX_API_KEY",), "https://api.minimax.io/v1/models", "MINIMAX_BASE_URL", True),
# MiniMax CN: /v1 endpoint does NOT support /models (returns 404).
("MiniMax (China)", ("MINIMAX_CN_API_KEY",), "https://api.minimaxi.com/v1/models", "MINIMAX_CN_BASE_URL", False),
("Vercel AI Gateway", ("AI_GATEWAY_API_KEY",), "https://ai-gateway.vercel.sh/v1/models", "AI_GATEWAY_BASE_URL", True),
("Kilo Code", ("KILOCODE_API_KEY",), "https://api.kilo.ai/api/gateway/models", "KILOCODE_BASE_URL", True),
("OpenCode Zen", ("OPENCODE_ZEN_API_KEY",), "https://opencode.ai/zen/v1/models", "OPENCODE_ZEN_BASE_URL", True),
# OpenCode Go has no shared /models endpoint; skip the health check.
("OpenCode Go", ("OPENCODE_GO_API_KEY",), None, "OPENCODE_GO_BASE_URL", False),
]
# Cached at module level after first build — profiles auto-extend it.
global _APIKEY_PROVIDERS_CACHE
if _APIKEY_PROVIDERS_CACHE is None:
_APIKEY_PROVIDERS_CACHE = _build_apikey_providers_list()
_apikey_providers = _APIKEY_PROVIDERS_CACHE
for _pname, _env_vars, _default_url, _base_env, _supports_health_check in _apikey_providers:
_key = ""
for _ev in _env_vars:
@ -1155,6 +1246,16 @@ def run_doctor(args):
headers=_headers,
timeout=10,
)
if (
_pname == "Alibaba/DashScope"
and not _base
and _resp.status_code == 401
):
_resp = httpx.get(
"https://dashscope.aliyuncs.com/compatible-mode/v1/models",
headers=_headers,
timeout=10,
)
if _resp.status_code == 200:
print(f"\r {color('', Colors.GREEN)} {_label} ")
elif _resp.status_code == 401:
@ -1228,7 +1329,7 @@ def run_doctor(args):
for tid in available:
info = TOOLSET_REQUIREMENTS.get(tid, {})
check_ok(info.get("name", tid))
check_ok(info.get("name", tid), _doctor_tool_availability_detail(tid))
for item in unavailable:
env_vars = item.get("missing_vars") or item.get("env_vars") or []
@ -1271,9 +1372,23 @@ def run_doctor(args):
check_warn("Skills Hub directory not initialized", "(run: hermes skills list)")
from hermes_cli.config import get_env_value
def _gh_authenticated() -> bool:
"""Check if gh CLI is authenticated via token file or device flow."""
try:
result = subprocess.run(
["gh", "auth", "status", "--json", "authenticated"],
capture_output=True, timeout=10,
)
return result.returncode == 0
except (FileNotFoundError, subprocess.TimeoutExpired):
return False
github_token = get_env_value("GITHUB_TOKEN") or get_env_value("GH_TOKEN")
if github_token:
check_ok("GitHub token configured (authenticated API access)")
elif _gh_authenticated():
check_ok("GitHub authenticated via gh CLI", "(full API access — no GITHUB_TOKEN needed)")
else:
check_warn("No GITHUB_TOKEN", f"(60 req/hr rate limit — set in {_DHH}/.env for better rates)")

View file

@ -14,6 +14,7 @@ import sys
from pathlib import Path
from hermes_cli.config import get_hermes_home, get_env_path, get_project_root, load_config
from hermes_cli.env_loader import load_hermes_dotenv
from hermes_constants import display_hermes_home
@ -195,15 +196,11 @@ def run_dump(args):
show_keys = getattr(args, "show_keys", False)
# Load env from .env file so key checks work
from dotenv import load_dotenv
env_path = get_env_path()
if env_path.exists():
try:
load_dotenv(env_path, encoding="utf-8")
except UnicodeDecodeError:
load_dotenv(env_path, encoding="latin-1")
# Also try project .env as dev fallback
load_dotenv(get_project_root() / ".env", override=False, encoding="utf-8")
load_hermes_dotenv(
hermes_home=env_path.parent,
project_env=get_project_root() / ".env",
)
project_root = get_project_root()
hermes_home = get_hermes_home()

View file

@ -505,6 +505,7 @@ def _read_systemd_unit_properties(
"SubState",
"Result",
"ExecMainStatus",
"MainPID",
),
) -> dict[str, str]:
"""Return selected ``systemctl show`` properties for the gateway unit."""
@ -538,6 +539,41 @@ def _read_systemd_unit_properties(
return parsed
def _systemd_main_pid_from_props(props: dict[str, str]) -> int | None:
try:
pid = int(props.get("MainPID", "0") or "0")
except (TypeError, ValueError):
return None
return pid if pid > 0 else None
def _systemd_main_pid(system: bool = False) -> int | None:
    """MainPID of the gateway unit in the given scope, or None when not running."""
    props = _read_systemd_unit_properties(system=system)
    return _systemd_main_pid_from_props(props)
def _read_gateway_runtime_status() -> dict | None:
    """Best-effort read of the gateway runtime status; None on any failure."""
    try:
        from gateway.status import read_runtime_status
        snapshot = read_runtime_status()
    except Exception:
        # Import or read failure — caller treats this as "status unknown".
        return None
    if isinstance(snapshot, dict):
        return snapshot
    return None
def _gateway_runtime_status_for_pid(pid: int | None) -> dict | None:
    """Return the runtime status dict only when it was written by *pid*."""
    if not pid:
        return None
    snapshot = _read_gateway_runtime_status()
    if not snapshot:
        return None
    try:
        recorded = int(snapshot.get("pid", 0) or 0)
    except (TypeError, ValueError):
        return None
    if recorded != pid:
        # Stale status file from an earlier gateway process.
        return None
    return snapshot
def _wait_for_systemd_service_restart(
*,
system: bool = False,
@ -549,9 +585,10 @@ def _wait_for_systemd_service_restart(
svc = get_service_name()
scope_label = _service_scope_label(system).capitalize()
deadline = time.time() + timeout
deadline = time.monotonic() + timeout
printed_runtime_wait = False
while time.time() < deadline:
while time.monotonic() < deadline:
props = _read_systemd_unit_properties(system=system)
active_state = props.get("ActiveState", "")
sub_state = props.get("SubState", "")
@ -562,19 +599,32 @@ def _wait_for_systemd_service_restart(
new_pid = get_running_pid()
except Exception:
new_pid = None
if not new_pid:
new_pid = _systemd_main_pid_from_props(props)
if active_state == "active":
if new_pid and (previous_pid is None or new_pid != previous_pid):
print(f"{scope_label} service restarted (PID {new_pid})")
return True
if previous_pid is None:
print(f"{scope_label} service restarted")
return True
runtime_state = _gateway_runtime_status_for_pid(new_pid)
gateway_state = (runtime_state or {}).get("gateway_state")
if gateway_state == "running":
print(f"{scope_label} service restarted (PID {new_pid})")
return True
if gateway_state == "startup_failed":
reason = (runtime_state or {}).get("exit_reason") or "startup failed"
print(f"{scope_label} service process restarted (PID {new_pid}), but gateway startup failed: {reason}")
return False
if not printed_runtime_wait:
print(f"{scope_label} service process started (PID {new_pid}); waiting for gateway runtime...")
printed_runtime_wait = True
if active_state == "activating" and sub_state == "auto-restart":
time.sleep(1)
continue
if _systemd_unit_is_start_limited(props):
_print_systemd_start_limit_wait(system=system)
return False
time.sleep(2)
print(
@ -585,6 +635,46 @@ def _wait_for_systemd_service_restart(
return False
def _systemd_unit_is_start_limited(props: dict[str, str]) -> bool:
result = props.get("Result", "").lower()
sub_state = props.get("SubState", "").lower()
return result == "start-limit-hit" or sub_state == "start-limit-hit"
def _systemd_error_indicates_start_limit(exc: subprocess.CalledProcessError) -> bool:
    """Heuristically detect systemd start-rate-limiting from a failed systemctl call.

    Scans the captured stderr/stdout/output text of *exc* for the phrases
    systemd emits when a unit is start-limited. Bytes payloads are decoded
    with replacement so undecodable output can never raise.
    """
    parts: list[str] = []
    for attr in ("stderr", "stdout", "output"):
        value = getattr(exc, attr, None)
        if not value:
            continue
        if isinstance(value, bytes):
            value = value.decode(errors="replace")
        parts.append(str(value))
    text = "\n".join(parts).lower()
    # "start-limit" also matches "start-limit-hit", so a single substring
    # check covers both markers (the old explicit "-hit" test was redundant).
    return (
        "start-limit" in text
        or "start request repeated too quickly" in text
    )
def _systemd_service_is_start_limited(system: bool = False) -> bool:
    """True when the gateway unit in the given scope is currently start-limited."""
    props = _read_systemd_unit_properties(system=system)
    return _systemd_unit_is_start_limited(props)
def _print_systemd_start_limit_wait(system: bool = False) -> None:
    """Explain systemd start-rate-limiting and print the recovery commands."""
    unit = get_service_name()
    label = _service_scope_label(system).capitalize()
    if system:
        sudo, flag, sysctl, journal = "sudo ", " --system", "systemctl ", "journalctl "
    else:
        sudo, flag, sysctl, journal = "", "", "systemctl --user ", "journalctl --user "
    print(f"{label} service is temporarily rate-limited by systemd.")
    print(" systemd is refusing another immediate start after repeated exits.")
    print(f" Wait for the start-limit window to expire, then run: {sudo}hermes gateway restart{flag}")
    print(f" Or clear the failed state manually: {sysctl}reset-failed {unit}")
    print(f" Check logs: {journal}-u {unit} -l --since '5 min ago'")
def _recover_pending_systemd_restart(system: bool = False, previous_pid: int | None = None) -> bool:
"""Recover a planned service restart that is stuck in systemd state."""
props = _read_systemd_unit_properties(system=system)
@ -740,6 +830,46 @@ def _print_other_profiles_gateway_status() -> None:
pass
def _gateway_list() -> None:
    """List all profiles and their gateway running status.

    Provides a single-command overview of every known profile and whether
    its gateway is currently running, so multi-profile users don't have to
    check each profile individually.
    """
    try:
        from hermes_cli.profiles import list_profiles, get_active_profile_name
    except Exception:
        # Profiles module unavailable — nothing meaningful to list.
        print("Unable to list profiles.")
        return
    profiles = list_profiles()
    if not profiles:
        print("No profiles found.")
        return
    current = get_active_profile_name()
    print("Gateways:")
    for prof in profiles:
        # NOTE(review): both branches yield "" here, making the conditional a
        # no-op — the running/stopped status glyphs may have been lost in
        # transit; confirm against the original source.
        marker = "" if prof.gateway_running else ""
        label = prof.name
        if prof.name == current:
            label += " (current)"
        parts = [f" {marker} {label:<24s}"]
        if prof.gateway_running:
            try:
                from gateway.status import get_running_pid
                # cleanup_stale=False: read-only peek; don't mutate another
                # profile's pid file from here.
                pid = get_running_pid(prof.path / "gateway.pid", cleanup_stale=False)
                if pid:
                    parts.append(f"PID {pid}")
            except Exception:
                pass
        else:
            parts.append("not running")
        print("".join(parts))
def kill_gateway_processes(force: bool = False, exclude_pids: set | None = None,
all_profiles: bool = False) -> int:
"""Kill any running gateway processes. Returns count killed.
@ -785,6 +915,12 @@ def stop_profile_gateway() -> bool:
if pid is None:
return False
try:
from gateway.status import write_planned_stop_marker
write_planned_stop_marker(pid)
except Exception:
pass
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
@ -961,6 +1097,27 @@ class UserSystemdUnavailableError(RuntimeError):
"""
class SystemScopeRequiresRootError(RuntimeError):
    """A system-scope gateway operation was attempted without root.

    System-scope units live under ``/etc/systemd/system/`` and need root for
    install / uninstall / start / stop / restart via ``systemctl``. Earlier
    code called ``sys.exit(1)`` here, which sailed past the setup wizard's
    ``except Exception`` guards and dropped the user at a bare shell prompt
    with no guidance. A typed exception lets recoverable callers (the
    wizard) print actionable remediation, while ``gateway_command`` still
    exits 1 with the same message on the direct CLI path.

    ``args[0]`` carries the user-facing message and ``args[1]`` the action
    name; ``str(e)`` yields just the message (not the tuple repr) so format
    strings like ``f"Failed: {e}"`` render cleanly.
    """

    def __str__(self) -> str:
        if not self.args:
            return ""
        return self.args[0]
def _user_dbus_socket_path() -> Path:
"""Return the expected per-user D-Bus socket path (regardless of existence)."""
xdg = os.environ.get("XDG_RUNTIME_DIR") or f"/run/user/{os.getuid()}"
@ -1376,8 +1533,10 @@ def print_systemd_scope_conflict_warning() -> None:
def _require_root_for_system_service(action: str) -> None:
if os.geteuid() != 0:
print(f"System gateway {action} requires root. Re-run with sudo.")
sys.exit(1)
raise SystemScopeRequiresRootError(
f"System gateway {action} requires root. Re-run with sudo.",
action,
)
def _system_service_identity(run_as_user: str | None = None) -> tuple[str, str, str]:
@ -1608,6 +1767,46 @@ def _build_user_local_paths(home: Path, path_entries: list[str]) -> list[str]:
return [p for p in candidates if p not in path_entries and Path(p).exists()]
def _build_wsl_interop_paths(path_entries: list[str]) -> list[str]:
    """Return WSL Windows-interop PATH entries for generated systemd units.

    WSL shells normally inherit Windows PATH entries such as
    ``/mnt/c/WINDOWS/System32``. systemd user services do not, so gateway
    tools that call ``powershell.exe``/``cmd.exe`` work in a terminal but
    fail in the background service unless we persist the relevant entries
    at install time.
    """
    if not is_wsl():
        return []
    # Start with whatever Windows mounts the current shell already sees.
    collected: list[str] = [
        entry
        for entry in os.environ.get("PATH", "").split(os.pathsep)
        if entry.startswith("/mnt/")
    ]
    # Then the directories holding the core Windows executables, if resolvable.
    for exe in ("powershell.exe", "cmd.exe", "explorer.exe", "wsl.exe"):
        found = shutil.which(exe)
        if found:
            collected.append(str(Path(found).parent))
    # Finally the conventional Windows system directories that exist.
    well_known = (
        "/mnt/c/WINDOWS/system32",
        "/mnt/c/WINDOWS",
        "/mnt/c/WINDOWS/System32/Wbem",
        "/mnt/c/WINDOWS/System32/WindowsPowerShell/v1.0/",
        "/mnt/c/WINDOWS/System32/OpenSSH/",
    )
    collected.extend(p for p in well_known if Path(p).exists())
    # De-duplicate, preserving order and skipping entries already present.
    deduped: list[str] = []
    seen = set(path_entries)
    for entry in collected:
        if entry and entry not in seen:
            seen.add(entry)
            deduped.append(entry)
    return deduped
def _remap_path_for_user(path: str, target_home_dir: str) -> str:
"""Remap *path* from the current user's home to *target_home_dir*.
@ -1699,6 +1898,7 @@ def generate_systemd_unit(system: bool = False, run_as_user: str | None = None)
node_bin = _remap_path_for_user(node_bin, home_dir)
path_entries = [_remap_path_for_user(p, home_dir) for p in path_entries]
path_entries.extend(_build_user_local_paths(Path(home_dir), path_entries))
path_entries.extend(_build_wsl_interop_paths(path_entries))
path_entries.extend(common_bin_paths)
sane_path = ":".join(path_entries)
return f"""[Unit]
@ -1738,6 +1938,7 @@ WantedBy=multi-user.target
hermes_home = str(get_hermes_home().resolve())
profile_arg = _profile_arg(hermes_home)
path_entries.extend(_build_user_local_paths(Path.home(), path_entries))
path_entries.extend(_build_wsl_interop_paths(path_entries))
path_entries.extend(common_bin_paths)
sane_path = ":".join(path_entries)
return f"""[Unit]
@ -1882,6 +2083,47 @@ def _select_systemd_scope(system: bool = False) -> bool:
return get_systemd_unit_path(system=True).exists() and not get_systemd_unit_path(system=False).exists()
def _system_scope_wizard_would_need_root(system: bool = False) -> bool:
    """True when the setup wizard is about to trigger a system-scope
    operation as a non-root user.

    Mirrors the decision ``_select_systemd_scope`` makes inside
    ``systemd_start`` / ``systemd_restart`` / ``systemd_stop`` so the wizard
    can spot the dead-end BEFORE prompting, instead of letting
    ``SystemScopeRequiresRootError`` escape and strand the user at a bare
    shell.
    """
    running_as_root = os.geteuid() == 0
    return (not running_as_root) and _select_systemd_scope(system=system)
def _print_system_scope_remediation(action: str) -> None:
    """Print actionable remediation when the wizard skips a system-scope
    prompt because the user isn't root. Keeps the wizard flowing instead of
    aborting.

    *action* is the attempted gateway operation ("start", "stop",
    "restart", ...); it is interpolated directly into the suggested
    ``systemctl`` command.
    """
    svc = get_service_name()
    print_warning(
        f"Gateway is installed as a system-wide service — "
        f"{action} requires root."
    )
    print_info(" Options:")
    print_info(f" 1. {action.capitalize()} it this time:")
    # All actions map to the same command shape, so the former
    # start/stop/restart special-casing was dead branching — every branch
    # printed `sudo systemctl {action} {svc}`.
    print_info(f" sudo systemctl {action} {svc}")
    print_info(" 2. Switch to a per-user service (recommended for personal use):")
    print_info(" sudo hermes gateway uninstall --system")
    print_info(" hermes gateway install")
    print_info(" hermes gateway start")
def _get_restart_drain_timeout() -> float:
"""Return the configured gateway restart drain timeout in seconds."""
raw = os.getenv("HERMES_RESTART_DRAIN_TIMEOUT", "").strip()
@ -2001,6 +2243,13 @@ def systemd_stop(system: bool = False):
if system:
_require_root_for_system_service("stop")
_require_service_installed("stop", system=system)
try:
from gateway.status import get_running_pid, write_planned_stop_marker
pid = get_running_pid(cleanup_stale=False)
if pid is not None:
write_planned_stop_marker(pid)
except Exception:
pass
_run_systemctl(["stop", get_service_name()], system=system, check=True, timeout=90)
print(f"{_service_scope_label(system).capitalize()} service stopped")
@ -2016,41 +2265,52 @@ def systemd_restart(system: bool = False):
refresh_systemd_unit_if_needed(system=system)
from gateway.status import get_running_pid
pid = get_running_pid()
if pid is not None and _request_gateway_self_restart(pid):
import time
pid = get_running_pid() or _systemd_main_pid(system=system)
if pid is not None:
scope_label = _service_scope_label(system).capitalize()
svc = get_service_name()
drain_timeout = _get_restart_drain_timeout()
# Phase 1: wait for old process to exit (drain + shutdown)
print(f"{scope_label} service draining active work...")
deadline = time.time() + 90
while time.time() < deadline:
try:
os.kill(pid, 0)
time.sleep(1)
except (ProcessLookupError, PermissionError):
break # old process is gone
else:
print(f"⚠ Old process (PID {pid}) still alive after 90s")
print(f"{scope_label} service restarting gracefully (PID {pid})...")
if _graceful_restart_via_sigusr1(pid, drain_timeout + 5):
# The gateway exits with code 75 for a planned service restart.
# RestartSec can otherwise delay the relaunch even though the
# operator asked for an immediate restart, so kick the unit once
# the old PID has exited and then wait for the replacement PID.
_run_systemctl(
["reset-failed", svc],
system=system,
check=False,
timeout=30,
)
_run_systemctl(
["restart", svc],
system=system,
check=False,
timeout=90,
)
if _wait_for_systemd_service_restart(system=system, previous_pid=pid):
return
if _systemd_service_is_start_limited(system=system):
return
# The gateway exits with code 75 for a planned service restart.
# systemd can sit in the RestartSec window or even wedge itself into a
# failed/rate-limited state if the operator asks for another restart in
# the middle of that handoff. Clear any stale failed state and kick the
# unit immediately so `hermes gateway restart` behaves idempotently.
print(
f"⚠ Graceful restart did not complete within {int(drain_timeout + 5)}s; "
"forcing a service restart..."
)
_run_systemctl(
["reset-failed", svc],
system=system,
check=False,
timeout=30,
)
_run_systemctl(
["start", svc],
system=system,
check=False,
timeout=90,
)
try:
_run_systemctl(["restart", svc], system=system, check=True, timeout=90)
except subprocess.CalledProcessError as exc:
if _systemd_error_indicates_start_limit(exc) or _systemd_service_is_start_limited(system=system):
_print_systemd_start_limit_wait(system=system)
return
raise
_wait_for_systemd_service_restart(system=system, previous_pid=pid)
return
@ -2063,8 +2323,14 @@ def systemd_restart(system: bool = False):
check=False,
timeout=30,
)
_run_systemctl(["reload-or-restart", get_service_name()], system=system, check=True, timeout=90)
print(f"{_service_scope_label(system).capitalize()} service restarted")
try:
_run_systemctl(["restart", get_service_name()], system=system, check=True, timeout=90)
except subprocess.CalledProcessError as exc:
if _systemd_error_indicates_start_limit(exc) or _systemd_service_is_start_limited(system=system):
_print_systemd_start_limit_wait(system=system)
return
raise
_wait_for_systemd_service_restart(system=system, previous_pid=pid)
@ -2136,6 +2402,10 @@ def systemd_status(deep: bool = False, system: bool = False, full: bool = False)
result_code = unit_props.get("Result", "")
if active_state == "activating" and sub_state == "auto-restart":
print(" ⏳ Restart pending: systemd is waiting to relaunch the gateway")
elif _systemd_unit_is_start_limited(unit_props):
print(" ⏳ Restart pending: systemd is temporarily rate-limiting starts")
print(f" Run after the start-limit window expires: {'sudo ' if system else ''}hermes gateway restart{scope_flag}")
print(f" Or clear it manually: systemctl {'--user ' if not system else ''}reset-failed {get_service_name()}")
elif active_state == "failed" and exec_main_status == str(GATEWAY_SERVICE_RESTART_EXIT_CODE):
print(" ⚠ Planned restart is stuck in systemd failed state (exit 75)")
print(f" Run: systemctl {'--user ' if not system else ''}reset-failed {get_service_name()} && {'sudo ' if system else ''}hermes gateway start{scope_flag}")
@ -2362,6 +2632,13 @@ def launchd_start():
def launchd_stop():
label = get_launchd_label()
target = f"{_launchd_domain()}/{label}"
try:
from gateway.status import get_running_pid, write_planned_stop_marker
pid = get_running_pid(cleanup_stale=False)
if pid is not None:
write_planned_stop_marker(pid)
except Exception:
pass
# bootout unloads the service definition so KeepAlive doesn't respawn
# the process. A plain `kill SIGTERM` only signals the process — launchd
# immediately restarts it because KeepAlive.SuccessfulExit = false.
@ -2493,6 +2770,42 @@ def launchd_status(deep: bool = False):
# Gateway Runner
# =============================================================================
def _truthy_env(value: str | None) -> bool:
return str(value or "").strip().lower() in {"1", "true", "yes", "on"}
def _is_official_docker_checkout() -> bool:
    """True when this checkout looks like the official Docker image layout:
    installed at /opt/hermes with the stock entrypoint script present."""
    if str(PROJECT_ROOT) != "/opt/hermes":
        return False
    return (PROJECT_ROOT / "docker" / "entrypoint.sh").is_file()
def _guard_official_docker_root_gateway() -> None:
    """Refuse gateway startup when the official Docker privilege drop was bypassed."""
    # Only relevant when we are actually root (platforms without geteuid
    # — e.g. Windows — are never treated as root here).
    running_as_root = hasattr(os, "geteuid") and os.geteuid() == 0
    if not running_as_root:
        return
    # Explicit opt-in, or not the official image layout: let it run.
    opted_in = _truthy_env(os.getenv("HERMES_ALLOW_ROOT_GATEWAY"))
    if opted_in or not _is_official_docker_checkout():
        return
    # Root + official image + no override: refuse loudly with remediation.
    print_error(
        "Refusing to run the Hermes gateway as root inside the official Docker image."
    )
    print(
        "  The image entrypoint normally drops privileges to the 'hermes' user. "
        "If you override entrypoint in Docker Compose, include "
        "/opt/hermes/docker/entrypoint.sh before the Hermes command."
    )
    print(
        "  Running the gateway as root can leave root-owned files in "
        "$HERMES_HOME and break later non-root dashboard/gateway runs."
    )
    print("  Set HERMES_ALLOW_ROOT_GATEWAY=1 only if you intentionally accept this risk.")
    sys.exit(1)
def run_gateway(verbose: int = 0, quiet: bool = False, replace: bool = False):
"""Run the gateway in foreground.
@ -2503,6 +2816,7 @@ def run_gateway(verbose: int = 0, quiet: bool = False, replace: bool = False):
This prevents systemd restart loops when the old process
hasn't fully exited yet.
"""
_guard_official_docker_root_gateway()
sys.path.insert(0, str(PROJECT_ROOT))
# Refresh the systemd unit definition on every boot so that restart
@ -4053,7 +4367,9 @@ def gateway_setup():
print_success("Gateway service is installed and running.")
elif service_installed:
print_warning("Gateway service is installed but not running.")
if prompt_yes_no(" Start it now?", True):
if supports_systemd_services() and _system_scope_wizard_would_need_root():
_print_system_scope_remediation("start")
elif prompt_yes_no(" Start it now?", True):
try:
if supports_systemd_services():
systemd_start()
@ -4063,6 +4379,12 @@ def gateway_setup():
print_error(" Failed to start — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
# Defense in depth: the pre-check above should have caught
# this, but handle the race/edge case gracefully instead of
# letting the exception escape the wizard.
print_error(f" Failed to start: {e}")
_print_system_scope_remediation("start")
except subprocess.CalledProcessError as e:
print_error(f" Failed to start: {e}")
else:
@ -4112,7 +4434,9 @@ def gateway_setup():
service_running = _is_service_running()
if service_running:
if prompt_yes_no(" Restart the gateway to pick up changes?", True):
if supports_systemd_services() and _system_scope_wizard_would_need_root():
_print_system_scope_remediation("restart")
elif prompt_yes_no(" Restart the gateway to pick up changes?", True):
try:
if supports_systemd_services():
systemd_restart()
@ -4125,10 +4449,15 @@ def gateway_setup():
print_error(" Restart failed — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
print_error(f" Restart failed: {e}")
_print_system_scope_remediation("restart")
except subprocess.CalledProcessError as e:
print_error(f" Restart failed: {e}")
elif service_installed:
if prompt_yes_no(" Start the gateway service?", True):
if supports_systemd_services() and _system_scope_wizard_would_need_root():
_print_system_scope_remediation("start")
elif prompt_yes_no(" Start the gateway service?", True):
try:
if supports_systemd_services():
systemd_start()
@ -4138,6 +4467,9 @@ def gateway_setup():
print_error(" Start failed — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
print_error(f" Start failed: {e}")
_print_system_scope_remediation("start")
except subprocess.CalledProcessError as e:
print_error(f" Start failed: {e}")
else:
@ -4211,6 +4543,14 @@ def gateway_command(args):
for line in str(e).splitlines():
print(f" {line}")
sys.exit(1)
except SystemScopeRequiresRootError as e:
# The direct ``hermes gateway install|uninstall|start|stop|restart``
# path lands here when the user typed a system-scope action without
# sudo. Same exit code as before — just gives the wizard a way to
# intercept the same condition with friendlier guidance before the
# error is raised.
print(str(e))
sys.exit(1)
def _gateway_command_inner(args):
@ -4535,6 +4875,9 @@ def _gateway_command_inner(args):
# Show other profiles' gateway status for multi-profile awareness
_print_other_profiles_gateway_status()
elif subcmd == "list":
_gateway_list()
elif subcmd == "migrate-legacy":
# Stop, disable, and remove legacy Hermes gateway unit files from
# pre-rename installs (e.g. hermes.service). Profile units and

View file

@ -70,6 +70,7 @@ def _task_to_dict(t: kb.Task) -> dict[str, Any]:
"completed_at": t.completed_at,
"result": t.result,
"skills": list(t.skills) if t.skills else [],
"max_retries": t.max_retries,
}
@ -284,6 +285,15 @@ def build_parser(parent_subparsers: argparse._SubParsersAction) -> argparse.Argu
"(repeatable). Appended to the built-in "
"kanban-worker skill. Example: "
"--skill translation --skill github-code-review")
p_create.add_argument("--max-retries", type=int, default=None,
metavar="N",
help="Per-task override for the consecutive-failure "
"circuit breaker. Trip on the Nth failure — "
"e.g. --max-retries 1 blocks on the first "
"failure (no retries), --max-retries 3 allows "
"two retries. Omit to use the dispatcher's "
"kanban.failure_limit config "
f"(default {kb.DEFAULT_FAILURE_LIMIT}).")
p_create.add_argument("--json", action="store_true", help="Emit JSON output")
# --- list ---
@ -308,6 +318,57 @@ def build_parser(parent_subparsers: argparse._SubParsersAction) -> argparse.Argu
p_assign.add_argument("task_id")
p_assign.add_argument("profile", help="Profile name (or 'none' to unassign)")
# --- reclaim / reassign (recovery) ---
p_reclaim = sub.add_parser(
"reclaim",
help="Release an active worker claim on a running task",
)
p_reclaim.add_argument("task_id")
p_reclaim.add_argument(
"--reason", default=None,
help="Human-readable reason (recorded on the reclaimed event)",
)
p_reassign = sub.add_parser(
"reassign",
help="Reassign a task to a different profile, optionally reclaiming first",
)
p_reassign.add_argument("task_id")
p_reassign.add_argument(
"profile",
help="New profile name (or 'none' to unassign)",
)
p_reassign.add_argument(
"--reclaim", action="store_true",
help="Release any active claim before reassigning (required if task is running)",
)
p_reassign.add_argument(
"--reason", default=None,
help="Human-readable reason (recorded on the reclaimed event)",
)
# --- diagnostics (board-wide health) ---
p_diag = sub.add_parser(
"diagnostics",
aliases=["diag"],
help="List active diagnostics on the current board",
)
p_diag.add_argument(
"--severity",
choices=["warning", "error", "critical"],
default=None,
help="Only show diagnostics at or above this severity",
)
p_diag.add_argument(
"--task",
default=None,
help="Only show diagnostics for one task id",
)
p_diag.add_argument(
"--json", action="store_true",
help="Emit JSON (structured) instead of the default human table",
)
# --- link / unlink ---
p_link = sub.add_parser("link", help="Add a parent->child dependency")
p_link.add_argument("parent_id")
@ -343,6 +404,27 @@ def build_parser(parent_subparsers: argparse._SubParsersAction) -> argparse.Argu
help='JSON dict of structured facts (e.g. \'{"changed_files": [...], '
'"tests_run": 12}\'). Stored on the closing run.')
p_edit = sub.add_parser(
"edit",
help="Edit recovery fields on an already-completed task",
)
p_edit.add_argument("task_id")
p_edit.add_argument(
"--result",
required=True,
help="Backfilled task result text for a done task",
)
p_edit.add_argument(
"--summary",
default=None,
help="Structured handoff summary. Falls back to --result if omitted.",
)
p_edit.add_argument(
"--metadata",
default=None,
help="JSON dict of structured facts to store on the latest completed run.",
)
p_block = sub.add_parser("block", help="Mark one or more tasks blocked")
p_block.add_argument("task_id")
p_block.add_argument("reason", nargs="*", help="Reason (also appended as a comment)")
@ -371,8 +453,8 @@ def build_parser(parent_subparsers: argparse._SubParsersAction) -> argparse.Argu
help="Cap number of spawns this pass")
p_disp.add_argument("--failure-limit", type=int,
default=kb.DEFAULT_SPAWN_FAILURE_LIMIT,
help=f"Auto-block a task after this many consecutive spawn failures "
f"(default: {kb.DEFAULT_SPAWN_FAILURE_LIMIT})")
help=f"Auto-block a task after this many consecutive non-success attempts "
f"(spawn_failed, timed_out, or crashed; default: {kb.DEFAULT_SPAWN_FAILURE_LIMIT})")
p_disp.add_argument("--json", action="store_true")
# --- daemon (deprecated) ---
@ -488,6 +570,42 @@ def build_parser(parent_subparsers: argparse._SubParsersAction) -> argparse.Argu
)
p_ctx.add_argument("task_id")
# --- specify --- (triage → todo via auxiliary LLM)
p_specify = sub.add_parser(
"specify",
help="Flesh out a triage-column task into a concrete spec "
"(title + body) and promote it to todo. Uses the auxiliary "
"LLM configured under auxiliary.triage_specifier.",
)
p_specify.add_argument(
"task_id",
nargs="?",
default=None,
help="Task id to specify (required unless --all is given)",
)
p_specify.add_argument(
"--all",
dest="all_triage",
action="store_true",
help="Specify every task currently in the triage column",
)
p_specify.add_argument(
"--tenant",
default=None,
help="When used with --all, restrict the sweep to this tenant",
)
p_specify.add_argument(
"--author",
default=None,
help="Author name recorded on the audit comment "
"(default: $HERMES_PROFILE or 'specifier')",
)
p_specify.add_argument(
"--json",
action="store_true",
help="Emit one JSON object per task on stdout",
)
# --- gc ---
p_gc = sub.add_parser(
"gc", help="Garbage-collect archived-task workspaces, old events, and old logs",
@ -576,11 +694,16 @@ def kanban_command(args: argparse.Namespace) -> int:
"ls": _cmd_list,
"show": _cmd_show,
"assign": _cmd_assign,
"reclaim": _cmd_reclaim,
"reassign": _cmd_reassign,
"diagnostics": _cmd_diagnostics,
"diag": _cmd_diagnostics,
"link": _cmd_link,
"unlink": _cmd_unlink,
"claim": _cmd_claim,
"comment": _cmd_comment,
"complete": _cmd_complete,
"edit": _cmd_edit,
"block": _cmd_block,
"unblock": _cmd_unblock,
"archive": _cmd_archive,
@ -597,6 +720,7 @@ def kanban_command(args: argparse.Namespace) -> int:
"notify-list": _cmd_notify_list,
"notify-unsubscribe": _cmd_notify_unsubscribe,
"context": _cmd_context,
"specify": _cmd_specify,
"gc": _cmd_gc,
}
handler = handlers.get(action)
@ -866,7 +990,12 @@ def _cmd_init(args: argparse.Namespace) -> int:
def _cmd_heartbeat(args: argparse.Namespace) -> int:
with kb.connect() as conn:
ok = kb.heartbeat_worker(conn, args.task_id, note=getattr(args, "note", None))
ok = kb.heartbeat_worker(
conn,
args.task_id,
note=getattr(args, "note", None),
expected_run_id=_worker_run_id_for(args.task_id),
)
if not ok:
print(f"cannot heartbeat {args.task_id} (not running?)", file=sys.stderr)
return 1
@ -900,6 +1029,14 @@ def _cmd_create(args: argparse.Namespace) -> int:
except ValueError as exc:
print(f"kanban: --max-runtime: {exc}", file=sys.stderr)
return 2
max_retries = getattr(args, "max_retries", None)
if max_retries is not None and max_retries < 1:
print(
f"kanban: --max-retries must be >= 1 (got {max_retries}); "
"use 1 to trip on the first failure.",
file=sys.stderr,
)
return 2
with kb.connect() as conn:
task_id = kb.create_task(
conn,
@ -916,6 +1053,7 @@ def _cmd_create(args: argparse.Namespace) -> int:
idempotency_key=getattr(args, "idempotency_key", None),
max_runtime_seconds=max_runtime,
skills=getattr(args, "skills", None) or None,
max_retries=max_retries,
)
task = kb.get_task(conn, task_id)
if getattr(args, "json", False):
@ -989,10 +1127,16 @@ def _cmd_show(args: argparse.Namespace) -> int:
parents = kb.parent_ids(conn, args.task_id)
children = kb.child_ids(conn, args.task_id)
runs = kb.list_runs(conn, args.task_id)
# Workers hand off via ``task_runs.summary`` (kanban-worker skill);
# ``tasks.result`` is left NULL unless the caller explicitly passed
# ``result=``. Surfacing the latest summary here keeps ``show`` from
# looking like a no-op when the worker actually did real work.
latest_summary = kb.latest_summary(conn, args.task_id)
if getattr(args, "json", False):
payload = {
"task": _task_to_dict(task),
"latest_summary": latest_summary,
"parents": parents,
"children": children,
"comments": [
@ -1037,7 +1181,49 @@ def _cmd_show(args: argparse.Namespace) -> int:
(f" @ {task.workspace_path}" if task.workspace_path else ""))
if task.skills:
print(f" skills: {', '.join(task.skills)}")
# Effective retry threshold. Show the per-task override if set,
# otherwise the dispatcher's resolved value from config (or the
# default if config doesn't set it either). Helps operators see
# why a task auto-blocked earlier/later than they expected.
if task.max_retries is not None:
print(f" max-retries: {task.max_retries} (task)")
else:
try:
from hermes_cli.config import load_config
cfg = load_config()
cfg_val = (cfg.get("kanban", {}) or {}).get("failure_limit")
except Exception:
cfg_val = None
if cfg_val is not None and int(cfg_val) != kb.DEFAULT_FAILURE_LIMIT:
print(f" max-retries: {int(cfg_val)} (config kanban.failure_limit)")
else:
print(f" max-retries: {kb.DEFAULT_FAILURE_LIMIT} (default)")
print(f" created: {_fmt_ts(task.created_at)} by {task.created_by or '-'}")
# Diagnostics section — surface active distress signals at the top
# of show output so CLI users see them before scrolling through
# comments / runs.
from hermes_cli import kanban_diagnostics as kd
diags = kd.compute_task_diagnostics(task, events, runs)
if diags:
sev_marker = {"warning": "", "error": "!!", "critical": "!!!"}
print(f"\n Diagnostics ({len(diags)}):")
for d in diags:
print(f" {sev_marker.get(d.severity, '?')} [{d.severity}] {d.title}")
if d.data:
bits = []
for k, v in d.data.items():
if isinstance(v, list):
bits.append(f"{k}={','.join(str(x) for x in v)}")
else:
bits.append(f"{k}={v}")
if bits:
print(f" data: {' | '.join(bits)}")
# Only show suggested actions in show output to keep it tight;
# full list is available via `kanban diagnostics --task <id>`.
for a in d.actions:
if a.suggested:
print(f"{a.label}")
if task.started_at:
print(f" started: {_fmt_ts(task.started_at)}")
if task.completed_at:
@ -1054,6 +1240,13 @@ def _cmd_show(args: argparse.Namespace) -> int:
print()
print("Result:")
print(task.result)
elif latest_summary:
# Worker handoff lives on the latest run, not on tasks.result.
# Surface it at top-level so a glance at ``hermes kanban show <id>``
# tells you what the worker did even if tasks.result is empty.
print()
print("Latest summary:")
print(latest_summary)
if comments:
print()
print(f"Comments ({len(comments)}):")
@ -1095,6 +1288,167 @@ def _cmd_assign(args: argparse.Namespace) -> int:
return 0
def _cmd_reclaim(args: argparse.Namespace) -> int:
    """CLI handler: release an active worker claim on a running task.

    Returns 0 on success, 1 when the task isn't running or the id is unknown.
    """
    reason = getattr(args, "reason", None)
    with kb.connect() as conn:
        released = kb.reclaim_task(conn, args.task_id, reason=reason)
    if released:
        print(f"Reclaimed {args.task_id}")
        return 0
    print(
        f"cannot reclaim {args.task_id} (not running or unknown id)",
        file=sys.stderr,
    )
    return 1
def _cmd_reassign(args: argparse.Namespace) -> int:
    """CLI handler: move a task to a different profile, optionally releasing
    an active claim first (``--reclaim``).

    Returns 0 on success, 1 when the id is unknown or the task is still
    running without ``--reclaim``.
    """
    wants_reclaim = bool(getattr(args, "reclaim", False))
    # "none" / "-" / "null" (any case) mean "unassign".
    profile = args.profile
    if profile.lower() in ("none", "-", "null"):
        profile = None
    with kb.connect() as conn:
        ok = kb.reassign_task(
            conn, args.task_id, profile,
            reclaim_first=wants_reclaim,
            reason=getattr(args, "reason", None),
        )
    if not ok:
        print(
            f"cannot reassign {args.task_id} "
            f"(unknown id, or still running — pass --reclaim to release first)",
            file=sys.stderr,
        )
        return 1
    suffix = " (claim reclaimed)" if wants_reclaim else ""
    print(f"Reassigned {args.task_id} to {profile or '(unassigned)'}" + suffix)
    return 0
def _cmd_diagnostics(args: argparse.Namespace) -> int:
    """List active diagnostics on the board. Wraps the same rule engine
    the dashboard uses, so CLI output matches what the UI shows.

    Modes:
      * ``--task <id>`` — diagnostics for that one task only.
      * default (fleet) — diagnostics for every non-archived task.
    ``--severity`` filters to one severity; ``--json`` emits a structured
    list instead of the human table. Returns 0 normally, 1 only when
    ``--task`` names an unknown id.
    """
    from hermes_cli import kanban_diagnostics as kd
    with kb.connect() as conn:
        # Either one-task mode or fleet mode.
        if getattr(args, "task", None):
            task = kb.get_task(conn, args.task)
            if task is None:
                print(f"no such task: {args.task}", file=sys.stderr)
                return 1
            diags_by_task = {
                args.task: kd.compute_task_diagnostics(
                    task,
                    kb.list_events(conn, args.task),
                    kb.list_runs(conn, args.task),
                )
            }
        else:
            # Fleet mode: pull all non-archived tasks + their events/runs.
            rows = list(conn.execute(
                "SELECT * FROM tasks WHERE status != 'archived'"
            ).fetchall())
            ids = [r["id"] for r in rows]
            if not ids:
                diags_by_task = {}
            else:
                # Bulk-fetch events and runs in two IN(...) queries rather
                # than 2*N per-task round trips.
                placeholders = ",".join(["?"] * len(ids))
                ev_by = {i: [] for i in ids}
                for row in conn.execute(
                    f"SELECT * FROM task_events WHERE task_id IN ({placeholders}) ORDER BY id",
                    tuple(ids),
                ):
                    ev_by.setdefault(row["task_id"], []).append(row)
                run_by = {i: [] for i in ids}
                for row in conn.execute(
                    f"SELECT * FROM task_runs WHERE task_id IN ({placeholders}) ORDER BY id",
                    tuple(ids),
                ):
                    run_by.setdefault(row["task_id"], []).append(row)
                diags_by_task = {}
                for r in rows:
                    tid = r["id"]
                    dl = kd.compute_task_diagnostics(r, ev_by.get(tid, []), run_by.get(tid, []))
                    if dl:
                        diags_by_task[tid] = dl
        # Severity filter.
        sev = getattr(args, "severity", None)
        if sev:
            # Drop tasks whose diagnostics are all filtered out, so the
            # output never shows an empty task group.
            for tid in list(diags_by_task.keys()):
                kept = [d for d in diags_by_task[tid] if d.severity == sev]
                if kept:
                    diags_by_task[tid] = kept
                else:
                    del diags_by_task[tid]
        # Map task_id → title/status/assignee for the table output.
        meta: dict[str, dict] = {}
        if diags_by_task:
            placeholders = ",".join(["?"] * len(diags_by_task))
            for r in conn.execute(
                f"SELECT id, title, status, assignee FROM tasks WHERE id IN ({placeholders})",
                tuple(diags_by_task.keys()),
            ):
                meta[r["id"]] = {
                    "title": r["title"], "status": r["status"],
                    "assignee": r["assignee"],
                }
    if getattr(args, "json", False):
        out_json = [
            {
                "task_id": tid,
                **meta.get(tid, {}),
                "diagnostics": [d.to_dict() for d in dl],
            }
            for tid, dl in diags_by_task.items()
        ]
        print(json.dumps(out_json, indent=2, ensure_ascii=False))
        return 0
    if not diags_by_task:
        print("No active diagnostics on this board.")
        return 0
    # Human-readable summary: grouped by task, severity-marked, with
    # suggested actions inline.
    sev_marker = {"warning": "", "error": "!!", "critical": "!!!"}
    total = sum(len(dl) for dl in diags_by_task.values())
    print(
        f"{total} active diagnostic(s) across "
        f"{len(diags_by_task)} task(s):\n"
    )
    for tid, dl in diags_by_task.items():
        m = meta.get(tid, {})
        title = m.get("title") or "(untitled)"
        status = m.get("status") or "?"
        assignee = m.get("assignee") or "(unassigned)"
        print(f"  {tid}  {status:8s}  @{assignee:18s}  {title}")
        for d in dl:
            print(f"     {sev_marker.get(d.severity, '?')} [{d.severity}] {d.kind}: {d.title}")
            if d.data:
                # Compact key:value pairs on one line.
                bits = []
                for k, v in d.data.items():
                    if isinstance(v, list):
                        bits.append(f"{k}={','.join(str(x) for x in v)}")
                    else:
                        bits.append(f"{k}={v}")
                if bits:
                    print(f"       data: {' | '.join(bits)}")
            # Suggested actions first.
            for a in d.actions:
                if a.suggested:
                    print(f"      → {a.label}")
        print()
    return 0
def _cmd_link(args: argparse.Namespace) -> int:
with kb.connect() as conn:
kb.link_tasks(conn, args.parent_id, args.child_id)
@ -1143,6 +1497,18 @@ def _cmd_comment(args: argparse.Namespace) -> int:
return 0
def _worker_run_id_for(task_id: str) -> Optional[int]:
if os.environ.get("HERMES_KANBAN_TASK") != task_id:
return None
raw = os.environ.get("HERMES_KANBAN_RUN_ID")
if not raw:
return None
try:
return int(raw)
except ValueError:
return None
def _cmd_complete(args: argparse.Namespace) -> int:
"""Mark one or more tasks done. Supports a single id or a list."""
ids = list(args.task_ids or [])
@ -1179,6 +1545,7 @@ def _cmd_complete(args: argparse.Namespace) -> int:
result=args.result,
summary=summary,
metadata=metadata,
expected_run_id=_worker_run_id_for(tid),
):
failed.append(tid)
print(f"cannot complete {tid} (unknown id or terminal state)", file=sys.stderr)
@ -1187,6 +1554,34 @@ def _cmd_complete(args: argparse.Namespace) -> int:
return 0 if not failed else 1
def _cmd_edit(args: argparse.Namespace) -> int:
    """Backfill recovery fields (result/summary/metadata) on a completed task.

    Exit codes: 0 on success, 1 when the id is unknown or the task is not
    done, 2 when ``--metadata`` is not a valid JSON object.
    """
    raw_meta = getattr(args, "metadata", None)
    metadata = None
    if raw_meta:
        try:
            metadata = json.loads(raw_meta)
            if not isinstance(metadata, dict):
                raise ValueError("must be a JSON object")
        # json.JSONDecodeError subclasses ValueError, so one clause covers
        # both malformed JSON and the non-dict check above.
        except ValueError as exc:
            print(f"kanban: --metadata: {exc}", file=sys.stderr)
            return 2
    with kb.connect() as conn:
        if not kb.edit_completed_task_result(
            conn,
            args.task_id,
            result=args.result,
            summary=getattr(args, "summary", None),
            metadata=metadata,
        ):
            print(
                f"cannot edit {args.task_id} (unknown id or task is not done)",
                file=sys.stderr,
            )
            return 1
    print(f"Edited {args.task_id}")
    return 0
def _cmd_block(args: argparse.Namespace) -> int:
reason = " ".join(args.reason).strip() if args.reason else None
author = _profile_author()
@ -1196,7 +1591,12 @@ def _cmd_block(args: argparse.Namespace) -> int:
for tid in ids:
if reason:
kb.add_comment(conn, tid, author, f"BLOCKED: {reason}")
if not kb.block_task(conn, tid, reason=reason):
if not kb.block_task(
conn,
tid,
reason=reason,
expected_run_id=_worker_run_id_for(tid),
):
failed.append(tid)
print(f"cannot block {tid}", file=sys.stderr)
else:
@ -1274,6 +1674,7 @@ def _cmd_dispatch(args: argparse.Namespace) -> int:
for (tid, who, ws) in res.spawned
],
"skipped_unassigned": res.skipped_unassigned,
"skipped_nonspawnable": res.skipped_nonspawnable,
}, indent=2))
return 0
print(f"Reclaimed: {res.reclaimed}")
@ -1293,6 +1694,11 @@ def _cmd_dispatch(args: argparse.Namespace) -> int:
print(f" - {tid} -> {who} @ {ws or '-'}{tag}")
if res.skipped_unassigned:
print(f"Skipped (unassigned): {', '.join(res.skipped_unassigned)}")
if res.skipped_nonspawnable:
print(
f"Skipped (non-spawnable assignee — terminal lane, OK): "
f"{', '.join(res.skipped_nonspawnable)}"
)
return 0
@ -1324,6 +1730,7 @@ def _cmd_daemon(args: argparse.Namespace) -> int:
" kanban:\n"
" dispatch_in_gateway: true # default\n"
" dispatch_interval_seconds: 60\n"
" failure_limit: 2 # consecutive non-success attempts before auto-block\n"
"\n"
"Running both the gateway AND this standalone daemon will\n"
"race for claims. If you truly need the old standalone\n"
@ -1404,16 +1811,18 @@ def _cmd_daemon(args: argparse.Namespace) -> int:
)
def _ready_queue_nonempty() -> bool:
"""Cheap SELECT — just asks whether there's at least one ready
task with an assignee that the dispatcher could have picked up."""
"""Cheap probe — is there at least one ready+assigned+unclaimed
task whose assignee maps to a real Hermes profile (i.e. one the
dispatcher would actually try to spawn for)?
Filters out tasks assigned to control-plane lanes
(e.g. ``orion-cc``, ``orion-research``) that are pulled by
terminals via ``claim_task`` directly those are correctly idle
from the dispatcher's perspective, not stuck.
"""
try:
with kb.connect() as conn:
row = conn.execute(
"SELECT 1 FROM tasks "
"WHERE status = 'ready' AND assignee IS NOT NULL "
" AND claim_lock IS NULL LIMIT 1"
).fetchone()
return row is not None
return kb.has_spawnable_ready(conn)
except Exception:
return False
@ -1608,6 +2017,80 @@ def _cmd_context(args: argparse.Namespace) -> int:
return 0
def _cmd_specify(args: argparse.Namespace) -> int:
    """Flesh out a triage task (or all of them) via auxiliary LLM,
    then promote to todo. Thin wrapper over ``kanban_specify``.

    Exit codes: 2 for argument errors (both or neither of task id and
    ``--all``). Single-id mode returns 0 only when that task was
    specified. ``--all`` mode returns 0 when at least one promotion
    landed (or triage was empty), 1 when every candidate failed.
    """
    from hermes_cli import kanban_specify as spec
    all_flag = bool(getattr(args, "all_triage", False))
    tenant = getattr(args, "tenant", None)
    # Audit author: explicit --author wins, else the profile-derived default.
    author = getattr(args, "author", None) or _profile_author()
    want_json = bool(getattr(args, "json", False))
    if args.task_id and all_flag:
        print(
            "kanban: pass either a task id OR --all, not both",
            file=sys.stderr,
        )
        return 2
    if all_flag:
        ids = spec.list_triage_ids(tenant=tenant)
        if not ids:
            msg = (
                "No triage tasks"
                + (f" for tenant {tenant!r}" if tenant else "")
                + "."
            )
            if want_json:
                print(json.dumps({"specified": 0, "total": 0}))
            else:
                print(msg)
            return 0
    elif args.task_id:
        ids = [args.task_id]
    else:
        print(
            "kanban: specify requires a task id or --all",
            file=sys.stderr,
        )
        return 2
    ok_count = 0
    fail_count = 0
    for tid in ids:
        outcome = spec.specify_task(tid, author=author)
        if outcome.ok:
            ok_count += 1
        else:
            fail_count += 1
        # One line (or one JSON object) per task, success or failure.
        if want_json:
            print(json.dumps({
                "task_id": outcome.task_id,
                "ok": outcome.ok,
                "reason": outcome.reason,
                "new_title": outcome.new_title,
            }))
        else:
            if outcome.ok:
                title_suffix = (
                    f" — retitled: {outcome.new_title!r}"
                    if outcome.new_title
                    else ""
                )
                print(f"Specified {outcome.task_id} → todo{title_suffix}")
            else:
                print(
                    f"kanban: specify {outcome.task_id}: {outcome.reason}",
                    file=sys.stderr,
                )
    if not all_flag:
        return 0 if ok_count == 1 else 1
    # --all: succeed if at least one promotion landed; exit 1 only when
    # every candidate failed (honest signal for scripts).
    return 0 if (ok_count > 0 or not ids) else 1
def _cmd_gc(args: argparse.Namespace) -> int:
"""Remove scratch workspaces of archived tasks, prune old events, and
delete old worker logs."""

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,649 @@
"""Kanban diagnostics — structured, actionable distress signals for tasks.
A ``Diagnostic`` is a machine-readable description of something that's wrong
with a kanban task: a hallucinated card id, a spawn crash-loop, a task
stuck blocked for too long, etc. Each one carries:
* A **kind** (canonical code; UI/tests match on this).
* A **severity** (``warning`` / ``error`` / ``critical``).
* A **title** (one-line human description) and **detail** (longer text).
* A list of **suggested actions** — structured entries the dashboard
  turns into buttons and the CLI turns into hints.
Rules run over (task, recent events, recent runs) and emit diagnostics.
They are stateless and read-only no DB writes. Callers compute
diagnostics on demand (on ``/board`` load, ``/tasks/:id`` fetch, or
``hermes kanban diagnostics``).
Design goals:
* Fixable-on-the-operator's-side signals only (missing config, phantom
  ids, crash loop). Not "the provider returned 502 once" — that's a
  transient runtime blip, not a diagnostic.
* Recoverable: every diagnostic comes with at least one suggested
recovery action the operator can actually take from the UI.
* Auto-clearing: when the underlying failure mode resolves (a clean
``completed`` event arrives, a spawn succeeds, the task gets
unblocked), the diagnostic stops firing. The audit event trail stays.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable, Iterable, Optional
import json
import time
# Severity rungs, ordered least → most urgent. The UI colors them
# amber (warning), orange (error), red (critical). Sorted outputs put
# critical first so operators see the worst fires at the top.
SEVERITY_ORDER = ("warning", "error", "critical")
@dataclass
class DiagnosticAction:
    """One recovery step attached to a diagnostic.

    ``kind`` selects the rendering on both surfaces:

    * ``reclaim`` / ``reassign`` — POST to the matching /tasks/:id/*
      endpoint; the dashboard wires these into the recovery popover.
    * ``unblock`` — PATCH status back to ``ready`` (for stuck-blocked
      diagnostics).
    * ``cli_hint`` — print/copy a shell command (e.g.
      ``hermes -p <profile> auth``); no HTTP side effect.
    * ``open_docs`` — deep-link to the URL named in ``payload.url``.
    * ``comment`` — nudge the operator to add a comment (stuck-blocked
      tasks that need human input).

    ``suggested=True`` flags the recommended first step; the UI
    highlights it. More than one action may carry it when they're
    equally valid.
    """
    kind: str
    label: str
    payload: dict = field(default_factory=dict)
    suggested: bool = False

    def to_dict(self) -> dict:
        """JSON-ready form. ``payload`` is shared by reference, not copied."""
        return dict(
            kind=self.kind,
            label=self.label,
            payload=self.payload,
            suggested=self.suggested,
        )
@dataclass
class Diagnostic:
    """One active distress signal on a task."""
    kind: str
    severity: str  # one of "warning" | "error" | "critical"
    title: str
    detail: str
    actions: list[DiagnosticAction] = field(default_factory=list)
    first_seen_at: int = 0
    last_seen_at: int = 0
    count: int = 1
    # Run id this diagnostic is scoped to; None means task-wide.
    run_id: Optional[int] = None
    # Structured payload for the UI (phantom ids, failure count, ...).
    data: dict = field(default_factory=dict)

    def to_dict(self) -> dict:
        """Serialise for JSON transport, expanding nested actions."""
        out = dict(
            kind=self.kind,
            severity=self.severity,
            title=self.title,
            detail=self.detail,
        )
        out["actions"] = [a.to_dict() for a in self.actions]
        out["first_seen_at"] = self.first_seen_at
        out["last_seen_at"] = self.last_seen_at
        out["count"] = self.count
        out["run_id"] = self.run_id
        out["data"] = self.data
        return out
# ---------------------------------------------------------------------------
# Rule helpers
# ---------------------------------------------------------------------------
def _task_field(task, name, default=None):
"""Read a field from a task regardless of representation.
Callers pass sqlite3.Row (dict-like with [] but no attribute
access), kanban_db.Task dataclasses (attribute access), or plain
dicts (both). This normalises them so rule functions don't have
to branch on type each time.
"""
if task is None:
return default
# sqlite Row + plain dicts both support mapping access; Row also
# supports .keys().
try:
# Row raises IndexError if the key isn't a column in the query;
# dicts return default via .get. Handle both.
if hasattr(task, "keys") and name in task.keys():
return task[name]
except Exception:
pass
if isinstance(task, dict):
return task.get(name, default)
return getattr(task, name, default)
def _parse_payload(ev) -> dict:
    """Return the event payload, tolerating dicts or JSON strings."""
    raw = _task_field(ev, "payload", None)
    if isinstance(raw, dict):
        return raw
    if isinstance(raw, str):
        try:
            decoded = json.loads(raw)
        except Exception:
            return {}
        return decoded or {}
    # None or any other type: treat as an empty payload.
    return {}
def _event_kind(ev) -> str:
    """Event kind string; empty string when absent or None."""
    kind = _task_field(ev, "kind", "")
    return kind if kind else ""


def _event_ts(ev) -> int:
    """Event creation timestamp as an int; 0 when absent or None."""
    return int(_task_field(ev, "created_at", 0) or 0)
def _active_hallucination_events(
    events: Iterable[Any],
    kind: str,
) -> list[Any]:
    """Events of ``kind`` not superseded by a later clean event.

    Walks the list once in arrival order (callers pass the task's
    full event list, which the DB returns sorted by id asc, i.e.
    chronologically): every ``completed``/``edited`` event wipes the
    accumulator, every matching event appends to it. Whatever survives
    has no clean event strictly after it.
    """
    survivors: list[Any] = []
    for ev in events:
        current = _event_kind(ev)
        if current in ("completed", "edited"):
            survivors = []
        elif current == kind:
            survivors.append(ev)
    return survivors
def _latest_clean_event_ts(events: Iterable[Any]) -> int:
    """Timestamp of the newest ``completed``/``edited`` event (0 if none).

    Retained for general "was this task ever completed cleanly"
    lookups; the hallucination rules use
    ``_active_hallucination_events`` instead, since they need strict
    ordering rather than a max timestamp.
    """
    clean_ts = (
        _event_ts(ev)
        for ev in events
        if _event_kind(ev) in ("completed", "edited")
    )
    return max(clean_ts, default=0)
# Standard always-available actions. Every diagnostic can offer these as
# fallbacks regardless of kind — they're the two baseline recovery
# primitives the kernel supports.
def _generic_recovery_actions(task: Any, *, running: bool) -> list[DiagnosticAction]:
    """Baseline reclaim/reassign actions; reclaim only while running."""
    actions = [
        DiagnosticAction(
            kind="reassign",
            label="Reassign to different profile",
            payload={"reclaim_first": running},
        ),
    ]
    if running:
        actions.insert(0, DiagnosticAction(
            kind="reclaim",
            label="Reclaim task",
            payload={},
        ))
    return actions
# ---------------------------------------------------------------------------
# Rule implementations
# ---------------------------------------------------------------------------
# Each rule takes (task, events, runs, now_ts, config) and returns
# zero or more Diagnostic instances. ``events`` / ``runs`` are lists of
# kanban_db.Event / kanban_db.Run (or plain dicts matching the same
# shape — for test convenience).
# Signature every entry in the _RULES registry below must satisfy.
RuleFn = Callable[[Any, list[Any], list[Any], int, dict], list[Diagnostic]]
def _rule_hallucinated_cards(task, events, runs, now, cfg) -> list[Diagnostic]:
    """Blocked-hallucination gate fired: a worker called kanban_complete
    with created_cards that didn't exist or weren't created by the
    completing profile. The task stayed in its prior state; the operator
    needs to decide how to proceed.

    Auto-clears when a successful completion (or edit) follows the
    blocked event.
    """
    hits = _active_hallucination_events(events, "completion_blocked_hallucination")
    if not hits:
        return []
    # Collect the distinct phantom ids across every still-active hit,
    # preserving first-seen order for the UI.
    phantom_ids: list[str] = []
    first = _event_ts(hits[0])
    last = _event_ts(hits[-1])
    for ev in hits:
        payload = _parse_payload(ev)
        for pid in payload.get("phantom_cards", []) or []:
            if pid not in phantom_ids:
                phantom_ids.append(pid)
    running = _task_field(task, "status") == "running"
    actions: list[DiagnosticAction] = []
    actions.append(DiagnosticAction(
        kind="comment",
        label="Add a comment explaining what to do",
        suggested=False,
    ))
    actions.extend(_generic_recovery_actions(task, running=running))
    return [Diagnostic(
        kind="hallucinated_cards",
        severity="error",
        title="Worker claimed cards that don't exist",
        # Plain strings here — the previous f-prefixes had no
        # placeholders (lint F541) and served no purpose.
        detail=(
            "The completing worker declared created_cards that either didn't "
            "exist or weren't created by its profile. The completion was "
            "blocked and the task stayed in its prior state. "
            "Usually means the worker hallucinated ids instead of capturing "
            "return values from kanban_create."
        ),
        actions=actions,
        first_seen_at=first,
        last_seen_at=last,
        count=len(hits),
        data={"phantom_ids": phantom_ids},
    )]
def _rule_prose_phantom_refs(task, events, runs, now, cfg) -> list[Diagnostic]:
    """Advisory prose-scan: the completion summary mentions ``t_<hex>``
    ids that don't resolve. Non-blocking; surfaced as a warning only.

    Auto-clears when a fresh clean completion arrives AFTER the
    suspected event.
    """
    hits = _active_hallucination_events(events, "suspected_hallucinated_references")
    if not hits:
        return []
    # Deduplicate phantom refs across hits, keeping first-seen order.
    seen_refs: list[str] = []
    for ev in hits:
        for ref in _parse_payload(ev).get("phantom_refs", []) or []:
            if ref not in seen_refs:
                seen_refs.append(ref)
    is_running = _task_field(task, "status") == "running"
    diag = Diagnostic(
        kind="prose_phantom_refs",
        severity="warning",
        title="Completion summary references unknown task ids",
        detail=(
            "The completion summary mentions task ids that don't resolve "
            "in this board's database. The completion itself succeeded, "
            "but downstream consumers parsing the summary may be pointed "
            "at cards that never existed."
        ),
        actions=_generic_recovery_actions(task, running=is_running),
        first_seen_at=_event_ts(hits[0]),
        last_seen_at=_event_ts(hits[-1]),
        count=len(hits),
        data={"phantom_refs": seen_refs},
    )
    return [diag]
def _rule_repeated_failures(task, events, runs, now, cfg) -> list[Diagnostic]:
    """Task's unified ``consecutive_failures`` counter is climbing —
    something about this task+profile combo is broken and each retry
    fails the same way. Triggers regardless of the specific failure
    mode (spawn error, timeout, crash) because operationally they
    all look the same: the kernel keeps retrying and the operator
    needs to intervene.

    Threshold: cfg["failure_threshold"] (default 3). A threshold of 3
    is one below the circuit-breaker's default (5), so the diagnostic
    surfaces BEFORE the breaker trips — giving operators a window to
    fix the problem while the dispatcher's still retrying.

    Accepts the legacy ``spawn_failure_threshold`` config key for
    back-compat.
    """
    threshold = int(cfg.get(
        "failure_threshold",
        cfg.get("spawn_failure_threshold", 3),
    ))
    # Read the new unified counter name, with a fallback to the legacy
    # column name so this rule keeps working against old DB rows the
    # caller somehow materialised without running the migration.
    failures = (
        _task_field(task, "consecutive_failures", None)
        if _task_field(task, "consecutive_failures", None) is not None
        else _task_field(task, "spawn_failures", 0)
    )
    if failures is None or failures < threshold:
        return []
    last_err = (
        _task_field(task, "last_failure_error", None)
        if _task_field(task, "last_failure_error", None) is not None
        else _task_field(task, "last_spawn_error", None)
    )
    assignee = _task_field(task, "assignee")
    # Classify the most recent failure by peeking at run outcomes so
    # the title + suggested action can be specific without a separate
    # per-outcome rule.
    ordered_runs = sorted(runs, key=lambda r: _task_field(r, "id", 0))
    most_recent_outcome = None
    for r in reversed(ordered_runs):
        oc = _task_field(r, "outcome")
        if oc in ("spawn_failed", "timed_out", "crashed"):
            most_recent_outcome = oc
            break
    actions: list[DiagnosticAction] = []
    if most_recent_outcome == "spawn_failed" and assignee and assignee != "default":
        # Spawn is failing specifically — profile setup issue.
        actions.append(DiagnosticAction(
            kind="cli_hint",
            label=f"Verify profile: hermes -p {assignee} doctor",
            payload={"command": f"hermes -p {assignee} doctor"},
            suggested=True,
        ))
        actions.append(DiagnosticAction(
            kind="cli_hint",
            label=f"Fix profile auth: hermes -p {assignee} auth",
            payload={"command": f"hermes -p {assignee} auth"},
        ))
    elif most_recent_outcome in ("timed_out", "crashed"):
        # Worker got off the ground but died. Logs are the right place
        # to diagnose; reclaim/reassign are the recovery levers.
        task_id = _task_field(task, "id")
        if task_id:
            actions.append(DiagnosticAction(
                kind="cli_hint",
                label=f"Check logs: hermes kanban log {task_id}",
                payload={"command": f"hermes kanban log {task_id}"},
                suggested=True,
            ))
    actions.extend(_generic_recovery_actions(
        task, running=_task_field(task, "status") == "running",
    ))
    severity = "critical" if failures >= threshold * 2 else "error"
    err_text = (last_err or "").strip() if last_err else ""
    # BUGFIX: the truncation marker had been lost (both conditional
    # branches were empty strings). Long errors are clamped to 500
    # chars and visibly marked with an ellipsis.
    err_snippet = err_text[:500] + ("…" if len(err_text) > 500 else "") if err_text else ""
    outcome_label = {
        "spawn_failed": "spawn",
        "timed_out": "timeout",
        "crashed": "crash",
    }.get(most_recent_outcome or "", "failure")
    if err_snippet:
        title = f"Agent {outcome_label} x{failures}: {err_snippet.splitlines()[0][:160]}"
        detail = (
            f"This task has failed {failures} times in a row "
            f"(most recent: {outcome_label}). Full last error:\n\n"
            f"{err_snippet}\n\n"
            f"The dispatcher will keep retrying until the consecutive-"
            f"failures counter trips the circuit breaker (default 5), "
            f"at which point the task auto-blocks. Fix the root cause "
            f"and reclaim to retry."
        )
    else:
        title = f"Agent {outcome_label} x{failures} (no error recorded)"
        detail = (
            f"This task has failed {failures} times in a row "
            f"(most recent: {outcome_label}) but no error text was "
            f"captured. Check the suggested command or the worker log."
        )
    return [Diagnostic(
        kind="repeated_failures",
        severity=severity,
        title=title,
        detail=detail,
        actions=actions,
        first_seen_at=now,
        last_seen_at=now,
        count=failures,
        data={
            "consecutive_failures": failures,
            "most_recent_outcome": most_recent_outcome,
            "last_error": last_err,
        },
    )]
def _rule_repeated_crashes(task, events, runs, now, cfg) -> list[Diagnostic]:
    """The worker spawns fine but keeps crashing mid-run. Check the last
    N runs' outcomes; N consecutive ``crashed`` without a successful
    ``completed`` means something about the task + profile combo is
    broken (OOM, missing dependency, tool it needs is down).

    Threshold: cfg["crash_threshold"] (default 2).

    Narrower than ``repeated_failures`` — fires earlier (2 crashes vs 3
    total failures) so the operator gets a crash-specific heads-up
    before the unified rule kicks in. Suppresses itself when the
    unified rule is also about to fire, to avoid double-flagging.
    """
    failure_threshold = int(cfg.get(
        "failure_threshold",
        cfg.get("spawn_failure_threshold", 3),
    ))
    unified_counter = (
        _task_field(task, "consecutive_failures", 0) or 0
    )
    # Unified rule will catch this — let it handle to avoid double fire.
    if unified_counter >= failure_threshold:
        return []
    threshold = int(cfg.get("crash_threshold", 2))
    ordered = sorted(runs, key=lambda r: _task_field(r, "id", 0))
    # Count trailing consecutive 'crashed' outcomes.
    consecutive = 0
    last_err = None
    for r in reversed(ordered):
        outcome = _task_field(r, "outcome")
        if outcome == "crashed":
            consecutive += 1
            if last_err is None:
                last_err = _task_field(r, "error")
        elif outcome in ("completed", "reclaimed"):
            # A success (or manual reclaim) breaks the streak.
            break
        else:
            # Other outcomes (timed_out, blocked, spawn_failed, gave_up)
            # aren't crash signals — don't count them, but they also
            # don't break the crash streak.
            continue
    if consecutive < threshold:
        return []
    task_id = _task_field(task, "id")
    actions: list[DiagnosticAction] = []
    if task_id:
        actions.append(DiagnosticAction(
            kind="cli_hint",
            label=f"Check logs: hermes kanban log {task_id}",
            payload={"command": f"hermes kanban log {task_id}"},
            suggested=True,
        ))
    running = _task_field(task, "status") == "running"
    actions.extend(_generic_recovery_actions(task, running=running))
    severity = "critical" if consecutive >= threshold * 2 else "error"
    # Put the actual error up-front so operators see WHAT broke without
    # having to open the logs. Truncate defensively — these can be huge
    # (full tracebacks).
    err_text = (last_err or "").strip() if last_err else ""
    # BUGFIX: restore the ellipsis truncation marker that had been
    # lost (both conditional branches were empty strings).
    err_snippet = err_text[:500] + ("…" if len(err_text) > 500 else "") if err_text else ""
    if err_snippet:
        title = f"Agent crashed {consecutive}x: {err_snippet.splitlines()[0][:160]}"
        detail = (
            f"The last {consecutive} runs ended with outcome=crashed. "
            f"Full last error:\n\n{err_snippet}"
        )
    else:
        title = f"Agent crashed {consecutive}x (no error recorded)"
        detail = (
            f"The last {consecutive} runs ended with outcome=crashed but "
            f"no error text was captured. Check the worker log for more."
        )
    return [Diagnostic(
        kind="repeated_crashes",
        severity=severity,
        title=title,
        detail=detail,
        actions=actions,
        first_seen_at=now,
        last_seen_at=now,
        count=consecutive,
        data={"consecutive_crashes": consecutive, "last_error": last_err},
    )]
def _rule_stuck_in_blocked(task, events, runs, now, cfg) -> list[Diagnostic]:
    """Warn when a task has sat in ``blocked`` too long with no follow-up.

    Threshold: cfg["blocked_stale_hours"] (default 24). Any comment or
    unblock attempt after the most recent block resets the signal.
    Surfaced as a warning so humans know there's a pending unblock.
    """
    if _task_field(task, "status") != "blocked":
        return []
    stale_after = float(cfg.get("blocked_stale_hours", 24))
    # Locate the most recent ``blocked`` transition.
    blocked_at = max(
        (_event_ts(ev) for ev in events if _event_kind(ev) == "blocked"),
        default=0,
    )
    if blocked_at == 0:
        return []
    age_hours = (now - blocked_at) / 3600.0
    if age_hours < stale_after:
        return []
    # A later comment or unblock means a human already responded —
    # the "stale" signal no longer applies.
    for ev in events:
        if _event_kind(ev) in ("commented", "unblocked") and _event_ts(ev) > blocked_at:
            return []
    nudge = DiagnosticAction(
        kind="comment",
        label="Add a comment / unblock the task",
        suggested=True,
    )
    return [Diagnostic(
        kind="stuck_in_blocked",
        severity="warning",
        title=f"Task has been blocked for {int(age_hours)}h",
        detail=(
            f"This task transitioned to blocked {int(age_hours)}h ago and "
            f"has had no comments or unblock attempts since. Blocked tasks "
            f"are waiting for human input — check the block reason and "
            f"either unblock with feedback or answer with a comment."
        ),
        actions=[nudge],
        first_seen_at=blocked_at,
        last_seen_at=blocked_at,
        count=1,
        data={"blocked_at": blocked_at, "age_hours": round(age_hours, 1)},
    )]
# Registry — order matters: rules higher on the list render first when
# severity ties. Add new rules here.
_RULES: list[RuleFn] = [
    _rule_hallucinated_cards,
    _rule_prose_phantom_refs,
    _rule_repeated_failures,
    _rule_repeated_crashes,
    _rule_stuck_in_blocked,
]
# Known kinds (for the UI's filter / legend / i18n keys). Update when
# rules are added.
DIAGNOSTIC_KINDS = (
    "hallucinated_cards",
    "prose_phantom_refs",
    "repeated_failures",
    "repeated_crashes",
    "stuck_in_blocked",
)
# Defaults merged under any caller-supplied config by
# compute_task_diagnostics(); individual rules read these keys.
DEFAULT_CONFIG = {
    "failure_threshold": 3,
    # Legacy alias accepted at read time by _rule_repeated_failures.
    "spawn_failure_threshold": 3,
    "crash_threshold": 2,
    "blocked_stale_hours": 24,
}
def compute_task_diagnostics(
    task,
    events: list,
    runs: list,
    *,
    now: Optional[int] = None,
    config: Optional[dict] = None,
) -> list[Diagnostic]:
    """Run every rule against a single task's state.

    Returns the active diagnostics sorted most-severe first (critical,
    then error, then warning); ties broken by the most recent
    ``last_seen_at``.
    """
    ts = int(time.time()) if now is None else int(now)
    cfg = dict(DEFAULT_CONFIG)
    if config:
        cfg.update(config)
    found: list[Diagnostic] = []
    for rule in _RULES:
        try:
            found.extend(rule(task, events, runs, ts, cfg))
        except Exception:
            # A broken rule must never crash the dashboard. Rule bugs
            # get caught in tests; in production we'd rather drop the
            # diagnostic than 500 a whole /board request.
            continue
    rank = {name: pos for pos, name in enumerate(SEVERITY_ORDER)}
    found.sort(
        key=lambda d: (
            -rank.get(d.severity, -1),
            -(d.last_seen_at or 0),
        )
    )
    return found
def severity_of_highest(diagnostics: Iterable[Diagnostic]) -> Optional[str]:
    """Return the worst severity present, or None for an empty input.

    Useful for card badges that need a single color. Unrecognised
    severities rank below every known one and never win.
    """
    best: Optional[str] = None
    best_rank = -1
    for diag in diagnostics:
        try:
            rank = SEVERITY_ORDER.index(diag.severity)
        except ValueError:
            rank = -1
        if rank > best_rank:
            best_rank = rank
            best = diag.severity
    return best

View file

@ -0,0 +1,265 @@
"""Kanban triage specifier — flesh out a one-liner into a real spec.
Used by ``hermes kanban specify [task_id | --all]``. Takes a task that
lives in the Triage column (a rough idea, typically only a title), calls
the auxiliary LLM to produce:
* A tightened title (optional only replaces if the model proposes a
materially different one)
* A concrete body: goal, proposed approach, acceptance criteria
and then flips the task ``triage -> todo`` via
``kanban_db.specify_triage_task``. The dispatcher promotes it to
``ready`` on its next tick (or immediately if there are no open parents).
Design notes
------------
* This module intentionally mirrors ``hermes_cli/goals.py`` same aux
client pattern, same "empty config => skip, don't crash" tolerance.
Keeps the surface area tiny and the failure modes predictable.
* The prompt is a short system + user pair. We ask for JSON with
``{title, body}``; if parsing fails, we fall back to treating the
whole response as the body and leave the title untouched. No
retry loop one shot, keep cost bounded.
* Structured output / JSON mode is not requested explicitly so the
specifier works on providers that don't implement it. The parse
is lenient (tolerates markdown code fences around the JSON).
"""
from __future__ import annotations
import json
import logging
import os
import re
from dataclasses import dataclass
from typing import Optional
from hermes_cli import kanban_db as kb
logger = logging.getLogger(__name__)
# System prompt sent verbatim to the auxiliary LLM by specify_task().
# It demands a bare JSON object; _extract_json_blob() below still
# tolerates code fences in case the model ignores that rule.
_SYSTEM_PROMPT = """You are the Kanban triage specifier for the Hermes Agent board.
A user dropped a rough idea into the Triage column. Your job is to turn it
into a concrete, actionable task spec that an autonomous worker can pick up
and execute without further clarification.
Output a single JSON object with exactly two keys:
{
"title": "<tightened task title, <= 80 chars, imperative voice>",
"body": "<multi-line spec, see structure below>"
}
The body MUST include these sections, each prefixed with a bold markdown
heading, in this order:
**Goal** one sentence, user-facing outcome.
**Approach** 2-5 bullets on how a worker should tackle it.
**Acceptance criteria** checklist of concrete, verifiable conditions.
**Out of scope** short list of things NOT to touch (omit if nothing
obvious; never invent scope creep).
Rules:
- Keep the tightened title close in meaning to the original idea do
NOT invent a different project.
- If the original idea is already detailed, preserve its substance and
just reformat into the sections above.
- Never add invented requirements the user didn't hint at.
- No preamble, no closing remarks, no code fences around the JSON.
- Output only the JSON object and nothing else.
"""
# User-message template; placeholders filled (and length-clamped via
# _truncate) in specify_task().
_USER_TEMPLATE = """Task id: {task_id}
Current title: {title}
Current body:
{body}
"""
@dataclass
class SpecifyOutcome:
    """Result of specifying a single triage task.

    ``ok`` is False for every expected failure mode (unknown id, task
    not in triage, no aux client configured, LLM error, malformed
    response); ``reason`` then carries a short human-readable
    explanation so the ``--all`` sweep can report and continue.
    """
    # Id of the triage task this outcome refers to.
    task_id: str
    # True when the task was specified and promoted to ``todo``.
    ok: bool
    # Short explanation, primarily meaningful when ok is False.
    reason: str = ""
    # Tightened title the model proposed, if it replaced the original.
    new_title: Optional[str] = None
def _truncate(text: str, limit: int) -> str:
if len(text) <= limit:
return text
return text[: limit - 1] + ""
_FENCE_RE = re.compile(r"^\s*```(?:json)?\s*|\s*```\s*$", re.IGNORECASE)
def _extract_json_blob(raw: str) -> Optional[dict]:
"""Lenient JSON extraction — tolerates fenced code blocks and
leading/trailing whitespace. Returns None if nothing parses."""
if not raw:
return None
stripped = _FENCE_RE.sub("", raw.strip())
# Greedy: find the first `{` and last `}` and try that slice.
first = stripped.find("{")
last = stripped.rfind("}")
if first == -1 or last == -1 or last <= first:
return None
candidate = stripped[first : last + 1]
try:
val = json.loads(candidate)
except (ValueError, json.JSONDecodeError):
return None
if not isinstance(val, dict):
return None
return val
def _profile_author() -> str:
"""Mirror of ``hermes_cli.kanban._profile_author``. Kept local to
avoid a circular import when kanban.py imports this module."""
return (
os.environ.get("HERMES_PROFILE")
or os.environ.get("USER")
or "specifier"
)
def specify_task(
    task_id: str,
    *,
    author: Optional[str] = None,
    timeout: Optional[int] = None,
) -> SpecifyOutcome:
    """Specify a single triage task and promote it to ``todo``.

    Returns an outcome describing what happened. Never raises for expected
    failure modes (task not in triage, no aux client configured, API
    error, malformed response) — those surface via ``ok=False`` so the
    ``--all`` sweep can continue past individual failures.

    :param task_id: id of the task to specify; must be in ``triage``.
    :param author: event author recorded on the promotion; defaults to
        the current profile (see ``_profile_author``).
    :param timeout: LLM request timeout in seconds (default 120).
    """
    # Cheap pre-flight read: bail before paying for an LLM call if the
    # task is missing or already out of triage.
    with kb.connect() as conn:
        task = kb.get_task(conn, task_id)
        if task is None:
            return SpecifyOutcome(task_id, False, "unknown task id")
        if task.status != "triage":
            return SpecifyOutcome(
                task_id, False, f"task is not in triage (status={task.status!r})"
            )
    # Imported lazily so a missing/broken agent package degrades to a
    # skipped task instead of an import-time crash of this module.
    try:
        from agent.auxiliary_client import get_text_auxiliary_client
    except Exception as exc:  # pragma: no cover — import smoke test
        logger.debug("specify: auxiliary client import failed: %s", exc)
        return SpecifyOutcome(task_id, False, "auxiliary client unavailable")
    try:
        client, model = get_text_auxiliary_client("triage_specifier")
    except Exception as exc:
        logger.debug("specify: get_text_auxiliary_client failed: %s", exc)
        return SpecifyOutcome(task_id, False, "auxiliary client unavailable")
    if client is None or not model:
        return SpecifyOutcome(
            task_id, False, "no auxiliary client configured"
        )
    # Title/body are clamped via _truncate to keep prompt size (and
    # cost) bounded even for pathological inputs.
    user_msg = _USER_TEMPLATE.format(
        task_id=task.id,
        title=_truncate(task.title or "", 400),
        body=_truncate(task.body or "(no body)", 4000),
    )
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": _SYSTEM_PROMPT},
                {"role": "user", "content": user_msg},
            ],
            temperature=0.3,
            max_tokens=1500,
            timeout=timeout or 120,
        )
    except Exception as exc:
        logger.info(
            "specify: API call failed for %s (%s) — skipping",
            task_id, exc,
        )
        return SpecifyOutcome(
            task_id, False, f"LLM error: {type(exc).__name__}"
        )
    # Defensive read: tolerate SDK responses with missing/None content.
    try:
        raw = resp.choices[0].message.content or ""
    except Exception:
        raw = ""
    parsed = _extract_json_blob(raw)
    new_title: Optional[str]
    new_body: Optional[str]
    if parsed is None:
        # Fall back: treat the whole reply as the body, leave title as-is.
        # Worst case the user edits afterward — still better than stranding
        # the task in triage on a malformed LLM reply.
        stripped_raw = raw.strip()
        if not stripped_raw:
            return SpecifyOutcome(
                task_id, False, "LLM returned an empty response"
            )
        new_title = None
        new_body = stripped_raw
    else:
        title_val = parsed.get("title")
        body_val = parsed.get("body")
        # Only replace the title when the model proposed a non-empty one.
        new_title = (
            title_val.strip()
            if isinstance(title_val, str) and title_val.strip()
            else None
        )
        new_body = (
            body_val if isinstance(body_val, str) and body_val.strip() else None
        )
        if new_body is None and new_title is None:
            return SpecifyOutcome(
                task_id, False, "LLM response missing title and body"
            )
    # Second connection: write + promote in one DB-layer call.
    with kb.connect() as conn:
        ok = kb.specify_triage_task(
            conn,
            task_id,
            title=new_title,
            body=new_body,
            author=author or _profile_author(),
        )
    if not ok:
        # Race: someone else promoted / archived the task between our
        # read above and the write. Report, don't crash.
        return SpecifyOutcome(
            task_id, False, "task moved out of triage before promotion"
        )
    return SpecifyOutcome(task_id, True, "specified", new_title=new_title)
def list_triage_ids(*, tenant: Optional[str] = None) -> list[str]:
    """Return task ids currently in the triage column.

    ``tenant`` narrows the sweep; ``None`` returns every triage task.
    """
    with kb.connect() as conn:
        rows = kb.list_tasks(
            conn,
            status="triage",
            tenant=tenant,
            include_archived=False,
        )
    return [row.id for row in rows]

File diff suppressed because it is too large Load diff

View file

@ -221,7 +221,10 @@ def cmd_mcp_add(args):
"""Add a new MCP server with discovery-first tool selection."""
name = args.name
url = getattr(args, "url", None)
command = getattr(args, "command", None)
# Read from `mcp_command` (set by --command via explicit dest) — see
# mcp_add_p.add_argument("--command", dest="mcp_command", ...) in
# hermes_cli/main.py for why the dest is renamed.
command = getattr(args, "mcp_command", None)
cmd_args = getattr(args, "args", None) or []
auth_type = getattr(args, "auth", None)
preset_name = getattr(args, "preset", None)

View file

@ -393,14 +393,21 @@ def normalize_model_for_provider(model_input: str, target_provider: str) -> str:
if provider in _AGGREGATOR_PROVIDERS:
return _prepend_vendor(name)
# --- OpenCode Zen: Claude stays hyphenated; other models keep dots ---
if provider == "opencode-zen":
bare = _strip_matching_provider_prefix(name, provider)
if "/" in bare:
return bare
if bare.lower().startswith("claude-"):
return _dots_to_hyphens(bare)
return bare
# --- OpenCode Zen / OpenCode Go: flat-namespace resellers.
# Their /v1/models API returns bare IDs only (no vendor prefix), and
# the inference endpoint rejects vendor-prefixed names with HTTP 401
# "Model not supported". Strip ANY leading ``vendor/`` so config
# entries like ``minimax/minimax-m2.7`` or ``deepseek/deepseek-v4-flash``
# — commonly copied from aggregator slugs into fallback_model lists —
# resolve to bare ``minimax-m2.7`` / ``deepseek-v4-flash`` the API
# actually serves. See PR reviewing opencode-go fallback 401s. ---
if provider in {"opencode-zen", "opencode-go"}:
if "/" in name:
_, bare_after_slash = name.split("/", 1)
name = bare_after_slash.strip() or name
if provider == "opencode-zen" and name.lower().startswith("claude-"):
return _dots_to_hyphens(name)
return name
# --- Anthropic: strip matching provider prefix, dots -> hyphens ---
if provider in _DOT_TO_HYPHEN_PROVIDERS:

View file

@ -190,11 +190,18 @@ def _load_direct_aliases() -> dict[str, DirectAlias]:
model: "minimax-m2.7"
provider: custom
base_url: "https://ollama.com/v1"
Also reads ``model.aliases`` (set by ``hermes config set model.aliases.xxx``)
and converts simple string entries (``ds-flash: deepseek/deepseek-v4-flash``)
into DirectAlias objects. The provider is parsed from the ``provider/``
prefix in the value; if no slash, the current provider is used.
"""
merged = dict(_BUILTIN_DIRECT_ALIASES)
try:
from hermes_cli.config import load_config
cfg = load_config()
# --- model_aliases (dict-based format) ---
user_aliases = cfg.get("model_aliases")
if isinstance(user_aliases, dict):
for name, entry in user_aliases.items():
@ -207,6 +214,30 @@ def _load_direct_aliases() -> dict[str, DirectAlias]:
merged[name.strip().lower()] = DirectAlias(
model=model, provider=provider, base_url=base_url,
)
# --- model.aliases (string-based format, from config set) ---
model_section = cfg.get("model", {})
if isinstance(model_section, dict):
simple_aliases = model_section.get("aliases")
if isinstance(simple_aliases, dict):
current_provider = model_section.get("provider", "")
for name, value in simple_aliases.items():
if not isinstance(value, str) or not value.strip():
continue
key = name.strip().lower()
if key in merged:
continue # don't override explicit model_aliases entries
val = value.strip()
if "/" in val:
provider, model = val.split("/", 1)
else:
provider = current_provider
model = val
merged[key] = DirectAlias(
model=model.strip(),
provider=provider.strip() or current_provider,
base_url="",
)
except Exception:
pass
return merged
@ -768,6 +799,12 @@ def switch_model(
)
# --- Step d: Aggregator catalog search ---
# Track whether the live catalog of the CURRENT provider resolved the
# model — if so, step e must not second-guess and switch providers.
# Critical for flat-namespace resellers like opencode-go / opencode-zen
# whose live /v1/models returns bare IDs (e.g. "deepseek-v4-flash") that
# coincidentally match entries in native providers' static catalogs.
resolved_in_current_catalog = False
if is_aggregator(target_provider) and not resolved_alias:
catalog = list_provider_models(target_provider)
if catalog:
@ -775,6 +812,7 @@ def switch_model(
for mid in catalog:
if mid.lower() == new_model_lower:
new_model = mid
resolved_in_current_catalog = True
break
else:
for mid in catalog:
@ -782,6 +820,7 @@ def switch_model(
_, bare = mid.split("/", 1)
if bare.lower() == new_model_lower:
new_model = mid
resolved_in_current_catalog = True
break
# --- Step e: detect_provider_for_model() as last resort ---
@ -794,6 +833,7 @@ def switch_model(
target_provider == current_provider
and not is_custom
and not resolved_alias
and not resolved_in_current_catalog
):
detected = detect_provider_for_model(new_model, current_provider)
if detected:
@ -1597,7 +1637,8 @@ def list_authenticated_providers(
groups[group_key]["models"].append(m)
_section4_emitted_slugs: set = set()
for grp in groups.values():
for grp_key, grp in groups.items():
api_url, api_key = grp_key
slug = grp["slug"]
# If the slug is already claimed by a built-in / overlay /
# user-provider row (sections 1-3), skip this custom group
@ -1635,6 +1676,18 @@ def list_authenticated_providers(
_grp_url_norm = _pair_key[1]
if _grp_url_norm and _grp_url_norm in _builtin_endpoints:
continue
# Live model discovery from custom provider endpoints (matches
# Section 3 behavior for user ``providers:`` entries).
if api_url and api_key:
try:
from hermes_cli.models import fetch_api_models
live_models = fetch_api_models(api_key, api_url)
if live_models:
grp["models"] = live_models
grp["total_models"] = len(live_models)
except Exception:
pass
results.append({
"slug": slug,
"name": grp["name"],
@ -1652,3 +1705,63 @@ def list_authenticated_providers(
results.sort(key=lambda r: (not r["is_current"], -r["total_models"]))
return results
def list_picker_providers(
    current_provider: str = "",
    current_base_url: str = "",
    user_providers: dict = None,
    custom_providers: list | None = None,
    max_models: int = 8,
    current_model: str = "",
) -> List[dict]:
    """Build the provider list used by interactive ``/model`` pickers.

    Wraps :func:`list_authenticated_providers` and narrows its output so
    inline keyboards (Telegram/Discord) only offer callable models:

    - For the ``openrouter`` row, the model list is refreshed from
      :func:`hermes_cli.models.fetch_openrouter_models`, so IDs the live
      OpenRouter catalog no longer carries are dropped. If the live fetch
      fails, the row's existing models are kept as-is.
    - Rows left with no models are removed, except user-defined custom
      endpoints (``is_user_defined`` with an ``api_url``), whose model set
      may come from config.

    All other rows and their metadata pass through unchanged; the typed
    ``/model <name>`` path is unaffected.
    """
    from hermes_cli.models import fetch_openrouter_models

    rows = list_authenticated_providers(
        current_provider=current_provider,
        current_base_url=current_base_url,
        user_providers=user_providers,
        custom_providers=custom_providers,
        max_models=max_models,
        current_model=current_model,
    )
    picker_rows: List[dict] = []
    for row in rows:
        if str(row.get("slug", "")).lower() == "openrouter":
            # Replace the curated snapshot with the live-filtered catalog;
            # fall back to the row's own models on any fetch failure.
            try:
                live_ids = [model_id for model_id, _tag in fetch_openrouter_models()]
            except Exception:
                live_ids = list(row.get("models", []))
            # Copy before mutating so the base list's dict is untouched.
            row = {**row, "models": live_ids[:max_models], "total_models": len(live_ids)}
        custom_endpoint = bool(row.get("is_user_defined")) and bool(row.get("api_url"))
        if row.get("models") or custom_endpoint:
            picker_rows.append(row)
    return picker_rows

View file

@ -46,6 +46,7 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
("xiaomi/mimo-v2.5-pro", ""),
("xiaomi/mimo-v2.5", ""),
("tencent/hy3-preview:free", "free"),
("tencent/hy3-preview", ""),
("openai/gpt-5.3-codex", ""),
("google/gemini-3-pro-image-preview", ""),
("google/gemini-3-flash-preview", ""),
@ -61,12 +62,14 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
("z-ai/glm-5v-turbo", ""),
("z-ai/glm-5-turbo", ""),
("x-ai/grok-4.20", ""),
("x-ai/grok-4.3", ""),
("nvidia/nemotron-3-super-120b-a12b", ""),
("nvidia/nemotron-3-super-120b-a12b:free", "free"),
("arcee-ai/trinity-large-preview:free", "free"),
("arcee-ai/trinity-large-thinking", ""),
("openai/gpt-5.5-pro", ""),
("openai/gpt-5.4-nano", ""),
("deepseek/deepseek-v4-pro", ""),
]
_openrouter_catalog_cache: list[tuple[str, str]] | None = None
@ -181,10 +184,12 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"z-ai/glm-5v-turbo",
"z-ai/glm-5-turbo",
"x-ai/grok-4.20-beta",
"x-ai/grok-4.3",
"nvidia/nemotron-3-super-120b-a12b",
"arcee-ai/trinity-large-thinking",
"openai/gpt-5.5-pro",
"openai/gpt-5.4-nano",
"deepseek/deepseek-v4-pro",
],
# Native OpenAI Chat Completions (api.openai.com). Used by /model counts and
# provider_model_ids fallback when /v1/models is unavailable.
@ -412,6 +417,18 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"glm-4.7",
"MiniMax-M2.5",
],
# Alibaba Coding Plan — same platform as alibaba (DashScope coding-intl),
# separate provider ID with its own base_url_env_var.
"alibaba-coding-plan": [
"qwen3.6-plus",
"qwen3.5-plus",
"qwen3-coder-plus",
"qwen3-coder-next",
"kimi-k2.5",
"glm-5",
"glm-4.7",
"MiniMax-M2.5",
],
# Curated HF model list — only agentic models that map to OpenRouter defaults.
"huggingface": [
"moonshotai/Kimi-K2.5",
@ -806,6 +823,25 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway"),
]
# Auto-extend CANONICAL_PROVIDERS with providers registered under providers/
# that the hand-written list above doesn't already contain. Dropping a plugin
# into plugins/model-providers/<name>/ is enough to surface it in the model
# picker, /model, and every downstream consumer — this file needs no edits.
_canonical_slugs = {p.slug for p in CANONICAL_PROVIDERS}
try:
    from providers import list_providers as _list_providers_for_canonical

    _skip_auth_types = ("oauth_device_code", "oauth_external", "external_process", "aws_sdk", "copilot")
    for _pp in _list_providers_for_canonical():
        # Already listed, or a non-api-key auth flow that needs bespoke
        # picker UX — don't auto-inject either.
        if _pp.name in _canonical_slugs or _pp.auth_type in _skip_auth_types:
            continue
        _label = _pp.display_name or _pp.name
        CANONICAL_PROVIDERS.append(
            ProviderEntry(_pp.name, _label, _pp.description or f"{_label} (direct API)")
        )
        _canonical_slugs.add(_pp.name)
except Exception:
    pass  # best-effort: plugin discovery must never break module import
# Derived dicts — used throughout the codebase
_PROVIDER_LABELS = {p.slug: p.label for p in CANONICAL_PROVIDERS}
_PROVIDER_LABELS["custom"] = "Custom endpoint" # special case: not a named provider
@ -2023,6 +2059,34 @@ def provider_model_ids(provider: Optional[str], *, force_refresh: bool = False)
return ids
except Exception:
pass
# ── Profile-based generic live fetch (all simple api-key providers) ──
# Handles any provider registered in providers/ with auth_type="api_key".
# Replaces per-provider copy-paste blocks (stepfun, gmi, zai, etc.).
try:
from providers import get_provider_profile
from hermes_cli.auth import resolve_api_key_provider_credentials
_p = get_provider_profile(normalized)
if _p and _p.auth_type == "api_key" and _p.base_url:
try:
creds = resolve_api_key_provider_credentials(normalized)
api_key = str(creds.get("api_key") or "").strip()
base_url = str(creds.get("base_url") or "").strip()
except Exception:
api_key, base_url = "", _p.base_url
if not base_url:
base_url = _p.base_url
if api_key:
live = _p.fetch_models(api_key=api_key)
if live:
return live
# Use profile's fallback_models if defined
if _p.fallback_models:
return list(_p.fallback_models)
except Exception:
pass
curated_static = list(_PROVIDER_MODELS.get(normalized, []))
if normalized in _MODELS_DEV_PREFERRED:
return _merge_with_models_dev(normalized, curated_static)
@ -2906,6 +2970,19 @@ def fetch_api_models(
_OLLAMA_CLOUD_CACHE_TTL = 3600 # 1 hour
def _strip_ollama_cloud_suffix(model_id: str) -> str:
"""Strip :cloud / -cloud suffixes that models.dev appends to Ollama Cloud IDs.
The live API uses clean IDs (e.g. 'kimi-k2.6') while models.dev sometimes
returns them as 'kimi-k2.6:cloud'. Normalising before the dedup merge
prevents duplicate entries in the merged model list.
"""
for suffix in (":cloud", "-cloud"):
if model_id.endswith(suffix):
return model_id[: -len(suffix)]
return model_id
def _ollama_cloud_cache_path() -> Path:
"""Return the path for the Ollama Cloud model cache."""
from hermes_constants import get_hermes_home
@ -3001,9 +3078,10 @@ def fetch_ollama_cloud_models(
seen.add(m)
merged.append(m)
for m in mdev_models:
if m and m not in seen:
seen.add(m)
merged.append(m)
normalized = _strip_ollama_cloud_suffix(m)
if normalized and normalized not in seen:
seen.add(normalized)
merged.append(normalized)
if merged:
_save_ollama_cloud_cache(merged)
return merged

View file

@ -255,6 +255,10 @@ def get_nous_subscription_features(
terminal_cfg = config.get("terminal") if isinstance(config.get("terminal"), dict) else {}
web_backend = str(web_cfg.get("backend") or "").strip().lower()
# Per-capability overrides: if set, they determine which backend is active for
# search/extract independently of web.backend.
web_search_backend = str(web_cfg.get("search_backend") or "").strip().lower()
web_extract_backend = str(web_cfg.get("extract_backend") or "").strip().lower()
tts_provider = str(tts_cfg.get("provider") or "edge").strip().lower()
browser_provider_explicit = "cloud_provider" in browser_cfg
browser_provider = normalize_browser_cloud_provider(
@ -280,6 +284,7 @@ def get_nous_subscription_features(
direct_firecrawl = bool(get_env_value("FIRECRAWL_API_KEY") or get_env_value("FIRECRAWL_API_URL"))
direct_parallel = bool(get_env_value("PARALLEL_API_KEY"))
direct_tavily = bool(get_env_value("TAVILY_API_KEY"))
direct_searxng = bool(get_env_value("SEARXNG_URL"))
direct_fal = fal_key_is_configured()
direct_openai_tts = bool(resolve_openai_audio_api_key())
direct_elevenlabs = bool(get_env_value("ELEVENLABS_API_KEY"))
@ -323,10 +328,18 @@ def get_nous_subscription_features(
or (web_backend == "firecrawl" and direct_firecrawl)
or (web_backend == "parallel" and direct_parallel)
or (web_backend == "tavily" and direct_tavily)
or (web_backend == "searxng" and direct_searxng)
# Per-capability overrides: search_backend or extract_backend may be set
# without web.backend (using the new split config from #20061)
or (web_search_backend == "searxng" and direct_searxng)
or (web_search_backend == "exa" and direct_exa)
or (web_search_backend == "firecrawl" and direct_firecrawl)
or (web_search_backend == "parallel" and direct_parallel)
or (web_search_backend == "tavily" and direct_tavily)
)
)
web_available = bool(
managed_web_available or direct_exa or direct_firecrawl or direct_parallel or direct_tavily
managed_web_available or direct_exa or direct_firecrawl or direct_parallel or direct_tavily or direct_searxng
)
image_managed = image_tool_enabled and managed_image_available and not direct_fal
@ -412,8 +425,8 @@ def get_nous_subscription_features(
managed_by_nous=web_managed,
direct_override=web_active and not web_managed,
toolset_enabled=web_tool_enabled,
current_provider=web_backend or "",
explicit_configured=bool(web_backend),
current_provider=web_backend or web_search_backend or "",
explicit_configured=bool(web_backend or web_search_backend),
),
"image_gen": NousFeatureState(
key="image_gen",

View file

@ -73,6 +73,24 @@ def _cmd_approve(store, platform: str, code: str):
display = f"{name} ({uid})" if name else uid
print(f"\n Approved! User {display} on {platform} can now use the bot~")
print(" They'll be recognized automatically on their next message.\n")
elif store._is_locked_out(platform):
# Disambiguate: approve_code returns None for both invalid codes
# and lockout. Tell the operator it's lockout so they don't chase
# a "wrong code" rabbit hole (#10195).
import time as _time
limits = store._load_json(store._rate_limit_path())
lockout_until = limits.get(f"_lockout:{platform}", 0)
remaining = max(0, int(lockout_until - _time.time()))
mins = remaining // 60
print(
f"\n Platform '{platform}' is locked out after too many failed "
f"approval attempts."
)
print(f" Lockout clears in ~{mins} minute(s).")
print(
" To reset sooner, delete the '_lockout:{0}' entry from "
"~/.hermes/platforms/pairing/_rate_limits.json\n".format(platform)
)
else:
print(f"\n Code '{code}' not found or expired for platform '{platform}'.")
print(" Run 'hermes pairing list' to see pending codes.\n")

View file

@ -80,6 +80,10 @@ VALID_HOOKS: Set[str] = {
"post_tool_call",
"transform_terminal_output",
"transform_tool_result",
# Transform LLM output before it's returned to the user.
# Plugins return a string to replace the response text, or None/empty to leave unchanged.
# First non-None string wins. Useful for vocabulary/personality transformation.
"transform_llm_output",
"pre_llm_call",
"post_llm_call",
"pre_api_request",
@ -173,7 +177,7 @@ def _get_enabled_plugins() -> Optional[set]:
# Data classes
# ---------------------------------------------------------------------------
_VALID_PLUGIN_KINDS: Set[str] = {"standalone", "backend", "exclusive", "platform"}
_VALID_PLUGIN_KINDS: Set[str] = {"standalone", "backend", "exclusive", "platform", "model-provider"}
@dataclass
@ -643,15 +647,17 @@ class PluginManager:
# - flat: ``plugins/disk-cleanup/plugin.yaml`` (standalone)
# - category: ``plugins/image_gen/openai/plugin.yaml`` (backend)
#
# ``memory/`` and ``context_engine/`` are skipped at the top level —
# they have their own discovery systems. ``platforms/`` is a category
# holding platform adapters (scanned one level deeper below).
# ``memory/``, ``context_engine/``, and ``model-providers/`` are
# skipped at the top level — they have their own discovery systems
# (plugins/memory/__init__.py, providers/__init__.py). ``platforms/``
# is a category holding platform adapters (scanned one level deeper
# below).
repo_plugins = get_bundled_plugins_dir()
manifests.extend(
self._scan_directory(
repo_plugins,
source="bundled",
skip_names={"memory", "context_engine", "platforms"},
skip_names={"memory", "context_engine", "platforms", "model-providers"},
)
)
manifests.extend(
@ -709,6 +715,21 @@ class PluginManager:
)
continue
# Model provider plugins are loaded by providers/__init__.py
# (its own lazy discovery keyed off first get_provider_profile()
# call). We record the manifest here for introspection but do
# not import the module — a second import would create two
# ProviderProfile instances and break the "last writer wins"
# override semantics between bundled and user plugins.
if manifest.kind == "model-provider":
loaded = LoadedPlugin(manifest=manifest, enabled=True)
self._plugins[lookup_key] = loaded
logger.debug(
"Skipping '%s' (model-provider, handled by providers/ discovery)",
lookup_key,
)
continue
# Built-in backends auto-load — they ship with hermes and must
# just work. Selection among them (e.g. which image_gen backend
# services calls) is driven by ``<category>.provider`` config,
@ -886,6 +907,19 @@ class PluginManager:
"treating as kind='exclusive'",
key,
)
elif (
"register_provider" in source_text
and "ProviderProfile" in source_text
):
# Model provider plugin (calls register_provider()
# from ``providers`` with a ProviderProfile). Route
# to providers/__init__.py discovery.
kind = "model-provider"
logger.debug(
"Plugin %s: detected model provider, "
"treating as kind='model-provider'",
key,
)
except Exception:
pass

View file

@ -71,6 +71,22 @@ _CLONE_ALL_STRIP = [
"processes.json",
]
# Opt-out marker created by `hermes profile create --no-skills`. Any caller of
# seed_profile_skills() (fresh create, `hermes update`'s all-profile sync, the
# web dashboard) skips bundled-skill seeding when this file sits in a profile
# root. Skills can still be added manually (`hermes skills install`, or
# dropping SKILL.md files under the profile's skills/). Removing the marker
# re-enables seeding.
NO_BUNDLED_SKILLS_MARKER = ".no-bundled-skills"


def has_bundled_skills_opt_out(profile_dir: Path) -> bool:
    """Return True if the profile opted out of bundled-skill seeding."""
    marker = profile_dir / NO_BUNDLED_SKILLS_MARKER
    try:
        present = marker.exists()
    except OSError:
        # Unreadable profile dir — treat as not opted out (best-effort check).
        return False
    return present
def _clone_all_copytree_ignore(source_dir: Path):
"""Ignore ``profiles/`` at the root of *source_dir* only.
@ -427,6 +443,7 @@ def create_profile(
clone_all: bool = False,
clone_config: bool = False,
no_alias: bool = False,
no_skills: bool = False,
) -> Path:
"""Create a new profile directory.
@ -444,12 +461,22 @@ def create_profile(
skills, and selected profile identity files from the source profile.
no_alias:
If True, skip wrapper script creation.
no_skills:
If True, create an empty profile with no bundled skills, and write
a marker file so ``hermes update`` skips re-seeding this profile's
skills. Mutually exclusive with ``clone_config``/``clone_all`` (those
explicitly copy skills from the source).
Returns
-------
Path
The newly created profile directory.
"""
if no_skills and (clone_config or clone_all):
raise ValueError(
"--no-skills is mutually exclusive with --clone / --clone-all "
"(cloning explicitly copies skills from the source profile)."
)
canon = normalize_profile_name(name)
validate_profile_name(canon)
@ -527,6 +554,19 @@ def create_profile(
except Exception:
pass # best-effort — don't fail profile creation over this
# Write the opt-out marker so seed_profile_skills() and `hermes update`'s
# all-profile sync loop both skip this profile for bundled-skill seeding.
if no_skills:
try:
(profile_dir / NO_BUNDLED_SKILLS_MARKER).write_text(
"This profile opted out of bundled-skill seeding "
"(`hermes profile create --no-skills`).\n"
"Delete this file to re-enable sync on the next `hermes update`.\n",
encoding="utf-8",
)
except OSError:
pass # best-effort — the feature still works via the empty skills/ dir
return profile_dir
@ -535,7 +575,19 @@ def seed_profile_skills(profile_dir: Path, quiet: bool = False) -> Optional[dict
Uses subprocess because sync_skills() caches HERMES_HOME at module level.
Returns the sync result dict, or None on failure.
Profiles that opted out of bundled skills (via ``hermes profile create
--no-skills`` which writes ``.no-bundled-skills`` to the profile root)
are skipped and get an empty-result dict so callers can report
"opted out" instead of "failed".
"""
if has_bundled_skills_opt_out(profile_dir):
return {
"copied": [],
"updated": [],
"user_modified": [],
"skipped_opt_out": True,
}
project_root = Path(__file__).parent.parent.resolve()
try:
result = subprocess.run(

View file

@ -319,9 +319,10 @@ def _try_resolve_from_custom_pool(
base_url: str,
provider_label: str,
api_mode_override: Optional[str] = None,
provider_name: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
"""Check if a credential pool exists for a custom endpoint and return a runtime dict if so."""
pool_key = get_custom_provider_pool_key(base_url)
pool_key = get_custom_provider_pool_key(base_url, provider_name=provider_name)
if not pool_key:
return None
try:
@ -521,7 +522,7 @@ def _resolve_named_custom_runtime(
return None
# Check if a credential pool exists for this custom endpoint
pool_result = _try_resolve_from_custom_pool(base_url, "custom", custom_provider.get("api_mode"))
pool_result = _try_resolve_from_custom_pool(base_url, "custom", custom_provider.get("api_mode"), provider_name=custom_provider.get("name"))
if pool_result:
# Propagate the model name even when using pooled credentials —
# the pool doesn't know about the custom_providers model field.
@ -640,8 +641,11 @@ def _resolve_openrouter_runtime(
# For custom endpoints, check if a credential pool exists
if effective_provider == "custom" and base_url:
# Pass requested_provider so pool lookup prefers name match over base_url,
# fixing credential mix-ups when multiple custom providers share a base_url.
pool_result = _try_resolve_from_custom_pool(
base_url, effective_provider, _parse_api_mode(model_cfg.get("api_mode")),
provider_name=requested_provider if requested_norm != "custom" else None,
)
if pool_result:
return pool_result

View file

@ -15,6 +15,7 @@ import importlib.util
import json
import logging
import os
import re
import shutil
import sys
import copy
@ -208,12 +209,23 @@ def prompt(question: str, default: str = None, password: bool = False) -> str:
else:
value = input(color(display, Colors.YELLOW))
return value.strip() or default or ""
cleaned = _sanitize_pasted_input(value)
return cleaned.strip() or default or ""
except (KeyboardInterrupt, EOFError):
print()
sys.exit(1)
_BRACKETED_PASTE_PATTERN = re.compile(r"\x1b\[\s*200~|\x1b\[\s*201~")
def _sanitize_pasted_input(value: str) -> str:
"""Strip terminal bracketed-paste control markers from pasted text."""
if not isinstance(value, str) or not value:
return value
return _BRACKETED_PASTE_PATTERN.sub("", value)
def _curses_prompt_choice(question: str, choices: list, default: int = 0, description: str | None = None) -> int:
"""Single-select menu using curses. Delegates to curses_radiolist."""
from hermes_cli.curses_ui import curses_radiolist
@ -382,7 +394,7 @@ def _print_setup_summary(config: dict, hermes_home):
label = f"Web Search & Extract ({subscription_features.web.current_provider})"
tool_status.append((label, True, None))
else:
tool_status.append(("Web Search & Extract", False, "EXA_API_KEY, PARALLEL_API_KEY, FIRECRAWL_API_KEY/FIRECRAWL_API_URL, or TAVILY_API_KEY"))
tool_status.append(("Web Search & Extract", False, "EXA_API_KEY, PARALLEL_API_KEY, FIRECRAWL_API_KEY/FIRECRAWL_API_URL, TAVILY_API_KEY, or SEARXNG_URL"))
# Browser tools (local Chromium, Camofox, Browserbase, Browser Use, or Firecrawl)
browser_provider = subscription_features.browser.current_provider
@ -2450,6 +2462,9 @@ def setup_gateway(config: dict):
launchd_start,
launchd_restart,
UserSystemdUnavailableError,
SystemScopeRequiresRootError,
_system_scope_wizard_would_need_root,
_print_system_scope_remediation,
)
service_installed = _is_service_installed()
@ -2467,7 +2482,9 @@ def setup_gateway(config: dict):
print()
if service_running:
if prompt_yes_no(" Restart the gateway to pick up changes?", True):
if supports_systemd and _system_scope_wizard_would_need_root():
_print_system_scope_remediation("restart")
elif prompt_yes_no(" Restart the gateway to pick up changes?", True):
try:
if supports_systemd:
systemd_restart()
@ -2477,10 +2494,19 @@ def setup_gateway(config: dict):
print_error(" Restart failed — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
# Defense in depth: the pre-check above should have
# caught this, but a race (unit file appearing mid-run)
# could still land here. Previously this exited the
# whole wizard via sys.exit(1).
print_error(f" Restart failed: {e}")
_print_system_scope_remediation("restart")
except Exception as e:
print_error(f" Restart failed: {e}")
elif service_installed:
if prompt_yes_no(" Start the gateway service?", True):
if supports_systemd and _system_scope_wizard_would_need_root():
_print_system_scope_remediation("start")
elif prompt_yes_no(" Start the gateway service?", True):
try:
if supports_systemd:
systemd_start()
@ -2490,6 +2516,9 @@ def setup_gateway(config: dict):
print_error(" Start failed — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
print_error(f" Start failed: {e}")
_print_system_scope_remediation("start")
except Exception as e:
print_error(f" Start failed: {e}")
elif supports_service_manager:
@ -2517,6 +2546,9 @@ def setup_gateway(config: dict):
print_error(" Start failed — user systemd not reachable:")
for line in str(e).splitlines():
print(f" {line}")
except SystemScopeRequiresRootError as e:
print_error(f" Start failed: {e}")
_print_system_scope_remediation("start")
except Exception as e:
print_error(f" Start failed: {e}")
except Exception as e:

View file

@ -42,6 +42,7 @@ All fields are optional. Missing values inherit from the ``default`` skin.
session_border: "#8B8682" # Session ID dim color
status_bar_bg: "#1a1a2e" # TUI status/usage bar background
voice_status_bg: "#1a1a2e" # TUI voice status background
selection_bg: "#333355" # TUI mouse-selection highlight background
completion_menu_bg: "#1a1a2e" # Completion menu background
completion_menu_current_bg: "#333355" # Active completion row background
completion_menu_meta_bg: "#1a1a2e" # Completion meta column background

View file

@ -192,7 +192,7 @@ TIPS = [
"Voice messages on Telegram, Discord, WhatsApp, and Slack are auto-transcribed.",
# --- Gateway & Messaging ---
"Hermes runs on 18 platforms: Telegram, Discord, Slack, WhatsApp, Signal, Matrix, email, and more.",
"Hermes runs on 21 messaging platforms: Telegram, Discord, Slack, WhatsApp, Signal, Matrix, IRC, Microsoft Teams, email, and more.",
"hermes gateway install sets it up as a system service that starts on boot.",
"DingTalk uses Stream Mode — no webhooks or public URL needed.",
"BlueBubbles brings iMessage to Hermes via a local macOS server.",
@ -334,6 +334,144 @@ TIPS = [
"MCP ${ENV_VAR} placeholders in config are resolved at server spawn — including vars from ~/.hermes/.env.",
"Skills from trusted repos (NousResearch) get a 'trusted' security level; community skills get extra scanning.",
"The skills quarantine at ~/.hermes/skills/.hub/quarantine/ holds skills pending security review.",
# --- Advanced Slash Commands ---
'/steer <prompt> injects a note after the next tool call — nudge direction mid-task without interrupting.',
'/goal <text> sets a standing Ralph-loop objective — Hermes auto-continues turn after turn until a judge says done.',
'/snapshot create [label] saves a full state snapshot of Hermes config; /snapshot restore <id> reverts later.',
'/copy [N] copies the last assistant response to your clipboard, or the Nth-from-last with a number.',
'/redraw forces a full UI repaint, fixing terminal drift after tmux resize or mouse selection artifacts.',
'/agents (alias /tasks) shows active agents and running background tasks across the current session.',
'/footer toggles the gateway footer on final replies showing model, tool counts, and turn timing.',
'/busy queue|steer|interrupt controls what pressing Enter does while Hermes is working.',
'/topic in Telegram DMs enables user-managed multi-session topic mode — /topic <id> restores past sessions inline.',
'/approve session|always runs a pending dangerous command with your chosen trust scope; /deny rejects it.',
'/restart gracefully restarts the gateway after draining active runs, then pings the requester when back up.',
'/kanban boards switch <slug> changes the active multi-project Kanban board from inside chat.',
'/reload reloads ~/.hermes/.env into the running session — pick up new API keys without restarting.',
# --- Cron (no-agent & scripts) ---
'cronjob with no_agent=True runs a script on schedule and sends its stdout directly — zero tokens, zero LLM.',
'An empty cron script stdout means silent tick — nothing is delivered, perfect for threshold watchdogs.',
"HERMES_CRON_MAX_PARALLEL (default 4) caps how many cron jobs run per tick so bursts don't saturate your keys.",
# --- Gateway Hooks ---
'Gateway hooks live under ~/.hermes/hooks/<name>/ with HOOK.yaml + handler.py — handler must be named `handle`.',
'Hook events include gateway:startup, session:start, agent:step, and command:* wildcard subscriptions.',
'Drop a ~/.hermes/BOOT.md checklist and a gateway:startup hook runs it as a one-shot agent every boot.',
# --- Curator ---
'hermes curator run --dry-run previews what the curator would archive or consolidate without mutating anything.',
"hermes curator pin <skill> hard-fences a skill against both auto-archival and the agent's skill_manage tool.",
'hermes curator rollback restores skills from a pre-run snapshot — backups live under skills/.curator_backups/.',
# --- Credential Pools & Routing ---
'hermes auth reset <provider> clears all cooldowns and exhaustion flags on a credential pool.',
'credential_pool_strategies.<provider>: round_robin cycles keys evenly instead of the fill_first default.',
'use_gateway: true per-tool routes web, image, tts, or browser through your Nous subscription — no extra keys.',
'provider_routing.data_collection: deny excludes data-storing providers on OpenRouter.',
'provider_routing.require_parameters: true only routes to providers that support every param in your request.',
# --- TUI & Dashboard ---
'HERMES_TUI_RESUME=1 auto-re-attaches to the most recent TUI session on launch — handy after SSH drops.',
"HERMES_TUI_THEME=light|dark|<hex> forces the TUI theme on terminals that don't set COLORFGBG.",
'Ctrl+G or Ctrl+X Ctrl+E in the TUI opens the input buffer in $EDITOR for long multi-line prompts.',
'The TUI renders LaTeX inline — $E=mc^2$ becomes Unicode math instead of raw TeX.',
'hermes dashboard launches a local web UI at 127.0.0.1:9119 — zero data leaves localhost.',
'hermes dashboard --tui embeds the full Hermes TUI in your browser via xterm.js and a WebSocket PTY.',
'Drop a YAML in ~/.hermes/dashboard-themes/ with two palette colors to reskin the entire dashboard.',
'Dashboard plugins are drop-in: manifest.json + JS bundle in ~/.hermes/dashboard-plugins/ — no npm build required.',
'layoutVariant: cockpit in a dashboard theme adds a 260px left rail that plugins can populate via the sidebar slot.',
# --- Env Vars & Config Gates ---
"display.tool_progress_command: true exposes /verbose on messaging platforms; it's CLI-only by default.",
'HERMES_BACKGROUND_NOTIFICATIONS=result only pings when background tasks finish (vs all/error/off).',
'HERMES_WRITE_SAFE_ROOT restricts write_file and patch to a directory prefix; writes outside require approval.',
'HERMES_IGNORE_RULES skips auto-injection of AGENTS.md, SOUL.md, .cursorrules, memory, and preloaded skills.',
'HERMES_ACCEPT_HOOKS auto-approves unseen shell hooks declared in config.yaml without a TTY prompt.',
'auxiliary.goal_judge.model routes the /goal judge to a cheap fast model to keep loop cost near zero.',
'Checkpoints skip directories with more than 50,000 files to avoid slow git operations on massive monorepos.',
# --- TTS ---
'tts.provider: piper runs 44-language local TTS on CPU — voices auto-download to ~/.hermes/cache/piper-voices/.',
'tts.providers.<name>.type: command wires any CLI TTS engine with {input_path} and {output_path} placeholders.',
# --- API Server & Proxy ---
'API_SERVER_ENABLED=true runs an OpenAI-compatible endpoint alongside the gateway for Open WebUI and LibreChat.',
'GATEWAY_PROXY_URL runs a split setup: platform I/O locally, agent work delegated to a remote API server.',
# --- Platform-specific ---
'MATRIX_DEVICE_ID pins a stable device ID for E2EE — without it, keys rotate every start and historic decrypt breaks.',
'TELEGRAM_WEBHOOK_SECRET is required whenever TELEGRAM_WEBHOOK_URL is set — generate with openssl rand -hex 32.',
# --- Batch ---
"batch_runner.py --resume content-matches completed prompts by text so dataset reorders don't re-run finished work.",
# --- Less-Known Slash Commands ---
'/new starts a fresh session in place (alias /reset) — fresh session ID, clean history, CLI stays open.',
'/clear wipes the terminal screen AND starts a new session — one shortcut for a visual reset.',
'/history prints the current conversation in-line without leaving the CLI — useful for a quick re-read.',
'/save writes the current conversation to disk without ending the session.',
'/status shows session info at a glance: ID, title, model, token usage, and elapsed time.',
'/image <path> attaches a local image file for your next prompt without pasting or drag-and-drop.',
'/platforms shows gateway and messaging-platform connection status right from inside chat.',
'/commands paginates the full slash-command + installed-skill list — useful on platforms without tab completion.',
'/toolsets lists every available toolset so you know what -t/--toolsets accepts.',
'/gquota shows Google Gemini Code Assist quota usage with progress bars when that provider is active.',
'/voice tts toggles TTS-only mode — agent replies out loud but you still type your prompts.',
'/reload-skills re-scans ~/.hermes/skills/ so drop-in skills appear without restarting the session.',
'/indicator kaomoji|emoji|unicode|ascii picks the TUI busy-indicator style shown during agent runs.',
'/debug uploads a support bundle (system info + logs) and returns shareable links — works in chat too.',
# --- CLI Subcommands & Flags ---
'hermes -z "<prompt>" is the purest one-shot: final answer on stdout, nothing else — ideal for piping in scripts.',
'hermes chat --pass-session-id injects the session ID into the system prompt so the agent can self-reference it.',
'hermes chat --image path/to/pic.png attaches a local image to a single -q query without a separate upload step.',
'hermes chat --ignore-user-config skips ~/.hermes/config.yaml — reproducible bug reports and CI runs.',
"hermes chat --source tool tags programmatic chats so they don't clutter hermes sessions list.",
'hermes dump --show-keys includes redacted API key fingerprints for deeper support debugging.',
'hermes sessions rename <ID> "new title" renames any past session; hermes sessions delete <ID> removes one.',
'hermes import restores a session export or profile archive produced by sessions export or profile export.',
'hermes fallback manages the fallback_model chain interactively — no hand-editing config.yaml.',
'hermes pairing rotates the DM pairing token — the first messager after rotation claims access to the bot.',
'hermes setup walks first-time users through provider, keys, and platform wiring in one interactive flow.',
'hermes status --deep runs the full health sweep across every component; plain hermes status is the quick view.',
# --- Agent Behavior Env Vars ---
'HERMES_AGENT_TIMEOUT=0 disables the gateway inactivity kill for a running agent — use for long research runs.',
'HERMES_ENABLE_PROJECT_PLUGINS=1 auto-loads repo-local plugins from ./.hermes/plugins/ — trust-gated by design.',
"HERMES_DISABLE_FILE_STATE_GUARD=1 turns off the 'file changed since you read it' guard on patch and write_file.",
'HERMES_ALLOW_PRIVATE_URLS=true lets web tools hit localhost and private networks — off by default in gateway mode.',
'HERMES_OPTIONAL_SKILLS=name1,name2 auto-installs extra optional-catalog skills on first run per profile.',
'HERMES_BUNDLED_SKILLS points at a custom bundled-skill tree — used by Homebrew and Nix packaging.',
'HERMES_DUMP_REQUEST_STDOUT=1 dumps every API request payload to stdout instead of log files.',
'HERMES_OAUTH_TRACE=1 logs redacted OAuth token exchange and refresh attempts for debugging provider auth.',
'HERMES_STREAM_RETRIES (default 3) controls mid-stream reconnect attempts on transient network errors.',
# --- Gateway Behavior Env Vars ---
'HERMES_GATEWAY_BUSY_ACK_ENABLED=false silences the ⚡/⏳/⏩ ack messages when a user messages a busy agent.',
'HERMES_AGENT_NOTIFY_INTERVAL (default 180s) sets how often the gateway pings with progress on long turns.',
'HERMES_RESTART_DRAIN_TIMEOUT (default 900s) caps how long /restart waits for in-flight runs before forcing.',
'HERMES_CHECKPOINT_TIMEOUT (default 30s) caps filesystem checkpoint creation — raise it on huge monorepos.',
# --- Auxiliary Tasks & Image Generation ---
'image_gen.model in config.yaml picks the FAL model: flux-2/klein, gpt-image-2, nano-banana-pro, and more.',
'image_gen.provider routes image generation through a plugin (OpenAI Images, Codex, FAL) instead of the default.',
'AUXILIARY_VISION_BASE_URL + AUXILIARY_VISION_API_KEY point vision analysis at any OpenAI-compatible endpoint.',
'auxiliary.session_search.max_concurrency bounds how many matched sessions are summarized in parallel (default 3).',
'auxiliary.session_search.extra_body forwards provider-specific OpenAI-compatible fields on summarization calls.',
# --- Security ---
'security.tirith_fail_open: false makes Hermes block commands when the tirith scanner itself errors out.',
'TIRITH_FAIL_OPEN env var overrides the tirith_fail_open config — a quick toggle without editing config.yaml.',
# --- Sessions & Source Tags ---
'--source tool chats are excluded from hermes sessions list by default — set --source explicitly to see them.',
'Session IDs are timestamp-prefixed (20250305_091523_abcd) so sorting works naturally in ls and jq.',
# --- Misc ---
'API_SERVER_MODEL_NAME customizes the model name on /v1/models — essential for multi-profile Open WebUI setups.',
'Dashboard plugins are served from /dashboard-plugins/<name>/ — drop files into ~/.hermes/dashboard-plugins/.',
]

View file

@ -299,6 +299,32 @@ TOOL_CATEGORIES = {
{"key": "FIRECRAWL_API_URL", "prompt": "Your Firecrawl instance URL (e.g., http://localhost:3002)"},
],
},
{
"name": "SearXNG",
"badge": "free · self-hosted · search only",
"tag": "Privacy-respecting metasearch engine — search only (pair with any extract provider)",
"web_backend": "searxng",
"env_vars": [
{"key": "SEARXNG_URL", "prompt": "Your SearXNG instance URL (e.g., http://localhost:8080)", "url": "https://searxng.github.io/searxng/"},
],
},
{
"name": "Brave Search (Free Tier)",
"badge": "free tier · search only",
"tag": "2,000 queries/mo free — search only (pair with any extract provider)",
"web_backend": "brave-free",
"env_vars": [
{"key": "BRAVE_SEARCH_API_KEY", "prompt": "Brave Search subscription token", "url": "https://brave.com/search/api/"},
],
},
{
"name": "DuckDuckGo (ddgs)",
"badge": "free · no key · search only",
"tag": "Search via the ddgs Python package — no API key (pair with any extract provider)",
"web_backend": "ddgs",
"env_vars": [],
"post_setup": "ddgs",
},
],
},
"image_gen": {
@ -660,6 +686,32 @@ def _run_post_setup(post_setup_key: str):
_print_info(" Full voice list: https://github.com/OHF-Voice/piper1-gpl/blob/main/docs/VOICES.md")
_print_info(" Switch voices by setting tts.piper.voice in ~/.hermes/config.yaml")
elif post_setup_key == "ddgs":
try:
__import__("ddgs")
_print_success(" ddgs is already installed")
except ImportError:
import subprocess
_print_info(" Installing ddgs (DuckDuckGo search package)...")
try:
result = subprocess.run(
[sys.executable, "-m", "pip", "install", "-U", "ddgs", "--quiet"],
capture_output=True, text=True, timeout=300,
)
if result.returncode == 0:
_print_success(" ddgs installed")
else:
_print_warning(" ddgs install failed:")
_print_info(f" {result.stderr.strip()[:300]}")
_print_info(" Run manually: python -m pip install -U ddgs")
return
except subprocess.TimeoutExpired:
_print_warning(" ddgs install timed out (>5min)")
_print_info(" Run manually: python -m pip install -U ddgs")
return
_print_info(" No API key required. DuckDuckGo enforces server-side rate limits.")
_print_info(" Pair with an extract provider if you also need web_extract.")
elif post_setup_key == "spotify":
# Run the full `hermes auth spotify` flow — if the user has no
# client_id yet, this drops them into the interactive wizard

View file

@ -27,6 +27,192 @@ import sys
import threading
from typing import Any, Callable, Optional
# Modifier aliases mirrored from the TUI parser (``ui-tui/src/lib/platform.ts``)
# ``_MOD_ALIASES`` table — the shared contract that removes the
# cross-runtime mismatch Copilot flagged in round-9 on #19835.
#
# ``super``/``win``/``windows`` are intentionally absent: prompt_toolkit
# has no super/meta modifier for the Cmd key, so those spellings are
# TUI-only. The normalizer below returns the documented default
# (``c-b``) for them — a silent fallback was preferred to a hard
# startup crash (Copilot round-11). The CLI binding site
# (``_register_voice_handler`` in cli.py) logs a warning when that
# fallback fires so users see why their TUI-only shortcut isn't
# bound in the classic CLI.
_VOICE_MOD_ALIASES = {
    "ctrl": "c-",
    "control": "c-",
    "alt": "a-",
    "option": "a-",  # macOS spelling of Alt
    "opt": "a-",
}

# Named keys prompt_toolkit accepts in ``c-<name>`` / ``a-<name>`` form.
# Aliases (right column is canonical) collapse to prompt_toolkit's
# canonical spelling so the same config value binds identically in both
# runtimes (Copilot round-10 on #19835).
_VOICE_NAMED_KEYS = {
    "space": "space",
    "spc": "space",
    "enter": "enter",
    "return": "enter",
    "ret": "enter",
    "tab": "tab",
    "escape": "escape",
    "esc": "escape",
    "backspace": "backspace",
    "bs": "backspace",
    "delete": "delete",
    "del": "delete",
}

# ``useInputHandlers()`` intercepts these before the voice check runs,
# so a binding like ``ctrl+c`` (interrupt), ``ctrl+d`` (quit), or
# ``ctrl+l`` (clear screen) would be advertised in /voice status but
# never fire push-to-talk — the same blocklist the TUI parser uses.
_VOICE_RESERVED_CTRL_CHARS = frozenset({"c", "d", "l"})

# On macOS the classic CLI's prompt_toolkit bindings for copy / exit /
# clear also claim ``a-c`` / ``a-d`` / ``a-l`` via the action-modifier
# lookup, and hermes-ink reports Alt as ``key.meta`` on many terminals.
# Mirror the TUI parser's darwin-only reservation so ``option+c`` etc.
# don't bind Alt+C in the CLI while the TUI silently falls back to
# Ctrl+B (Copilot round-14 on #19835).
_VOICE_RESERVED_ALT_CHARS_MAC = frozenset({"c", "d", "l"})

# Documented default push-to-talk chord (Ctrl+B) in prompt_toolkit syntax.
_DEFAULT_PT_KEY = "c-b"
def voice_record_key_from_config(cfg: Any) -> Any:
    """Shape-tolerant lookup of ``cfg["voice"]["record_key"]``.

    ``load_config()`` deep-merges raw YAML and preserves scalar
    overrides, so a hand-edited ``voice: true`` / ``voice: cmd+b``
    can leave ``cfg["voice"]`` as a bool/str instead of a mapping,
    and a naive ``.get("voice", {}).get("record_key")`` chain raises
    AttributeError before voice can even start (Copilot round-11 on
    #19835). Any malformed shape yields ``None`` so call sites can
    feed the result straight into the normalizer/formatter and get
    the documented default.
    """
    # Outer shape first: anything but a mapping has no "voice" key.
    voice_section = cfg.get("voice") if isinstance(cfg, dict) else None
    # The voice section itself must also be a mapping to carry record_key.
    if not isinstance(voice_section, dict):
        return None
    return voice_section.get("record_key")
def normalize_voice_record_key_for_prompt_toolkit(raw: Any) -> str:
    """Coerce ``voice.record_key`` into prompt_toolkit's ``c-x`` / ``a-x`` format.

    Mirrors the TUI parser contract (``ui-tui/src/lib/platform.ts``)
    so one config value binds the same shortcut in both runtimes:

    * non-string / empty / typo'd / bare-char / multi-modifier / reserved
      ``ctrl+c|d|l`` → documented default ``c-b``
    * single-char keys: ``ctrl+o`` → ``c-o``
    * named keys: ``ctrl+space`` → ``c-space`` (aliases collapse:
      ``ctrl+return`` → ``c-enter``)
    * ``super`` / ``win`` / ``windows`` → ``c-b`` (TUI-only modifiers —
      prompt_toolkit has no super mod; the CLI binding site is
      expected to warn when this fallback fires so users see the
      cross-runtime split, Copilot round-11 on #19835)
    """
    if not isinstance(raw, str):
        return _DEFAULT_PT_KEY
    tokens = [t.strip() for t in raw.strip().lower().split("+") if t.strip()]
    # Exactly one modifier plus one key is the only accepted shape:
    #  - zero or one token: a bare key prompt_toolkit would bind
    #    unmodified, which the TUI parser refuses — reject for parity;
    #  - three or more: multi-modifier chords like ``ctrl+alt+r`` bind
    #    different shortcuts in prompt_toolkit (a-c-r form) and
    #    hermes-ink rejects them, so collapse to the default instead
    #    of silently diverging.
    if len(tokens) != 2:
        return _DEFAULT_PT_KEY
    mod_token, key_token = tokens
    # TUI-only modifiers: prompt_toolkit has no super modifier, so
    # ``@kb.add("super+b")`` would crash the CLI at startup. Fall back
    # to the documented default; the CLI binding site is expected to
    # log a warning when this fires (Copilot round-11 on #19835).
    if mod_token in {"super", "win", "windows"}:
        return _DEFAULT_PT_KEY
    pt_mod = _VOICE_MOD_ALIASES.get(mod_token)
    if pt_mod is None:
        return _DEFAULT_PT_KEY
    if len(key_token) == 1:
        # Reserved chords the input layer claims before the voice check
        # runs (ctrl+c/d/l everywhere; alt+c/d/l on macOS only) — mirror
        # the TUI parser's blocklist so both runtimes agree.
        reserved_ctrl = pt_mod == "c-" and key_token in _VOICE_RESERVED_CTRL_CHARS
        reserved_alt = (
            pt_mod == "a-"
            and sys.platform == "darwin"
            and key_token in _VOICE_RESERVED_ALT_CHARS_MAC
        )
        if reserved_ctrl or reserved_alt:
            return _DEFAULT_PT_KEY
        return pt_mod + key_token
    # Multi-char key token must be a known named key; typos like
    # ``ctrl+spcae`` fall back to the default rather than being passed
    # through as ``c-spcae`` (which prompt_toolkit would reject).
    canonical = _VOICE_NAMED_KEYS.get(key_token)
    return pt_mod + canonical if canonical else _DEFAULT_PT_KEY
def format_voice_record_key_for_status(raw: Any) -> str:
    """Render ``voice.record_key`` for ``/voice status`` in CLI-friendly form.

    Mirrors the TUI's ``formatVoiceRecordKey``: returns ``Ctrl+B`` /
    ``Alt+Space`` / ``Ctrl+Enter``. Malformed configs surface as the
    documented default so status never advertises a shortcut that
    won't bind (Copilot round-10 on #19835).
    """
    normalized = normalize_voice_record_key_for_prompt_toolkit(raw)
    if normalized.startswith("c-"):
        prefix = "Ctrl+"
        key = normalized[len("c-"):]
    elif normalized.startswith("a-"):
        prefix = "Alt+"
        key = normalized[len("a-"):]
    elif "+" in normalized:
        # Defensive: ``super+<key>`` / ``win+<key>`` — the CLI won't
        # bind them, but render in title case so the status output is
        # still readable.
        mod, key = normalized.split("+", 1)
        prefix = mod[0].upper() + mod[1:] + "+"
    else:
        return "Ctrl+B"
    if not key:
        return prefix.rstrip("+")
    # Single chars upper-case wholesale; named keys get title case.
    if len(key) == 1:
        return prefix + key.upper()
    return prefix + key[0].upper() + key[1:]
from tools.voice_mode import (
create_audio_recorder,
is_whisper_hallucination,
@ -95,6 +281,8 @@ _recorder_lock = threading.Lock()
# ── Continuous (VAD) state ───────────────────────────────────────────
_continuous_lock = threading.Lock()
_continuous_active = False
_continuous_stopping = False
_continuous_auto_restart: bool = True
_continuous_recorder: Any = None
# ── TTS-vs-STT feedback guard ────────────────────────────────────────
@ -184,32 +372,43 @@ def start_continuous(
on_silent_limit: Optional[Callable[[], None]] = None,
silence_threshold: int = 200,
silence_duration: float = 3.0,
) -> None:
auto_restart: bool = True,
) -> bool:
"""Start a VAD-driven continuous recording loop.
The loop calls ``on_transcript(text)`` each time speech is detected and
transcribed successfully, then auto-restarts. After
``_CONTINUOUS_NO_SPEECH_LIMIT`` consecutive silent cycles (no speech
picked up at all) the loop stops itself and calls ``on_silent_limit``
so the UI can reflect "voice off". Idempotent calling while already
active is a no-op.
transcribed successfully. If ``auto_restart`` is True, it auto-restarts
for the next turn and resets the no-speech counter for that loop. If
``auto_restart`` is False, the first silence-triggered transcription ends
the loop and reports ``"idle"``; no-speech counts are retained across
starts so a push-to-talk caller can still enforce the three-strikes guard.
After ``_CONTINUOUS_NO_SPEECH_LIMIT`` consecutive silent cycles (no speech
picked up at all) the loop stops itself and calls ``on_silent_limit`` so the
UI can reflect "voice off". Returns False if a previous stop is still
transcribing/cleaning up; otherwise returns True. Idempotent calling while
already active is a successful no-op.
``on_status`` is called with ``"listening"`` / ``"transcribing"`` /
``"idle"`` so the UI can show a live indicator.
"""
global _continuous_active, _continuous_recorder
global _continuous_active, _continuous_recorder, _continuous_auto_restart
global _continuous_on_transcript, _continuous_on_status, _continuous_on_silent_limit
global _continuous_no_speech_count
with _continuous_lock:
if _continuous_active:
_debug("start_continuous: already active — no-op")
return
return True
if _continuous_stopping:
_debug("start_continuous: stop/transcribe in progress — busy")
return False
_continuous_active = True
_continuous_auto_restart = auto_restart
_continuous_on_transcript = on_transcript
_continuous_on_status = on_status
_continuous_on_silent_limit = on_silent_limit
_continuous_no_speech_count = 0
if auto_restart:
_continuous_no_speech_count = 0
if _continuous_recorder is None:
_continuous_recorder = create_audio_recorder()
@ -242,15 +441,18 @@ def start_continuous(
except Exception:
pass
return True
def stop_continuous() -> None:
def stop_continuous(force_transcribe: bool = False) -> None:
"""Stop the active continuous loop and release the microphone.
Idempotent calling while not active is a no-op. Any in-flight
transcription completes but its result is discarded (the callback
checks ``_continuous_active`` before firing).
Idempotent calling while not active is a no-op. If ``force_transcribe`` is
True, the recorder stops synchronously, then transcription/cleanup runs on a
background thread before reporting ``"idle"``. Otherwise the buffer is
discarded.
"""
global _continuous_active, _continuous_on_transcript
global _continuous_active, _continuous_on_transcript, _continuous_stopping
global _continuous_on_status, _continuous_on_silent_limit
global _continuous_recorder, _continuous_no_speech_count
@ -260,18 +462,98 @@ def stop_continuous() -> None:
_continuous_active = False
rec = _continuous_recorder
on_status = _continuous_on_status
on_transcript = _continuous_on_transcript
on_silent_limit = _continuous_on_silent_limit
auto_restart = _continuous_auto_restart
track_no_speech = force_transcribe and not auto_restart
_continuous_stopping = rec is not None
_continuous_on_transcript = None
_continuous_on_status = None
_continuous_on_silent_limit = None
_continuous_no_speech_count = 0
if not track_no_speech:
_continuous_no_speech_count = 0
if rec is not None:
try:
# cancel() (not stop()) discards buffered frames — the loop
# is over, we don't want to transcribe a half-captured turn.
rec.cancel()
except Exception as e:
logger.warning("failed to cancel recorder: %s", e)
if force_transcribe and on_transcript:
if on_status:
try:
on_status("transcribing")
except Exception:
pass
try:
wav_path = rec.stop()
except Exception as e:
logger.warning("failed to stop recorder: %s", e)
try:
rec.cancel()
except Exception as cancel_error:
logger.warning("failed to cancel recorder: %s", cancel_error)
wav_path = None
def _transcribe_and_cleanup():
global _continuous_no_speech_count, _continuous_stopping
transcript: Optional[str] = None
should_halt = False
try:
if wav_path:
try:
result = transcribe_recording(wav_path)
if result.get("success"):
text = (result.get("transcript") or "").strip()
if text and not is_whisper_hallucination(text):
transcript = text
finally:
if os.path.isfile(wav_path):
os.unlink(wav_path)
except Exception as e:
logger.warning("failed to stop/transcribe recorder: %s", e)
finally:
if transcript:
try:
on_transcript(transcript)
except Exception as e:
logger.warning("on_transcript callback raised: %s", e)
if track_no_speech:
with _continuous_lock:
if transcript:
_continuous_no_speech_count = 0
else:
_continuous_no_speech_count += 1
should_halt = (
_continuous_no_speech_count
>= _CONTINUOUS_NO_SPEECH_LIMIT
)
if should_halt:
_continuous_no_speech_count = 0
if should_halt and on_silent_limit:
try:
on_silent_limit()
except Exception:
pass
_play_beep(frequency=660, count=2)
with _continuous_lock:
_continuous_stopping = False
if on_status:
try:
on_status("idle")
except Exception:
pass
threading.Thread(target=_transcribe_and_cleanup, daemon=True).start()
return
else:
try:
# cancel() (not stop()) discards buffered frames — the loop
# is over, we don't want to transcribe a half-captured turn.
rec.cancel()
except Exception as e:
logger.warning("failed to cancel recorder: %s", e)
with _continuous_lock:
_continuous_stopping = False
# Audible "recording stopped" cue (CLI parity: same 660 Hz × 2 the
# silence-auto-stop path plays).
@ -417,23 +699,39 @@ def _continuous_on_silence() -> None:
_debug("_continuous_on_silence: stopped while waiting for TTS")
return
# Restart for the next turn.
_debug(f"_continuous_on_silence: restarting loop (no_speech={no_speech})")
_play_beep(frequency=880, count=1)
try:
rec.start(on_silence_stop=_continuous_on_silence)
except Exception as e:
logger.error("failed to restart continuous recording: %s", e)
_debug(f"_continuous_on_silence: restart raised {type(e).__name__}: {e}")
if _continuous_auto_restart:
# Restart for the next turn.
_debug(f"_continuous_on_silence: restarting loop (no_speech={no_speech})")
_play_beep(frequency=880, count=1)
try:
rec.start(on_silence_stop=_continuous_on_silence)
except Exception as e:
logger.error("failed to restart continuous recording: %s", e)
_debug(f"_continuous_on_silence: restart raised {type(e).__name__}: {e}")
with _continuous_lock:
_continuous_active = False
if on_status:
try:
on_status("idle")
except Exception:
pass
return
if on_status:
try:
on_status("listening")
except Exception:
pass
else:
# Do not auto-restart. Clean up state and notify idle.
_debug("_continuous_on_silence: auto_restart=False, stopping loop")
with _continuous_lock:
_continuous_active = False
return
if on_status:
try:
on_status("listening")
except Exception:
pass
if on_status:
try:
on_status("idle")
except Exception:
pass
# ── TTS API ──────────────────────────────────────────────────────────

View file

@ -52,7 +52,7 @@ from gateway.status import get_running_pid, read_runtime_status
try:
from fastapi import FastAPI, HTTPException, Request, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, Response
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
except ImportError:
@ -1877,8 +1877,8 @@ async def _start_device_code_flow(provider_id: str) -> Dict[str, Any]:
name=f"oauth-codex-{sid[:6]}",
).start()
# Block briefly until the worker has populated the user_code, OR error.
deadline = time.time() + 10
while time.time() < deadline:
deadline = time.monotonic() + 10
while time.monotonic() < deadline:
with _oauth_sessions_lock:
s = _oauth_sessions.get(sid)
if s and (s.get("user_code") or s["status"] != "pending"):
@ -2012,10 +2012,10 @@ def _codex_full_login_worker(session_id: str) -> None:
sess["expires_at"] = time.time() + sess["expires_in"]
# Step 2: poll until authorized
deadline = time.time() + sess["expires_in"]
deadline = time.monotonic() + sess["expires_in"]
code_resp = None
with httpx.Client(timeout=httpx.Timeout(15.0)) as client:
while time.time() < deadline:
while time.monotonic() < deadline:
time.sleep(poll_interval)
poll = client.post(
f"{issuer}/api/accounts/deviceauth/token",
@ -2173,6 +2173,83 @@ async def cancel_oauth_session(session_id: str, request: Request):
# ---------------------------------------------------------------------------
def _session_latest_descendant(session_id: str):
    """Resolve a session id to the newest child leaf session.

    /model may create child sessions. Dashboard refresh should continue the
    newest child instead of reopening the old parent.

    Returns ``(leaf_id, path)`` where ``path`` runs from the resolved id
    down to the leaf, or ``(None, [])`` when the session can't be found.
    """
    from hermes_state import SessionDB

    def row_get(row, key, index):
        # Tolerate the three row shapes a DB layer can hand back: plain
        # dicts, mapping-style rows (e.g. sqlite3.Row, indexable by key),
        # and bare tuples (positional fallback). Anything else -> None.
        if isinstance(row, dict):
            return row.get(key)
        try:
            return row[key]
        except Exception:
            try:
                return row[index]
            except Exception:
                return None

    db = SessionDB()
    try:
        sid = db.resolve_session_id(session_id)
        if not sid or not db.get_session(sid):
            return None, []
        # Probe several private attribute spellings for the underlying
        # connection — presumably to stay compatible across SessionDB
        # versions (NOTE(review): confirm which spelling is current).
        conn = (
            getattr(db, "conn", None)
            or getattr(db, "_conn", None)
            or getattr(db, "connection", None)
            or getattr(db, "_connection", None)
        )
        rows = []
        if conn is not None:
            # Fast path: one SQL scan of the minimal columns needed to
            # build the parent -> children index.
            raw_rows = conn.execute(
                "SELECT id, parent_session_id, started_at FROM sessions"
            ).fetchall()
            for row in raw_rows:
                rows.append({
                    "id": row_get(row, "id", 0),
                    "parent_session_id": row_get(row, "parent_session_id", 1),
                    "started_at": row_get(row, "started_at", 2),
                })
        else:
            # Fallback: public API; large limit to cover the full table.
            rows = db.list_sessions_rich(limit=10000, offset=0)
        # Index children by parent id; rows without both ids are skipped.
        children = {}
        for row in rows:
            rid = row.get("id")
            parent = row.get("parent_session_id")
            if rid and parent:
                children.setdefault(parent, []).append(row)

        def started(row):
            # Sort key: start timestamp as float, 0.0 for missing/bad values.
            try:
                return float(row.get("started_at") or 0)
            except Exception:
                return 0.0

        # Walk down the tree, always taking the newest unvisited child.
        # ``seen`` guards against cycles in corrupted parent links.
        current = sid
        path = [sid]
        seen = {sid}
        while children.get(current):
            candidates = [r for r in children[current] if r.get("id") not in seen]
            if not candidates:
                break
            candidates.sort(key=started, reverse=True)
            current = candidates[0]["id"]
            path.append(current)
            seen.add(current)
        return current, path
    finally:
        db.close()
@app.get("/api/sessions/{session_id}")
async def get_session_detail(session_id: str):
from hermes_state import SessionDB
@ -2187,6 +2264,19 @@ async def get_session_detail(session_id: str):
db.close()
@app.get("/api/sessions/{session_id}/latest-descendant")
async def get_session_latest_descendant(session_id: str):
    """Map a session id to its newest descendant leaf session.

    404s when the id can't be resolved; otherwise reports the leaf id,
    the parent-to-leaf path, and whether the leaf differs from the
    requested (resolved) session.
    """
    latest, path = _session_latest_descendant(session_id)
    if not latest:
        raise HTTPException(status_code=404, detail="Session not found")
    requested = path[0] if path else session_id
    return {
        "requested_session_id": requested,
        "session_id": latest,
        "path": path,
        "changed": bool(path and latest != path[0]),
    }
@app.get("/api/sessions/{session_id}/messages")
async def get_session_messages(session_id: str):
from hermes_state import SessionDB
@ -2366,6 +2456,7 @@ async def delete_cron_job(job_id: str):
class ProfileCreate(BaseModel):
name: str
clone_from_default: bool = False
no_skills: bool = False
class ProfileRename(BaseModel):
@ -2471,11 +2562,13 @@ async def create_profile_endpoint(body: ProfileCreate):
name=body.name,
clone_from="default" if body.clone_from_default else None,
clone_config=body.clone_from_default,
no_skills=body.no_skills,
)
# Match the CLI's profile-create flow: fresh named profiles get the
# bundled skills installed. When cloning from default, create_profile()
# has already copied the source profile's skills, including any
# user-installed skills.
# user-installed skills. When no_skills=True, create_profile() wrote
# the opt-out marker and seed_profile_skills() will no-op.
if not body.clone_from_default:
profiles_mod.seed_profile_skills(path, quiet=True)
@ -2946,8 +3039,18 @@ def _resolve_chat_argv(
argv, cwd = _make_tui_argv(PROJECT_ROOT / "ui-tui", tui_dev=False)
env = os.environ.copy()
env.setdefault("NODE_ENV", "production")
# Browser-embedded chat should prefer stable wheel-based scrollback over
# native terminal mouse tracking. When mouse tracking is enabled, wheel
# events are consumed by the TUI and forwarded as terminal input, which
# makes browser-side transcript scrolling feel broken. Keep the terminal
# build unchanged for native CLI usage; only disable mouse tracking for
# the dashboard PTY path.
env.setdefault("HERMES_TUI_DISABLE_MOUSE", "1")
if resume:
latest_resume, _latest_path = _session_latest_descendant(resume)
if latest_resume:
resume = latest_resume
env["HERMES_TUI_RESUME"] = resume
if sidecar_url:
@ -3205,12 +3308,42 @@ async def events_ws(ws: WebSocket) -> None:
_event_channels.pop(channel, None)
def _normalise_prefix(raw: Optional[str]) -> str:
"""Normalise an X-Forwarded-Prefix header value.
Returns a string like ``"/hermes"`` (no trailing slash) or ``""`` when
no prefix is set / the header is malformed. We deliberately reject
anything containing ``..`` or non-printable bytes so a hostile proxy
can't inject HTML via the prefix.
"""
if not raw:
return ""
p = raw.strip()
if not p:
return ""
if not p.startswith("/"):
p = "/" + p
p = p.rstrip("/")
if "//" in p or ".." in p or any(c in p for c in ('"', "'", "<", ">", " ", "\n", "\r", "\t")):
return ""
if len(p) > 64:
return ""
return p
def mount_spa(application: FastAPI):
"""Mount the built SPA. Falls back to index.html for client-side routing.
The session token is injected into index.html via a ``<script>`` tag so
the SPA can authenticate against protected API endpoints without a
separate (unauthenticated) token-dispensing endpoint.
When served behind a path-prefix reverse proxy (e.g.
``mission-control.tilos.com/hermes/*`` -> local Caddy -> :9119), the
proxy injects ``X-Forwarded-Prefix: /hermes`` on every request. We
rewrite the served ``index.html`` so absolute asset URLs (``/assets/...``)
and the SPA's runtime ``__HERMES_BASE_PATH__`` honour that prefix
without rebuilding the bundle.
"""
if not WEB_DIST.exists():
@application.get("/{full_path:path}")
@ -3223,24 +3356,62 @@ def mount_spa(application: FastAPI):
_index_path = WEB_DIST / "index.html"
def _serve_index():
"""Return index.html with the session token injected."""
def _serve_index(prefix: str = ""):
"""Return index.html with the session token + base-path injected.
``prefix`` is the normalised ``X-Forwarded-Prefix`` (e.g. ``/hermes``)
or empty string when served at root.
"""
html = _index_path.read_text()
chat_js = "true" if _DASHBOARD_EMBEDDED_CHAT_ENABLED else "false"
token_script = (
f'<script>window.__HERMES_SESSION_TOKEN__="{_SESSION_TOKEN}";'
f"window.__HERMES_DASHBOARD_EMBEDDED_CHAT__={chat_js};</script>"
f"window.__HERMES_DASHBOARD_EMBEDDED_CHAT__={chat_js};"
f'window.__HERMES_BASE_PATH__="{prefix}";</script>'
)
if prefix:
# Rewrite absolute asset URLs baked into the Vite build so the
# browser fetches them through the same proxy prefix.
html = html.replace('href="/assets/', f'href="{prefix}/assets/')
html = html.replace('src="/assets/', f'src="{prefix}/assets/')
html = html.replace('href="/favicon.ico"', f'href="{prefix}/favicon.ico"')
html = html.replace('href="/fonts/', f'href="{prefix}/fonts/')
html = html.replace('href="/ds-assets/', f'href="{prefix}/ds-assets/')
html = html.replace('src="/ds-assets/', f'src="{prefix}/ds-assets/')
html = html.replace("</head>", f"{token_script}</head>", 1)
return HTMLResponse(
html,
headers={"Cache-Control": "no-store, no-cache, must-revalidate"},
)
# When served behind a path-prefix proxy, the built CSS contains
# absolute ``url(/fonts/...)`` and ``url(/ds-assets/...)`` references.
# Browsers resolve those against the document origin, which means
# under ``/hermes`` they'd hit ``mission-control.tilos.com/fonts/...``
# (the MC Pages app), not the Hermes backend. Intercept CSS asset
# requests BEFORE the StaticFiles mount and rewrite the absolute paths
# when a prefix is in play.
@application.get("/assets/{filename}.css")
async def serve_css(filename: str, request: Request):
    """Serve a built CSS bundle, rewriting absolute asset URLs for proxies.

    ``filename`` binds from the route path. (Previously the route was the
    literal ``/assets/(unknown).css`` and the lookup used the placeholder-
    free f-string ``f"(unknown).css"``, so the parameter could never bind
    and the handler always looked for a literal ``(unknown).css`` file —
    restored to the ``{filename}`` template the signature implies.)
    """
    css_path = WEB_DIST / "assets" / f"{filename}.css"
    # Path-traversal guard: the resolved target must exist and stay
    # inside WEB_DIST even if the path parameter smuggles in dots.
    if not css_path.is_file() or not css_path.resolve().is_relative_to(
        WEB_DIST.resolve()
    ):
        return JSONResponse({"error": "not found"}, status_code=404)
    prefix = _normalise_prefix(request.headers.get("x-forwarded-prefix"))
    css = css_path.read_text()
    if prefix:
        # Cover bare, double-quoted and single-quoted url() forms for
        # every absolute asset directory the bundler emits.
        for asset_dir in ("/fonts/", "/fonts-terminal/", "/ds-assets/", "/assets/"):
            css = css.replace(f"url({asset_dir}", f"url({prefix}{asset_dir}")
            css = css.replace(f"url(\"{asset_dir}", f"url(\"{prefix}{asset_dir}")
            css = css.replace(f"url('{asset_dir}", f"url('{prefix}{asset_dir}")
    return Response(content=css, media_type="text/css")
application.mount("/assets", StaticFiles(directory=WEB_DIST / "assets"), name="assets")
@application.get("/{full_path:path}")
async def serve_spa(full_path: str):
async def serve_spa(full_path: str, request: Request):
prefix = _normalise_prefix(request.headers.get("x-forwarded-prefix"))
file_path = WEB_DIST / full_path
# Prevent path traversal via url-encoded sequences (%2e%2e/)
if (
@ -3250,7 +3421,7 @@ def mount_spa(application: FastAPI):
and file_path.is_file()
):
return FileResponse(file_path)
return _serve_index()
return _serve_index(prefix)
# ---------------------------------------------------------------------------
@ -3260,8 +3431,9 @@ def mount_spa(application: FastAPI):
# Built-in dashboard themes — label + description only. The actual color
# definitions live in the frontend (web/src/themes/presets.ts).
_BUILTIN_DASHBOARD_THEMES = [
{"name": "default", "label": "Hermes Teal", "description": "Classic dark teal — the canonical Hermes look"},
{"name": "midnight", "label": "Midnight", "description": "Deep blue-violet with cool accents"},
{"name": "default", "label": "Hermes Teal", "description": "Classic dark teal — the canonical Hermes look"},
{"name": "default-large", "label": "Hermes Teal (Large)", "description": "Hermes Teal with bigger fonts and roomier spacing"},
{"name": "midnight", "label": "Midnight", "description": "Deep blue-violet with cool accents"},
{"name": "ember", "label": "Ember", "description": "Warm crimson and bronze — forge vibes"},
{"name": "mono", "label": "Mono", "description": "Clean grayscale — minimal and focused"},
{"name": "cyberpunk", "label": "Cyberpunk", "description": "Neon green on black — matrix terminal"},