fix(types): batch P1 ty hotfixes + run_agent.py annotation pass

15 P1 ship-stopper runtime bugs from the ty triage plus the cross-bucket
cleanup in run_agent.py. Net: -138 ty diagnostics (1953 -> 1815). Major
wins on not-subscriptable (-34), unresolved-attribute (-29),
invalid-argument-type (-26), invalid-type-form (-20),
unsupported-operator (-18), invalid-key (-9).

Missing refs (structural):
- tools/rl_training_tool.py: RunState dataclass gains api_log_file,
  trainer_log_file, env_log_file fields; stop-run was closing undeclared
  handles.
- agent/credential_pool.py: remove_entry(entry_id) added, symmetric with
  add_entry; used by hermes_cli/web_server.py OAuth dashboard cleanup.
- hermes_cli/config.py: _CamofoxConfig TypedDict defined (was referenced
  by _BrowserConfig but never declared).
- hermes_cli/gateway.py: _setup_wecom_callback() added, mirroring
  _setup_wecom().
- tui_gateway/server.py: skills_hub imports corrected from
  hermes_cli.skills_hub -> tools.skills_hub.

Typo / deprecation:
- tools/transcription_tools.py: os.sys.modules -> sys.modules.
- gateway/platforms/bluebubbles.py: datetime.utcnow() ->
  datetime.now(timezone.utc).

None-guards:
- gateway/platforms/telegram.py:~2798 - msg.sticker None guard.
- gateway/platforms/discord.py:3602/3637 - interaction.data None +
  SelectMenu narrowing; :3009 - thread_id None before `in`; :1893 -
  guild.member_count None.
- gateway/platforms/matrix.py:2174/2185 - walrus-narrow
  re.search().group().
- agent/display.py:732 - start_time None before elapsed subtraction.
- gateway/run.py:10334 - assert _agent_timeout is not None before `//
  60`.

Platform override signature match:
- gateway/platforms/email.py: send_image accepts metadata kwarg;
  send_document accepts **kwargs (matches base class).

run_agent.py annotation pass:
- callable/any -> Callable/Any in annotation position (15 sites in
  run_agent.py + 5 in cli.py, toolset_distributions.py,
  tools/delegate_tool.py, hermes_cli/dingtalk_auth.py,
  tui_gateway/server.py).
- conversation_history param widened to list[dict[str, Any]] | None.
- OMIT_TEMPERATURE sentinel guarded from leaking into
  call_llm(temperature): kwargs-dict pattern at run_agent.py:7337 +
  scripts/trajectory_compressor.py:618/688.
- build_anthropic_client(timeout) widened to Optional[float].

Tests:
- tests/agent/test_credential_pool.py: remove_entry (id match,
  unknown-id, priority renumbering).
- tests/hermes_cli/test_config_shapes.py: _CamofoxConfig shape +
  nesting.
- tests/tools/test_rl_training_tool.py: RunState log_file fields.
This commit is contained in:
alt-glitch 2026-04-21 20:20:13 +05:30
parent fb6d37495b
commit 15ac253b11
24 changed files with 1726 additions and 254 deletions

View file

@ -292,7 +292,7 @@ def _common_betas_for_base_url(base_url: str | None) -> list[str]:
return _COMMON_BETAS
def build_anthropic_client(api_key: str, base_url: str = None, timeout: float = None):
def build_anthropic_client(api_key: str, base_url: str = None, timeout: Optional[float] = None):
"""Create an Anthropic client, auto-detecting setup-tokens vs API keys.
If *timeout* is provided it overrides the default 900s read timeout. The

View file

@ -876,6 +876,20 @@ class CredentialPool:
self._current_id = None
return removed
def remove_entry(self, entry_id: str) -> Optional[PooledCredential]:
    """Remove the pooled credential whose id equals *entry_id*.

    On a match the entry is removed, the survivors are renumbered to
    consecutive priorities starting at 0, and the store is persisted.
    If the removed entry was the currently selected one, the selection
    is cleared. Returns the removed entry, or ``None`` when no entry
    carries the given id (in which case nothing is persisted).
    """
    match_idx = next(
        (i for i, candidate in enumerate(self._entries) if candidate.id == entry_id),
        None,
    )
    if match_idx is None:
        return None
    removed = self._entries.pop(match_idx)
    # Renumber survivors so priorities stay dense: 0, 1, 2, ...
    renumbered = []
    for position, survivor in enumerate(self._entries):
        renumbered.append(replace(survivor, priority=position))
    self._entries = renumbered
    self._persist()
    if self._current_id == removed.id:
        self._current_id = None
    return removed
def resolve_target(self, target: Any) -> Tuple[Optional[int], Optional[PooledCredential], Optional[str]]:
raw = str(target or "").strip()
if not raw:

View file

@ -729,6 +729,8 @@ class KawaiiSpinner:
time.sleep(0.1)
continue
frame = self.spinner_frames[self.frame_idx % len(self.spinner_frames)]
if self.start_time is None:
continue
elapsed = time.time() - self.start_time
if wings:
left, right = wings[self.frame_idx % len(wings)]

2
cli.py
View file

@ -1659,7 +1659,7 @@ def _parse_skills_argument(skills: str | list[str] | tuple[str, ...] | None) ->
return parsed
def save_config_value(key_path: str, value: any) -> bool:
def save_config_value(key_path: str, value: Any) -> bool:
"""
Save a value to the active config file at the specified key path.

View file

@ -14,7 +14,7 @@ import logging
import os
import re
import uuid
from datetime import datetime
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from urllib.parse import quote
@ -377,7 +377,7 @@ class BlueBubblesAdapter(BasePlatformAdapter):
payload = {
"addresses": [address],
"message": message,
"tempGuid": f"temp-{datetime.utcnow().timestamp()}",
"tempGuid": f"temp-{datetime.now(timezone.utc).timestamp()}",
}
try:
res = await self._api_post("/api/v1/chat/new", payload)
@ -417,7 +417,7 @@ class BlueBubblesAdapter(BasePlatformAdapter):
)
payload: Dict[str, Any] = {
"chatGuid": guid,
"tempGuid": f"temp-{datetime.utcnow().timestamp()}",
"tempGuid": f"temp-{datetime.now(timezone.utc).timestamp()}",
"message": chunk,
}
if reply_to and self._private_api_enabled and self._helper_connected:

View file

@ -1890,7 +1890,7 @@ class DiscordAdapter(BasePlatformAdapter):
# Fetch full member list (requires members intent)
try:
members = guild.members
if len(members) < guild.member_count:
if guild.member_count is not None and len(members) < guild.member_count:
members = [m async for m in guild.fetch_members(limit=None)]
except Exception as e:
logger.warning("Failed to fetch members for guild %s: %s", guild.name, e)
@ -3006,7 +3006,7 @@ class DiscordAdapter(BasePlatformAdapter):
# Skip the mention check if the message is in a thread where
# the bot has previously participated (auto-created or replied in).
in_bot_thread = is_thread and thread_id in self._threads
in_bot_thread = is_thread and thread_id is not None and thread_id in self._threads
if require_mention and not is_free_channel and not in_bot_thread:
if self._client.user not in message.mentions and not mention_prefix:
@ -3599,7 +3599,9 @@ if DISCORD_AVAILABLE:
)
return
provider_slug = interaction.data["values"][0] # ty: ignore[not-subscriptable]
if interaction.data is None:
return
provider_slug = interaction.data["values"][0] # ty: ignore[invalid-key]
self._selected_provider = provider_slug
provider = next(
(p for p in self.providers if p["slug"] == provider_slug), None
@ -3634,7 +3636,9 @@ if DISCORD_AVAILABLE:
return
self.resolved = True
model_id = interaction.data["values"][0] # ty: ignore[not-subscriptable]
if interaction.data is None:
return
model_id = interaction.data["values"][0] # ty: ignore[invalid-key]
try:
result_text = await self.on_model_selected(

View file

@ -532,6 +532,7 @@ class EmailAdapter(BasePlatformAdapter):
image_url: str,
caption: Optional[str] = None,
reply_to: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> SendResult:
"""Send an image URL as part of an email body."""
text = caption or ""
@ -545,6 +546,7 @@ class EmailAdapter(BasePlatformAdapter):
caption: Optional[str] = None,
file_name: Optional[str] = None,
reply_to: Optional[str] = None,
**kwargs,
) -> SendResult:
"""Send a file as an email attachment."""
try:

View file

@ -2170,8 +2170,8 @@ class MatrixAdapter(BasePlatformAdapter):
ul_match = re.match(r"^[\s]*[-*+]\s+(.+)$", line)
if ul_match:
items = []
while i < len(lines) and re.match(r"^[\s]*[-*+]\s+(.+)$", lines[i]):
items.append(re.match(r"^[\s]*[-*+]\s+(.+)$", lines[i]).group(1))
while i < len(lines) and (m := re.match(r"^[\s]*[-*+]\s+(.+)$", lines[i])):
items.append(m.group(1))
i += 1
li = "".join(f"<li>{item}</li>" for item in items)
out_lines.append(f"<ul>{li}</ul>")
@ -2181,8 +2181,8 @@ class MatrixAdapter(BasePlatformAdapter):
ol_match = re.match(r"^[\s]*\d+[.)]\s+(.+)$", line)
if ol_match:
items = []
while i < len(lines) and re.match(r"^[\s]*\d+[.)]\s+(.+)$", lines[i]):
items.append(re.match(r"^[\s]*\d+[.)]\s+(.+)$", lines[i]).group(1))
while i < len(lines) and (m := re.match(r"^[\s]*\d+[.)]\s+(.+)$", lines[i])):
items.append(m.group(1))
i += 1
li = "".join(f"<li>{item}</li>" for item in items)
out_lines.append(f"<ol>{li}</ol>")

View file

@ -2796,6 +2796,8 @@ class TelegramAdapter(BasePlatformAdapter):
)
sticker = msg.sticker
if sticker is None:
return
emoji = sticker.emoji or ""
set_name = sticker.set_name or ""

View file

@ -10331,6 +10331,7 @@ class GatewayRunner:
if _timed_out_agent and hasattr(_timed_out_agent, "interrupt"):
_timed_out_agent.interrupt(_INTERRUPT_REASON_TIMEOUT)
assert _agent_timeout is not None # narrowed by _idle_secs >= _agent_timeout above
_timeout_mins = int(_agent_timeout // 60) or 1
# Construct a user-facing message with diagnostic context.

View file

@ -373,6 +373,10 @@ class _TerminalConfig(TypedDict):
persistent_shell: bool
class _CamofoxConfig(TypedDict, total=False):
    """Shape of the Camofox browser sub-config; total=False makes every key optional, so {} is valid."""
    managed_persistence: bool
class _BrowserConfig(TypedDict):
inactivity_timeout: int
command_timeout: int

View file

@ -18,7 +18,7 @@ import os
import sys
import time
import logging
from typing import Optional, Tuple
from typing import Any, Callable, Optional, Tuple
import requests
@ -108,7 +108,7 @@ def wait_for_registration_success(
device_code: str,
interval: int = 3,
expires_in: int = 7200,
on_waiting: Optional[callable] = None,
on_waiting: Optional[Callable[..., Any]] = None,
) -> Tuple[str, str]:
"""Block until the registration succeeds or times out.

View file

@ -2644,6 +2644,12 @@ def _setup_wecom():
_setup_standard_platform(wecom_platform)
def _setup_wecom_callback():
    """Configure WeCom Callback (self-built app) via the standard platform setup."""
    # Locate the registry entry for the callback variant, then hand it to
    # the shared setup flow (mirrors _setup_wecom).
    matches = (entry for entry in _PLATFORMS if entry["key"] == "wecom_callback")
    _setup_standard_platform(next(matches))
def _is_service_installed() -> bool:
"""Check if the gateway is installed as a system service."""
if supports_systemd_services():

View file

@ -37,7 +37,7 @@ import time
import threading
from types import SimpleNamespace
import uuid
from typing import List, Dict, Any, Optional, TYPE_CHECKING
from typing import Callable, List, Dict, Any, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from agent.rate_limit_tracker import RateLimitState
@ -736,17 +736,17 @@ class AIAgent:
provider_require_parameters: bool = False,
provider_data_collection: str = None,
session_id: str = None,
tool_progress_callback: callable = None,
tool_start_callback: callable = None,
tool_complete_callback: callable = None,
thinking_callback: callable = None,
reasoning_callback: callable = None,
clarify_callback: callable = None,
step_callback: callable = None,
stream_delta_callback: callable = None,
interim_assistant_callback: callable = None,
tool_gen_callback: callable = None,
status_callback: callable = None,
tool_progress_callback: Callable[..., Any] = None,
tool_start_callback: Callable[..., Any] = None,
tool_complete_callback: Callable[..., Any] = None,
thinking_callback: Callable[..., Any] = None,
reasoning_callback: Callable[..., Any] = None,
clarify_callback: Callable[..., Any] = None,
step_callback: Callable[..., Any] = None,
stream_delta_callback: Callable[..., Any] = None,
interim_assistant_callback: Callable[..., Any] = None,
tool_gen_callback: Callable[..., Any] = None,
status_callback: Callable[..., Any] = None,
max_tokens: int = None,
reasoning_config: Dict[str, Any] = None,
service_tier: str = None,
@ -4688,7 +4688,7 @@ class AIAgent:
def _close_request_openai_client(self, client: Any, *, reason: str) -> None:
self._close_openai_client(client, reason=reason, shared=False)
def _run_codex_stream(self, api_kwargs: dict, client: Any = None, on_first_delta: callable = None):
def _run_codex_stream(self, api_kwargs: dict, client: Any = None, on_first_delta: Callable[..., Any] = None):
"""Execute one streaming Responses API request and return the final response."""
import httpx as _httpx
@ -5384,7 +5384,7 @@ class AIAgent:
)
def _interruptible_streaming_api_call(
self, api_kwargs: dict, *, on_first_delta: callable = None
self, api_kwargs: dict, *, on_first_delta: Callable[..., Any] = None
):
"""Streaming variant of _interruptible_api_call for real-time token delivery.
@ -7334,12 +7334,15 @@ class AIAgent:
_flush_temperature = _fixed_temp
else:
_flush_temperature = 0.3
_flush_llm_kwargs: dict = {}
if _flush_temperature is not None:
_flush_llm_kwargs["temperature"] = _flush_temperature
try:
response = _call_llm(
task="flush_memories",
messages=api_messages,
tools=[memory_tool_def],
temperature=_flush_temperature,
**_flush_llm_kwargs,
max_tokens=5120,
# timeout resolved from auxiliary.flush_memories.timeout config
)
@ -8531,9 +8534,9 @@ class AIAgent:
self,
user_message: str,
system_message: str = None,
conversation_history: List[Dict[str, Any]] = None,
conversation_history: List[Dict[str, Any]] | None = None,
task_id: str = None,
stream_callback: Optional[callable] = None,
stream_callback: Optional[Callable[..., Any]] = None,
persist_user_message: Optional[str] = None,
) -> Dict[str, Any]:
"""
@ -11783,7 +11786,7 @@ class AIAgent:
return result
def chat(self, message: str, stream_callback: Optional[callable] = None) -> str:
def chat(self, message: str, stream_callback: Optional[Callable[..., Any]] = None) -> str:
"""
Simple chat interface that returns just the final response.

View file

@ -611,11 +611,14 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
if getattr(self, '_use_call_llm', False):
from agent.auxiliary_client import call_llm
_call_llm_kwargs: dict = {}
if summary_temperature is not None:
_call_llm_kwargs["temperature"] = summary_temperature
response = call_llm(
provider=self._llm_provider,
model=self.config.summarization_model,
messages=[{"role": "user", "content": prompt}],
temperature=summary_temperature,
**_call_llm_kwargs,
max_tokens=self.config.summary_target_tokens * 2,
)
else:
@ -627,14 +630,14 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
if summary_temperature is not None:
_create_kwargs["temperature"] = summary_temperature
response = self.client.chat.completions.create(**_create_kwargs)
summary = self._coerce_summary_content(response.choices[0].message.content)
return self._ensure_summary_prefix(summary)
except Exception as e:
metrics.summarization_errors += 1
self.logger.warning(f"Summarization attempt {attempt + 1} failed: {e}")
if attempt < self.config.max_retries - 1:
time.sleep(jittered_backoff(attempt + 1, base_delay=self.config.retry_delay, max_delay=30.0))
else:
@ -681,11 +684,14 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
if getattr(self, '_use_call_llm', False):
from agent.auxiliary_client import async_call_llm
_async_llm_kwargs: dict = {}
if summary_temperature is not None:
_async_llm_kwargs["temperature"] = summary_temperature
response = await async_call_llm(
provider=self._llm_provider,
model=self.config.summarization_model,
messages=[{"role": "user", "content": prompt}],
temperature=summary_temperature,
**_async_llm_kwargs,
max_tokens=self.config.summary_target_tokens * 2,
)
else:
@ -697,14 +703,14 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
if summary_temperature is not None:
_create_kwargs["temperature"] = summary_temperature
response = await self._get_async_client().chat.completions.create(**_create_kwargs)
summary = self._coerce_summary_content(response.choices[0].message.content)
return self._ensure_summary_prefix(summary)
except Exception as e:
metrics.summarization_errors += 1
self.logger.warning(f"Summarization attempt {attempt + 1} failed: {e}")
if attempt < self.config.max_retries - 1:
await asyncio.sleep(jittered_backoff(attempt + 1, base_delay=self.config.retry_delay, max_delay=30.0))
else:

View file

@ -1162,3 +1162,79 @@ def test_load_pool_does_not_seed_qwen_oauth_when_no_token(tmp_path, monkeypatch)
assert not pool.has_credentials()
assert pool.entries() == []
# ---------------------------------------------------------------------------
# Tests for CredentialPool.remove_entry (added in commit 15ac253b11)
# ---------------------------------------------------------------------------
def _build_pool_with_entries(tmp_path, monkeypatch, provider="openrouter", entries=None):
    """Helper: build a CredentialPool directly without seeding side-effects."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path / "hermes"))

    # Neutralize both seeding hooks so the pool reflects only the entries
    # written to the auth store below.
    def _no_seed(pool, existing):
        return (False, set())

    for hook in ("_seed_from_singletons", "_seed_from_env"):
        monkeypatch.setattr(f"agent.credential_pool.{hook}", _no_seed)

    if entries is None:
        entries = [
            {
                "id": f"cred-{n}",
                "label": label,
                "auth_type": "api_key",
                "priority": n - 1,
                "source": "manual",
                "access_token": f"tok-{n}",
            }
            for n, label in ((1, "primary"), (2, "secondary"))
        ]

    _write_auth_store(tmp_path, {"version": 1, "credential_pool": {provider: entries}})
    from agent.credential_pool import load_pool
    return load_pool(provider)
def test_remove_entry_removes_by_id(tmp_path, monkeypatch):
    """remove_entry should remove the entry with matching id and return it."""
    pool = _build_pool_with_entries(tmp_path, monkeypatch)
    removed = pool.remove_entry("cred-1")
    assert removed is not None and removed.id == "cred-1"
    surviving_ids = [entry.id for entry in pool.entries()]
    assert "cred-1" not in surviving_ids
    assert "cred-2" in surviving_ids
def test_remove_entry_returns_none_for_unknown_id(tmp_path, monkeypatch):
    """remove_entry returns None when no entry matches the given id."""
    pool = _build_pool_with_entries(tmp_path, monkeypatch)
    assert pool.remove_entry("nonexistent-id") is None
    # The no-op removal must leave both seeded entries untouched.
    assert len(pool.entries()) == 2
def test_remove_entry_renumbers_priorities(tmp_path, monkeypatch):
    """After remove_entry, remaining entries receive sequential priorities 0, 1, ..."""
    seed = [
        {
            "id": f"cred-{n}",
            "label": label,
            "auth_type": "api_key",
            "priority": n - 1,
            "source": "manual",
            "access_token": f"tok-{n}",
        }
        for n, label in ((1, "a"), (2, "b"), (3, "c"))
    ]
    pool = _build_pool_with_entries(tmp_path, monkeypatch, entries=seed)
    pool.remove_entry("cred-2")
    survivors = sorted(pool.entries(), key=lambda entry: entry.priority)
    assert [entry.priority for entry in survivors] == [0, 1]
    assert [entry.id for entry in survivors] == ["cred-1", "cred-3"]

View file

@ -0,0 +1,42 @@
"""Tests for TypedDict shape definitions added in commit fc00f699.
Verifies that _CamofoxConfig is importable, honours total=False
(all fields optional), and nests correctly inside _BrowserConfig.
"""
from __future__ import annotations
def test_camofox_config_is_partial_typeddict():
    """_CamofoxConfig should accept zero or more keys (total=False)."""
    # Import only what the test uses (the original also pulled in
    # _BrowserConfig, which was never referenced here).
    from hermes_cli.config import _CamofoxConfig
    # total=False: constructing with no keys must succeed at runtime
    cfg_empty: _CamofoxConfig = {}
    cfg_with_field: _CamofoxConfig = {"managed_persistence": True}
    assert cfg_empty == {}
    assert cfg_with_field.get("managed_persistence") is True
def test_camofox_config_nested_in_browser_config():
    """_CamofoxConfig should be accepted in the camofox slot of _BrowserConfig."""
    from hermes_cli.config import _CamofoxConfig, _BrowserConfig
    camofox: _CamofoxConfig = {"managed_persistence": False}
    browser: _BrowserConfig = {
        "inactivity_timeout": 60,
        "command_timeout": 10,
        "record_sessions": False,
        "allow_private_urls": False,
        "cdp_url": "http://localhost:9222",
        "camofox": camofox,
    }
    assert browser["camofox"].get("managed_persistence") is False
def test_camofox_config_total_false_flag():
    """_CamofoxConfig.__total__ must be False (all fields optional)."""
    from hermes_cli.config import _CamofoxConfig
    total_flag = _CamofoxConfig.__total__
    assert total_flag is False

View file

@ -5,6 +5,8 @@ terminates processes, and handles edge cases on failure paths.
Inspired by PR #715 (0xbyt4).
"""
import dataclasses
import io
from unittest.mock import MagicMock
import pytest
@ -118,6 +120,34 @@ class TestStopTrainingRunProcesses:
trainer.terminate.assert_not_called()
# ---------------------------------------------------------------------------
# Tests for RunState log_file fields (added in commit 15ac253b11)
# ---------------------------------------------------------------------------
class TestRunStateLogFileFields:
    """Verify api_log_file, trainer_log_file, env_log_file exist with None defaults."""

    def test_log_file_fields_default_none(self):
        """All three log_file fields should default to None."""
        state = _make_run_state()
        for attr in ("api_log_file", "trainer_log_file", "env_log_file"):
            assert getattr(state, attr) is None

    def test_accepts_file_handle_for_api_log(self):
        """api_log_file should accept an open file-like object."""
        handle = io.StringIO()
        state = _make_run_state(api_log_file=handle)
        assert state.api_log_file is handle

    def test_log_file_fields_present_in_dataclass(self):
        """All three field names must be declared on the RunState dataclass."""
        declared = {f.name for f in dataclasses.fields(RunState)}
        for name in ("api_log_file", "trainer_log_file", "env_log_file"):
            assert name in declared
class TestStopTrainingRunStatus:
"""Verify status transitions in _stop_training_run."""

View file

@ -23,7 +23,7 @@ import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Optional
from typing import Any, Callable, Dict, List, Optional
from toolsets import TOOLSETS
from utils import base_url_hostname
@ -156,7 +156,7 @@ def _strip_blocked_tools(toolsets: List[str]) -> List[str]:
return [t for t in toolsets if t not in blocked_toolset_names]
def _build_child_progress_callback(task_index: int, goal: str, parent_agent, task_count: int = 1) -> Optional[callable]:
def _build_child_progress_callback(task_index: int, goal: str, parent_agent, task_count: int = 1) -> Optional[Callable[..., Any]]:
"""Build a callback that relays child agent tool calls to the parent display.
Two display paths:

View file

@ -137,6 +137,10 @@ class RunState:
api_process: Optional[subprocess.Popen] = None
trainer_process: Optional[subprocess.Popen] = None
env_process: Optional[subprocess.Popen] = None
# Log file handles (kept open while subprocess runs; closed by _stop_training_run)
api_log_file: Optional[Any] = None
trainer_log_file: Optional[Any] = None
env_log_file: Optional[Any] = None
# Global state

View file

@ -28,6 +28,7 @@ import os
import shlex
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import Optional, Dict, Any
@ -50,7 +51,7 @@ def _safe_find_spec(module_name: str) -> bool:
try:
return _ilu.find_spec(module_name) is not None
except (ImportError, ValueError):
return module_name in globals() or module_name in os.sys.modules
return module_name in globals() or module_name in sys.modules
_HAS_FASTER_WHISPER = _safe_find_spec("faster_whisper")

View file

@ -19,7 +19,7 @@ Usage:
all_dists = list_distributions()
"""
from typing import Dict, List, Optional
from typing import Any, Dict, List, Optional
import random
from toolsets import validate_toolset
@ -220,7 +220,7 @@ DISTRIBUTIONS = {
}
def get_distribution(name: str) -> Optional[Dict[str, any]]:
def get_distribution(name: str) -> Optional[Dict[str, Any]]:
"""
Get a toolset distribution by name.

View file

@ -11,6 +11,7 @@ import time
import uuid
from datetime import datetime
from pathlib import Path
from typing import Any, Callable
from hermes_constants import get_hermes_home
from hermes_cli.env_loader import load_hermes_dotenv
@ -27,7 +28,7 @@ except Exception:
from tui_gateway.render import make_stream_renderer, render_diff, render_message
_sessions: dict[str, dict] = {}
_methods: dict[str, callable] = {}
_methods: dict[str, Callable[..., Any]] = {}
_pending: dict[str, tuple[str, threading.Event]] = {}
_answers: dict[str, str] = {}
_db = None
@ -3037,7 +3038,7 @@ def _(rid, params: dict) -> dict:
from hermes_cli.banner import get_available_skills
return _ok(rid, {"skills": get_available_skills()})
if action == "search":
from hermes_cli.skills_hub import unified_search, GitHubAuth, create_source_router
from tools.skills_hub import unified_search, GitHubAuth, create_source_router
raw = unified_search(query, create_source_router(GitHubAuth()), source_filter="all", limit=20) or []
return _ok(rid, {"results": [{"name": r.name, "description": r.description} for r in raw]})
if action == "install":

1686
uv.lock generated

File diff suppressed because it is too large Load diff