fix: resolve not-subscriptable ty diagnostics across codebase

Add TypedDicts for DEFAULT_CONFIG, CLI state dicts (_ModelPickerState,
_ApprovalState, _ClarifyState), and OPTIONAL_ENV_VARS so ty can resolve
nested dict subscripts. Guard Optional returns before subscripting
(toolsets, cron/scheduler, delegate_tool), coerce str|None to str before
slicing (gateway/run, run_agent), split ternary for isinstance narrowing
(wecom), and suppress discord interaction.data access with ty: ignore.
This commit is contained in:
alt-glitch 2026-04-21 16:59:13 +05:30
parent 1e7a598bac
commit b11e53e34f
9 changed files with 404 additions and 24 deletions

36
cli.py
View file

@ -30,7 +30,7 @@ from urllib.parse import unquote, urlparse
from contextlib import contextmanager from contextlib import contextmanager
from pathlib import Path from pathlib import Path
from datetime import datetime from datetime import datetime
from typing import List, Dict, Any, Optional from typing import List, Dict, Any, Optional, TypedDict
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -84,6 +84,34 @@ _project_env = Path(__file__).parent / '.env'
load_hermes_dotenv(hermes_home=_hermes_home, project_env=_project_env) load_hermes_dotenv(hermes_home=_hermes_home, project_env=_project_env)
class _ModelPickerState(TypedDict, total=False):
    """Transient state for the interactive model-picker overlay.

    ``total=False``: keys are filled in incrementally as the picker
    advances, so none of them are required to be present.
    """
    stage: str  # which picker screen/stage is currently active
    providers: List[Dict[str, Any]]  # provider entries offered for selection
    selected: int  # index of the currently highlighted entry
    current_model: str  # model in effect when the picker was opened
    current_provider: str  # provider in effect when the picker was opened
    user_provs: Optional[Dict[str, Any]]  # user-configured providers, if any
    custom_provs: Optional[Dict[str, Any]]  # custom provider definitions, if any
    provider_data: Dict[str, Any]  # data for the provider being browsed
    model_list: List[str]  # model ids listed for the chosen provider


class _ApprovalState(TypedDict, total=False):
    """Transient state for a pending command-approval prompt."""
    command: str  # the command text awaiting approval
    description: str  # human-readable description shown alongside the command
    choices: List[str]  # selectable responses presented to the user
    selected: int  # index of the currently highlighted choice
    response_queue: "queue.Queue[str]"  # presumably receives the user's choice — see modal handlers
    show_full: bool  # whether the full (untruncated) command is displayed


class _ClarifyState(TypedDict, total=False):
    """Transient state for an agent clarification question prompt."""
    question: str  # question posed to the user
    choices: List[str]  # predefined answer options
    selected: int  # index of the currently highlighted choice
    response_queue: "queue.Queue[str]"  # presumably receives the user's answer — see modal handlers
_REASONING_TAGS = ( _REASONING_TAGS = (
"REASONING_SCRATCHPAD", "REASONING_SCRATCHPAD",
"think", "think",
@ -2065,16 +2093,16 @@ class HermesCLI:
self._interrupt_queue = queue.Queue() self._interrupt_queue = queue.Queue()
self._should_exit = False self._should_exit = False
self._last_ctrl_c_time = 0 self._last_ctrl_c_time = 0
self._clarify_state = None self._clarify_state: Optional[_ClarifyState] = None
self._clarify_freetext = False self._clarify_freetext = False
self._clarify_deadline = 0 self._clarify_deadline = 0
self._sudo_state = None self._sudo_state = None
self._sudo_deadline = 0 self._sudo_deadline = 0
self._modal_input_snapshot = None self._modal_input_snapshot = None
self._approval_state = None self._approval_state: Optional[_ApprovalState] = None
self._approval_deadline = 0 self._approval_deadline = 0
self._approval_lock = threading.Lock() self._approval_lock = threading.Lock()
self._model_picker_state = None self._model_picker_state: Optional[_ModelPickerState] = None
self._secret_state = None self._secret_state = None
self._secret_deadline = 0 self._secret_deadline = 0
self._spinner_text: str = "" # thinking spinner text for TUI self._spinner_text: str = "" # thinking spinner text for TUI

View file

@ -439,8 +439,9 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
delivery_errors.append(msg) delivery_errors.append(msg)
continue continue
if result and result.get("error"): error = result.get("error") if result else None
msg = f"delivery error: {result['error']}" if error:
msg = f"delivery error: {error}"
logger.error("Job '%s': %s", job["id"], msg) logger.error("Job '%s': %s", job["id"], msg)
delivery_errors.append(msg) delivery_errors.append(msg)
continue continue

View file

@ -3634,7 +3634,7 @@ if DISCORD_AVAILABLE:
) )
return return
provider_slug = interaction.data["values"][0] provider_slug = interaction.data["values"][0] # ty: ignore[not-subscriptable]
self._selected_provider = provider_slug self._selected_provider = provider_slug
provider = next( provider = next(
(p for p in self.providers if p["slug"] == provider_slug), None (p for p in self.providers if p["slug"] == provider_slug), None
@ -3669,7 +3669,7 @@ if DISCORD_AVAILABLE:
return return
self.resolved = True self.resolved = True
model_id = interaction.data["values"][0] model_id = interaction.data["values"][0] # ty: ignore[not-subscriptable]
try: try:
result_text = await self.on_model_selected( result_text = await self.on_model_selected(

View file

@ -703,7 +703,8 @@ class WeComAdapter(BasePlatformAdapter):
elif isinstance(appmsg.get("image"), dict): elif isinstance(appmsg.get("image"), dict):
refs.append(("image", appmsg["image"])) refs.append(("image", appmsg["image"]))
quote = body.get("quote") if isinstance(body.get("quote"), dict) else {} raw_quote = body.get("quote")
quote = raw_quote if isinstance(raw_quote, dict) else {}
quote_type = str(quote.get("msgtype") or "").lower() quote_type = str(quote.get("msgtype") or "").lower()
if quote_type == "image" and isinstance(quote.get("image"), dict): if quote_type == "image" and isinstance(quote.get("image"), dict):
refs.append(("image", quote["image"])) refs.append(("image", quote["image"]))

View file

@ -10608,7 +10608,7 @@ class GatewayRunner:
pending = None pending = None
if pending_event or pending: if pending_event or pending:
logger.debug("Processing pending message: '%s...'", pending[:40]) logger.debug("Processing pending message: '%s...'", (pending or "")[:40])
# Clear the adapter's interrupt event so the next _run_agent call # Clear the adapter's interrupt event so the next _run_agent call
# doesn't immediately re-trigger the interrupt before the new agent # doesn't immediately re-trigger the interrupt before the new agent

View file

@ -23,7 +23,7 @@ import sys
import tempfile import tempfile
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple from typing import Dict, Any, Optional, List, Tuple, TypedDict, Union
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -343,7 +343,354 @@ def _ensure_hermes_home_managed(home: Path):
# Config loading/saving # Config loading/saving
# ============================================================================= # =============================================================================
class _AgentConfig(TypedDict):
    """Schema for the ``agent`` section of DEFAULT_CONFIG."""
    max_turns: int
    gateway_timeout: int
    restart_drain_timeout: int
    service_tier: str
    tool_use_enforcement: str
    gateway_timeout_warning: int
    gateway_notify_interval: int


class _TerminalConfig(TypedDict):
    """Schema for the ``terminal`` section of DEFAULT_CONFIG."""
    backend: str  # which terminal backend to use (docker/singularity/modal/daytona per the image keys below)
    modal_mode: str
    cwd: str
    timeout: int
    env_passthrough: List[str]
    docker_image: str
    docker_forward_env: List[str]
    docker_env: Dict[str, str]
    singularity_image: str
    modal_image: str
    daytona_image: str
    container_cpu: int
    container_memory: int
    container_disk: int
    container_persistent: bool
    docker_volumes: List[str]
    docker_mount_cwd_to_workspace: bool
    persistent_shell: bool


class _BrowserConfig(TypedDict):
    """Schema for the ``browser`` section of DEFAULT_CONFIG."""
    inactivity_timeout: int
    command_timeout: int
    record_sessions: bool
    allow_private_urls: bool
    cdp_url: str
    # NOTE(review): _CamofoxConfig must be defined earlier in this module;
    # its definition is not visible in this hunk — confirm it exists.
    camofox: _CamofoxConfig


class _CheckpointsConfig(TypedDict):
    """Schema for the ``checkpoints`` section of DEFAULT_CONFIG."""
    enabled: bool
    max_snapshots: int


class _CompressionConfig(TypedDict):
    """Schema for the ``compression`` section of DEFAULT_CONFIG."""
    enabled: bool
    threshold: float
    target_ratio: float
    protect_last_n: int


class _BedrockDiscoveryConfig(TypedDict):
    """Schema for the ``bedrock.discovery`` sub-section."""
    enabled: bool
    provider_filter: List[str]
    refresh_interval: int


class _BedrockGuardrailConfig(TypedDict):
    """Schema for the ``bedrock.guardrail`` sub-section."""
    guardrail_identifier: str
    guardrail_version: str
    stream_processing_mode: str
    trace: str


class _BedrockConfig(TypedDict):
    """Schema for the ``bedrock`` section of DEFAULT_CONFIG."""
    region: str
    discovery: _BedrockDiscoveryConfig
    guardrail: _BedrockGuardrailConfig


class _AuxiliaryTaskConfig(TypedDict, total=False):
    """Per-task settings inside the ``auxiliary`` section.

    ``total=False``: each auxiliary task only specifies the keys it needs.
    """
    provider: str
    model: str
    base_url: str
    api_key: str
    timeout: int
    extra_body: Dict[str, Any]
    max_concurrency: int
    download_timeout: int


class _AuxiliaryConfig(TypedDict):
    """Schema for the ``auxiliary`` section of DEFAULT_CONFIG."""
    vision: _AuxiliaryTaskConfig
    web_extract: _AuxiliaryTaskConfig
    compression: _AuxiliaryTaskConfig
    session_search: _AuxiliaryTaskConfig
    skills_hub: _AuxiliaryTaskConfig
    approval: _AuxiliaryTaskConfig
    mcp: _AuxiliaryTaskConfig
    flush_memories: _AuxiliaryTaskConfig
    title_generation: _AuxiliaryTaskConfig


class _UserMessagePreviewConfig(TypedDict):
    """Schema for ``display.user_message_preview``."""
    first_lines: int
    last_lines: int


class _DisplayConfig(TypedDict):
    """Schema for the ``display`` section of DEFAULT_CONFIG."""
    compact: bool
    personality: str
    resume_display: str
    busy_input_mode: str
    bell_on_complete: bool
    show_reasoning: bool
    streaming: bool
    final_response_markdown: str
    inline_diffs: bool
    show_cost: bool
    skin: str
    user_message_preview: _UserMessagePreviewConfig
    interim_assistant_messages: bool
    tool_progress_command: bool
    tool_progress_overrides: Dict[str, Any]
    tool_preview_length: int
    platforms: Dict[str, Any]


class _DashboardConfig(TypedDict):
    """Schema for the ``dashboard`` section of DEFAULT_CONFIG."""
    theme: str


class _PrivacyConfig(TypedDict):
    """Schema for the ``privacy`` section of DEFAULT_CONFIG."""
    redact_pii: bool


class _EdgeTtsConfig(TypedDict):
    """Schema for ``tts.edge``."""
    voice: str


class _ElevenlabsTtsConfig(TypedDict):
    """Schema for ``tts.elevenlabs``."""
    voice_id: str
    model_id: str


class _OpenaiTtsConfig(TypedDict):
    """Schema for ``tts.openai``."""
    model: str
    voice: str


class _XaiTtsConfig(TypedDict):
    """Schema for ``tts.xai``."""
    voice_id: str
    language: str
    sample_rate: int
    bit_rate: int


class _MistralTtsConfig(TypedDict):
    """Schema for ``tts.mistral``."""
    model: str
    voice_id: str


class _NeuttsConfig(TypedDict):
    """Schema for ``tts.neutts``."""
    ref_audio: str
    ref_text: str
    model: str
    device: str


class _TtsConfig(TypedDict):
    """Schema for the ``tts`` section of DEFAULT_CONFIG."""
    provider: str  # selects which of the per-provider sub-sections below applies
    edge: _EdgeTtsConfig
    elevenlabs: _ElevenlabsTtsConfig
    openai: _OpenaiTtsConfig
    xai: _XaiTtsConfig
    mistral: _MistralTtsConfig
    neutts: _NeuttsConfig


class _LocalSttConfig(TypedDict):
    """Schema for ``stt.local``."""
    model: str
    language: str


class _OpenaiSttConfig(TypedDict):
    """Schema for ``stt.openai``."""
    model: str


class _MistralSttConfig(TypedDict):
    """Schema for ``stt.mistral``."""
    model: str


class _SttConfig(TypedDict):
    """Schema for the ``stt`` section of DEFAULT_CONFIG."""
    enabled: bool
    provider: str  # selects which of the per-provider sub-sections below applies
    local: _LocalSttConfig
    openai: _OpenaiSttConfig
    mistral: _MistralSttConfig


class _VoiceConfig(TypedDict):
    """Schema for the ``voice`` section of DEFAULT_CONFIG."""
    record_key: str
    max_recording_seconds: int
    auto_tts: bool
    silence_threshold: int
    silence_duration: float


class _HumanDelayConfig(TypedDict):
    """Schema for the ``human_delay`` section of DEFAULT_CONFIG."""
    mode: str
    min_ms: int
    max_ms: int


class _ContextConfig(TypedDict):
    """Schema for the ``context`` section of DEFAULT_CONFIG."""
    engine: str


class _MemoryConfig(TypedDict):
    """Schema for the ``memory`` section of DEFAULT_CONFIG."""
    memory_enabled: bool
    user_profile_enabled: bool
    memory_char_limit: int
    user_char_limit: int
    provider: str


class _DelegationConfig(TypedDict):
    """Schema for the ``delegation`` section of DEFAULT_CONFIG."""
    model: str
    provider: str
    base_url: str
    api_key: str
    max_iterations: int
    reasoning_effort: str


class _SkillsConfig(TypedDict):
    """Schema for the ``skills`` section of DEFAULT_CONFIG."""
    external_dirs: List[str]


class _ChannelPromptsConfig(TypedDict):
    """Shared schema for platform sections that only carry channel prompts
    (telegram/slack/mattermost in _DefaultConfig)."""
    channel_prompts: Dict[str, str]


class _DiscordConfig(TypedDict):
    """Schema for the ``discord`` section of DEFAULT_CONFIG."""
    require_mention: bool
    free_response_channels: str
    allowed_channels: str
    auto_thread: bool
    reactions: bool
    channel_prompts: Dict[str, str]
    server_actions: str


class _ApprovalsConfig(TypedDict):
    """Schema for the ``approvals`` section of DEFAULT_CONFIG."""
    mode: str
    timeout: int
    cron_mode: str


class _WebsiteBlocklistConfig(TypedDict):
    """Schema for ``security.website_blocklist``."""
    enabled: bool
    domains: List[str]
    shared_files: List[str]


class _SecurityConfig(TypedDict):
    """Schema for the ``security`` section of DEFAULT_CONFIG."""
    redact_secrets: bool
    tirith_enabled: bool
    tirith_path: str
    tirith_timeout: int
    tirith_fail_open: bool
    website_blocklist: _WebsiteBlocklistConfig


class _CronConfig(TypedDict):
    """Schema for the ``cron`` section of DEFAULT_CONFIG."""
    wrap_response: bool
    max_parallel_jobs: Optional[int]  # None presumably means "no limit" — confirm against scheduler


class _CodeExecutionConfig(TypedDict):
    """Schema for the ``code_execution`` section of DEFAULT_CONFIG."""
    mode: str


class _LoggingConfig(TypedDict):
    """Schema for the ``logging`` section of DEFAULT_CONFIG."""
    level: str
    max_size_mb: int
    backup_count: int


class _NetworkConfig(TypedDict):
    """Schema for the ``network`` section of DEFAULT_CONFIG."""
    force_ipv4: bool
class _DefaultConfig(TypedDict):
    """Top-level schema of DEFAULT_CONFIG.

    Declaring the full nested structure lets the ``ty`` type checker
    resolve subscripts like ``DEFAULT_CONFIG["agent"]["max_turns"]``.
    """
    model: str
    providers: Dict[str, Any]
    fallback_providers: List[Any]
    credential_pool_strategies: Dict[str, Any]
    toolsets: List[str]
    agent: _AgentConfig
    terminal: _TerminalConfig
    browser: _BrowserConfig
    checkpoints: _CheckpointsConfig
    file_read_max_chars: int
    compression: _CompressionConfig
    bedrock: _BedrockConfig
    auxiliary: _AuxiliaryConfig
    display: _DisplayConfig
    dashboard: _DashboardConfig
    privacy: _PrivacyConfig
    tts: _TtsConfig
    stt: _SttConfig
    voice: _VoiceConfig
    human_delay: _HumanDelayConfig
    context: _ContextConfig
    memory: _MemoryConfig
    delegation: _DelegationConfig
    prefill_messages_file: str
    skills: _SkillsConfig
    honcho: Dict[str, Any]
    timezone: str
    discord: _DiscordConfig
    whatsapp: Dict[str, Any]
    telegram: _ChannelPromptsConfig
    slack: _ChannelPromptsConfig
    mattermost: _ChannelPromptsConfig
    approvals: _ApprovalsConfig
    command_allowlist: List[str]
    quick_commands: Dict[str, Any]
    hooks: Dict[str, Any]
    hooks_auto_accept: bool
    personalities: Dict[str, Any]
    security: _SecurityConfig
    cron: _CronConfig
    code_execution: _CodeExecutionConfig
    logging: _LoggingConfig
    network: _NetworkConfig
    _config_version: int  # internal schema-version marker (underscore-prefixed key)
class _EnvVarRequired(TypedDict):
    """Keys that every OPTIONAL_ENV_VARS entry must provide."""
    description: str
    prompt: str
    category: str


class _EnvVarOptional(TypedDict, total=False):
    """Keys an OPTIONAL_ENV_VARS entry may additionally provide.

    ``total=False`` makes every key here optional.
    """
    url: Optional[str]
    password: bool
    tools: List[str]
    advanced: bool


class _EnvVarInfo(_EnvVarRequired, _EnvVarOptional):
    """Entry type for OPTIONAL_ENV_VARS: required keys plus optional extras,
    combined via TypedDict inheritance."""
    pass
DEFAULT_CONFIG: _DefaultConfig = {
"model": "", "model": "",
"providers": {}, "providers": {},
"fallback_providers": [], "fallback_providers": [],
@ -954,7 +1301,7 @@ ENV_VARS_BY_VERSION: Dict[int, List[str]] = {
REQUIRED_ENV_VARS = {} REQUIRED_ENV_VARS = {}
# Optional environment variables that enhance functionality # Optional environment variables that enhance functionality
OPTIONAL_ENV_VARS = { OPTIONAL_ENV_VARS: Dict[str, _EnvVarInfo] = {
# ── Provider (handled in provider selection, not shown in checklists) ── # ── Provider (handled in provider selection, not shown in checklists) ──
"NOUS_BASE_URL": { "NOUS_BASE_URL": {
"description": "Nous Portal base URL override", "description": "Nous Portal base URL override",
@ -1904,7 +2251,7 @@ def get_missing_config_fields() -> List[Dict[str, Any]]:
config = load_config() config = load_config()
missing = [] missing = []
def _check(defaults: dict, current: dict, prefix: str = ""): def _check(defaults: Dict[str, Any], current: Dict[str, Any], prefix: str = ""):
for key, default_value in defaults.items(): for key, default_value in defaults.items():
if key.startswith('_'): if key.startswith('_'):
continue continue
@ -1918,7 +2265,7 @@ def get_missing_config_fields() -> List[Dict[str, Any]]:
elif isinstance(default_value, dict) and isinstance(current.get(key), dict): elif isinstance(default_value, dict) and isinstance(current.get(key), dict):
_check(default_value, current[key], full_key) _check(default_value, current[key], full_key)
_check(DEFAULT_CONFIG, config) _check(dict(DEFAULT_CONFIG), config)
return missing return missing
@ -2867,7 +3214,7 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A
return results return results
def _deep_merge(base: dict, override: dict) -> dict: def _deep_merge(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively merge *override* into *base*, preserving nested defaults. """Recursively merge *override* into *base*, preserving nested defaults.
Keys in *override* take precedence. If both values are dicts the merge Keys in *override* take precedence. If both values are dicts the merge
@ -3056,7 +3403,7 @@ def load_config() -> Dict[str, Any]:
ensure_hermes_home() ensure_hermes_home()
config_path = get_config_path() config_path = get_config_path()
config = copy.deepcopy(DEFAULT_CONFIG) config: Dict[str, Any] = copy.deepcopy(DEFAULT_CONFIG)
if config_path.exists(): if config_path.exists():
try: try:
@ -3732,7 +4079,7 @@ def edit_config():
# Ensure config exists # Ensure config exists
if not config_path.exists(): if not config_path.exists():
save_config(DEFAULT_CONFIG) save_config(dict(DEFAULT_CONFIG))
print(f"Created {config_path}") print(f"Created {config_path}")
# Find editor # Find editor

View file

@ -10228,7 +10228,7 @@ class AIAgent:
auth_method = "Bearer (OAuth/setup-token)" if _is_oauth_token(key) else "x-api-key (API key)" auth_method = "Bearer (OAuth/setup-token)" if _is_oauth_token(key) else "x-api-key (API key)"
print(f"{self.log_prefix}🔐 Anthropic 401 — authentication failed.") print(f"{self.log_prefix}🔐 Anthropic 401 — authentication failed.")
print(f"{self.log_prefix} Auth method: {auth_method}") print(f"{self.log_prefix} Auth method: {auth_method}")
print(f"{self.log_prefix} Token prefix: {key[:12]}..." if key and len(key) > 12 else f"{self.log_prefix} Token: (empty or short)") print(f"{self.log_prefix} Token prefix: {str(key)[:12]}..." if key and len(str(key)) > 12 else f"{self.log_prefix} Token: (empty or short)")
print(f"{self.log_prefix} Troubleshooting:") print(f"{self.log_prefix} Troubleshooting:")
from hermes_constants import display_hermes_home as _dhh_fn from hermes_constants import display_hermes_home as _dhh_fn
_dhh = _dhh_fn() _dhh = _dhh_fn()
@ -11572,7 +11572,7 @@ class AIAgent:
messages.append(assistant_msg) messages.append(assistant_msg)
if reasoning_text: if reasoning_text:
reasoning_preview = reasoning_text[:500] + "..." if len(reasoning_text) > 500 else reasoning_text reasoning_preview = str(reasoning_text)[:500] + "..." if len(str(reasoning_text)) > 500 else reasoning_text
logger.warning( logger.warning(
"Reasoning-only response (no visible content) " "Reasoning-only response (no visible content) "
"after exhausting retries and fallback. " "after exhausting retries and fallback. "

View file

@ -1602,7 +1602,7 @@ def delegate_task(
n_tasks = len(task_list) n_tasks = len(task_list)
# Track goal labels for progress display (truncated for readability) # Track goal labels for progress display (truncated for readability)
task_labels = [t["goal"][:40] for t in task_list] task_labels = [str(t["goal"] or "")[:40] for t in task_list]
# Save parent tool names BEFORE any child construction mutates the global. # Save parent tool names BEFORE any child construction mutates the global.
# _build_child_agent() calls AIAgent() which calls get_tool_definitions(), # _build_child_agent() calls AIAgent() which calls get_tool_definitions(),

View file

@ -689,6 +689,8 @@ if __name__ == "__main__":
print("-" * 40) print("-" * 40)
for name, toolset in get_all_toolsets().items(): for name, toolset in get_all_toolsets().items():
info = get_toolset_info(name) info = get_toolset_info(name)
if not info:
continue
composite = "[composite]" if info["is_composite"] else "[leaf]" composite = "[composite]" if info["is_composite"] else "[leaf]"
print(f" {composite} {name:20} - {toolset['description']}") print(f" {composite} {name:20} - {toolset['description']}")
print(f" Tools: {len(info['resolved_tools'])} total") print(f" Tools: {len(info['resolved_tools'])} total")
@ -715,6 +717,7 @@ if __name__ == "__main__":
includes=["terminal", "vision"] includes=["terminal", "vision"]
) )
custom_info = get_toolset_info("my_custom") custom_info = get_toolset_info("my_custom")
print(" Created 'my_custom' toolset:") if custom_info:
print(f" Description: {custom_info['description']}") print(" Created 'my_custom' toolset:")
print(f" Resolved tools: {', '.join(custom_info['resolved_tools'])}") print(f" Description: {custom_info['description']}")
print(f" Resolved tools: {', '.join(custom_info['resolved_tools'])}")