mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
fix(types): batch P1 ty hotfixes + run_agent.py annotation pass
15 P1 ship-stopper runtime bugs from the ty triage plus the cross-bucket cleanup in run_agent.py. Net: -138 ty diagnostics (1953 -> 1815). Major wins on not-subscriptable (-34), unresolved-attribute (-29), invalid-argument-type (-26), invalid-type-form (-20), unsupported-operator (-18), invalid-key (-9).

Missing refs (structural):
- tools/rl_training_tool.py: RunState dataclass gains api_log_file, trainer_log_file, env_log_file fields; stop-run was closing undeclared handles.
- agent/credential_pool.py: remove_entry(entry_id) added, symmetric with add_entry; used by hermes_cli/web_server.py OAuth dashboard cleanup.
- hermes_cli/config.py: _CamofoxConfig TypedDict defined (was referenced by _BrowserConfig but never declared).
- hermes_cli/gateway.py: _setup_wecom_callback() added, mirroring _setup_wecom().
- tui_gateway/server.py: skills_hub imports corrected from hermes_cli.skills_hub -> tools.skills_hub.

Typo / deprecation:
- tools/transcription_tools.py: os.sys.modules -> sys.modules.
- gateway/platforms/bluebubbles.py: datetime.utcnow() -> datetime.now(timezone.utc).

None-guards:
- gateway/platforms/telegram.py:~2798 - msg.sticker None guard.
- gateway/platforms/discord.py:3602/3637 - interaction.data None + SelectMenu narrowing; :3009 - thread_id None before `in`; :1893 - guild.member_count None.
- gateway/platforms/matrix.py:2174/2185 - walrus-narrow re.search().group().
- agent/display.py:732 - start_time None before elapsed subtraction.
- gateway/run.py:10334 - assert _agent_timeout is not None before `// 60`.

Platform override signature match:
- gateway/platforms/email.py: send_image accepts metadata kwarg; send_document accepts **kwargs (matches base class).

run_agent.py annotation pass:
- callable/any -> Callable/Any in annotation position (15 sites in run_agent.py + 5 in cli.py, toolset_distributions.py, tools/delegate_tool.py, hermes_cli/dingtalk_auth.py, tui_gateway/server.py).
- conversation_history param widened to list[dict[str, Any]] | None.
- OMIT_TEMPERATURE sentinel guarded from leaking into call_llm(temperature): kwargs-dict pattern at run_agent.py:7337 + scripts/trajectory_compressor.py:618/688.
- build_anthropic_client(timeout) widened to Optional[float].

Tests:
- tests/agent/test_credential_pool.py: remove_entry (id match, unknown-id, priority renumbering).
- tests/hermes_cli/test_config_shapes.py: _CamofoxConfig shape + nesting.
- tests/tools/test_rl_training_tool.py: RunState log_file fields.
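The walrus-narrow guard called out for gateway/platforms/matrix.py above is the one None-guard pattern that does not appear in the run_agent.py diff below; a minimal sketch of the pattern, with a hypothetical helper name and regex (the real matrix.py call sites differ):

import re
from typing import Optional

def extract_event_id(body: str) -> Optional[str]:
    # re.search() returns Optional[Match], so .group() is only safe once the
    # walrus-bound match has been narrowed to non-None.
    if (match := re.search(r"\$([A-Za-z0-9]+)", body)) is not None:
        return match.group(1)
    return None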
parent fb6d37495b
commit 15ac253b11
24 changed files with 1726 additions and 254 deletions
run_agent.py (39 changes)
@@ -37,7 +37,7 @@ import time
 import threading
 from types import SimpleNamespace
 import uuid
-from typing import List, Dict, Any, Optional, TYPE_CHECKING
+from typing import Callable, List, Dict, Any, Optional, TYPE_CHECKING

 if TYPE_CHECKING:
     from agent.rate_limit_tracker import RateLimitState
@@ -736,17 +736,17 @@ class AIAgent:
         provider_require_parameters: bool = False,
         provider_data_collection: str = None,
         session_id: str = None,
-        tool_progress_callback: callable = None,
-        tool_start_callback: callable = None,
-        tool_complete_callback: callable = None,
-        thinking_callback: callable = None,
-        reasoning_callback: callable = None,
-        clarify_callback: callable = None,
-        step_callback: callable = None,
-        stream_delta_callback: callable = None,
-        interim_assistant_callback: callable = None,
-        tool_gen_callback: callable = None,
-        status_callback: callable = None,
+        tool_progress_callback: Callable[..., Any] = None,
+        tool_start_callback: Callable[..., Any] = None,
+        tool_complete_callback: Callable[..., Any] = None,
+        thinking_callback: Callable[..., Any] = None,
+        reasoning_callback: Callable[..., Any] = None,
+        clarify_callback: Callable[..., Any] = None,
+        step_callback: Callable[..., Any] = None,
+        stream_delta_callback: Callable[..., Any] = None,
+        interim_assistant_callback: Callable[..., Any] = None,
+        tool_gen_callback: Callable[..., Any] = None,
+        status_callback: Callable[..., Any] = None,
         max_tokens: int = None,
         reasoning_config: Dict[str, Any] = None,
         service_tier: str = None,
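For the hunk above: callable is the builtin predicate (callable(obj) -> bool), not a type, so ty rejects it in annotation position; typing.Callable[..., Any] is the annotation-form equivalent of "any call signature". A minimal sketch of the rule with an illustrative function, not copied from run_agent.py (it adds an explicit Optional that the hunk's `= None` defaults leave implicit):

from typing import Any, Callable, Optional

def run_step(on_progress: Optional[Callable[..., Any]] = None) -> None:
    # Any callable of any arity satisfies Callable[..., Any]; the None-check
    # is still required before invoking it.
    if on_progress is not None:
        on_progress("step complete")

run_step()                     # fine: callback omitted
run_step(on_progress=print)    # fine: print matches Callable[..., Any]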
@@ -4688,7 +4688,7 @@ class AIAgent:
     def _close_request_openai_client(self, client: Any, *, reason: str) -> None:
         self._close_openai_client(client, reason=reason, shared=False)

-    def _run_codex_stream(self, api_kwargs: dict, client: Any = None, on_first_delta: callable = None):
+    def _run_codex_stream(self, api_kwargs: dict, client: Any = None, on_first_delta: Callable[..., Any] = None):
         """Execute one streaming Responses API request and return the final response."""
         import httpx as _httpx

@@ -5384,7 +5384,7 @@ class AIAgent:
         )

     def _interruptible_streaming_api_call(
-        self, api_kwargs: dict, *, on_first_delta: callable = None
+        self, api_kwargs: dict, *, on_first_delta: Callable[..., Any] = None
     ):
         """Streaming variant of _interruptible_api_call for real-time token delivery.

@@ -7334,12 +7334,15 @@ class AIAgent:
             _flush_temperature = _fixed_temp
         else:
             _flush_temperature = 0.3
+        _flush_llm_kwargs: dict = {}
+        if _flush_temperature is not None:
+            _flush_llm_kwargs["temperature"] = _flush_temperature
         try:
             response = _call_llm(
                 task="flush_memories",
                 messages=api_messages,
                 tools=[memory_tool_def],
-                temperature=_flush_temperature,
+                **_flush_llm_kwargs,
                 max_tokens=5120,
                 # timeout resolved from auxiliary.flush_memories.timeout config
             )
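A self-contained sketch of the kwargs-dict pattern the hunk above applies; call_llm here is a stand-in, not the real helper in run_agent.py. The point is that the temperature key is only added when a concrete value exists, so a sentinel or unset value never reaches the downstream signature:

from typing import Any

def call_llm(task: str, messages: list, **kwargs: Any) -> dict:
    # Stand-in: only ever sees "temperature" when the caller put it in kwargs.
    return {"task": task, "kwargs": kwargs}

def flush(messages: list, temperature: float | None) -> dict:
    llm_kwargs: dict[str, Any] = {}
    if temperature is not None:          # omit the key entirely when unset
        llm_kwargs["temperature"] = temperature
    return call_llm(task="flush_memories", messages=messages, **llm_kwargs)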
@@ -8531,9 +8534,9 @@ class AIAgent:
         self,
         user_message: str,
         system_message: str = None,
-        conversation_history: List[Dict[str, Any]] = None,
+        conversation_history: List[Dict[str, Any]] | None = None,
         task_id: str = None,
-        stream_callback: Optional[callable] = None,
+        stream_callback: Optional[Callable[..., Any]] = None,
         persist_user_message: Optional[str] = None,
     ) -> Dict[str, Any]:
         """
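The List[Dict[str, Any]] | None spelling above is the PEP 604 union form of an explicit Optional. A minimal sketch of why the widening matters, using a hypothetical helper rather than the real method: once None is part of the declared type, the body has to normalise it before treating the argument as a list.

from typing import Any

def build_history(user_message: str,
                  conversation_history: list[dict[str, Any]] | None = None) -> list[dict[str, Any]]:
    # Normalise the None default before appending, instead of mutating a
    # possibly-None argument.
    history = list(conversation_history) if conversation_history is not None else []
    history.append({"role": "user", "content": user_message})
    return history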
@@ -11783,7 +11786,7 @@ class AIAgent:

         return result

-    def chat(self, message: str, stream_callback: Optional[callable] = None) -> str:
+    def chat(self, message: str, stream_callback: Optional[Callable[..., Any]] = None) -> str:
         """
         Simple chat interface that returns just the final response.

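A toy illustration of what the Optional[Callable[..., Any]] annotation on chat() implies at the call site; this is not the real AIAgent.chat body:

from typing import Any, Callable, Optional

def chat(message: str, stream_callback: Optional[Callable[..., Any]] = None) -> str:
    reply = f"echo: {message}"           # toy response in place of the agent loop
    if stream_callback is not None:      # None-guard before calling
        stream_callback(reply)
    return reply

chat("hello")                            # no callback
chat("hello", stream_callback=print)     # streams the reply to stdout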