refactor: add tool_error/tool_result helpers + read_raw_config, migrate 129 callsites

Add three reusable helpers to eliminate pervasive boilerplate:

tools/registry.py — tool_error() and tool_result():
  Every tool handler returns JSON strings. The pattern
  json.dumps({"error": msg}, ensure_ascii=False) appeared 106 times,
  and json.dumps({"success": False, "error": msg}, ...) another 23.
  Now: tool_error(msg) or tool_error(msg, success=False).

  tool_result() handles arbitrary result dicts:
  tool_result(success=True, data=payload) or tool_result(some_dict).

hermes_cli/config.py — read_raw_config():
  Lightweight YAML reader that returns the raw config dict without
  load_config()'s deep-merge + migration overhead. Available for
  callsites that just need a single config value.

Migration (129 callsites across 32 files):
- tools/: browser_camofox (18), file_tools (10), homeassistant (8),
  web_tools (7), skill_manager (7), cronjob (11), code_execution (4),
  delegate (5), send_message (4), tts (4), memory (7), session_search (3),
  mcp (2), clarify (2), skills_tool (3), todo (1), vision (1),
  browser (1), process_registry (2), image_gen (1)
- plugins/memory/: honcho (9), supermemory (9), hindsight (8),
  holographic (7), openviking (7), mem0 (7), byterover (6), retaindb (2)
- agent/: memory_manager (2), builtin_memory_provider (1)
This commit is contained in:
Teknium 2026-04-07 13:36:20 -07:00
parent ab8f9c089e
commit 678a87c477
No known key found for this signature in database
32 changed files with 252 additions and 179 deletions

View file

@ -16,6 +16,7 @@ import logging
from typing import Any, Dict, List from typing import Any, Dict, List
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -92,7 +93,7 @@ class BuiltinMemoryProvider(MemoryProvider):
def handle_tool_call(self, tool_name: str, args: Dict[str, Any], **kwargs) -> str: def handle_tool_call(self, tool_name: str, args: Dict[str, Any], **kwargs) -> str:
"""Not used — the memory tool is intercepted in run_agent.py.""" """Not used — the memory tool is intercepted in run_agent.py."""
return json.dumps({"error": "Built-in memory tool is handled by the agent loop"}) return tool_error("Built-in memory tool is handled by the agent loop")
def shutdown(self) -> None: def shutdown(self) -> None:
"""No cleanup needed — files are saved on every write.""" """No cleanup needed — files are saved on every write."""

View file

@ -34,6 +34,7 @@ import re
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -249,7 +250,7 @@ class MemoryManager:
""" """
provider = self._tool_to_provider.get(tool_name) provider = self._tool_to_provider.get(tool_name)
if provider is None: if provider is None:
return json.dumps({"error": f"No memory provider handles tool '{tool_name}'"}) return tool_error(f"No memory provider handles tool '{tool_name}'")
try: try:
return provider.handle_tool_call(tool_name, args, **kwargs) return provider.handle_tool_call(tool_name, args, **kwargs)
except Exception as e: except Exception as e:
@ -257,7 +258,7 @@ class MemoryManager:
"Memory provider '%s' handle_tool_call(%s) failed: %s", "Memory provider '%s' handle_tool_call(%s) failed: %s",
provider.name, tool_name, e, provider.name, tool_name, e,
) )
return json.dumps({"error": f"Memory tool '{tool_name}' failed: {e}"}) return tool_error(f"Memory tool '{tool_name}' failed: {e}")
# -- Lifecycle hooks ----------------------------------------------------- # -- Lifecycle hooks -----------------------------------------------------

View file

@ -1881,6 +1881,24 @@ def _normalize_max_turns_config(config: Dict[str, Any]) -> Dict[str, Any]:
def read_raw_config() -> Dict[str, Any]:
    """Read ~/.hermes/config.yaml as-is, without merging defaults or migrating.

    Returns the raw YAML dict, or ``{}`` if the file doesn't exist, can't
    be parsed, or its top level is not a mapping. Use this for lightweight
    config reads where you just need a single value and don't want the
    overhead of ``load_config()``'s deep-merge + migration pipeline.
    """
    try:
        config_path = get_config_path()
        if config_path.exists():
            with open(config_path, encoding="utf-8") as f:
                data = yaml.safe_load(f)
            # safe_load returns None for an empty file and may return a
            # list/scalar for non-mapping YAML; only a dict honors the
            # declared Dict[str, Any] contract.
            if isinstance(data, dict):
                return data
    except Exception:
        # Best-effort reader: any I/O or parse failure degrades to {}.
        pass
    return {}
def load_config() -> Dict[str, Any]: def load_config() -> Dict[str, Any]:
"""Load configuration from ~/.hermes/config.yaml.""" """Load configuration from ~/.hermes/config.yaml."""
import copy import copy

View file

@ -27,6 +27,7 @@ from pathlib import Path
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -320,7 +321,7 @@ class ByteRoverMemoryProvider(MemoryProvider):
return self._tool_curate(args) return self._tool_curate(args)
elif tool_name == "brv_status": elif tool_name == "brv_status":
return self._tool_status() return self._tool_status()
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
def shutdown(self) -> None: def shutdown(self) -> None:
if self._sync_thread and self._sync_thread.is_alive(): if self._sync_thread and self._sync_thread.is_alive():
@ -331,7 +332,7 @@ class ByteRoverMemoryProvider(MemoryProvider):
def _tool_query(self, args: dict) -> str: def _tool_query(self, args: dict) -> str:
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "query is required"}) return tool_error("query is required")
result = _run_brv( result = _run_brv(
["query", "--", query.strip()[:5000]], ["query", "--", query.strip()[:5000]],
@ -339,7 +340,7 @@ class ByteRoverMemoryProvider(MemoryProvider):
) )
if not result["success"]: if not result["success"]:
return json.dumps({"error": result.get("error", "Query failed")}) return tool_error(result.get("error", "Query failed"))
output = result.get("output", "").strip() output = result.get("output", "").strip()
if not output or len(output) < _MIN_OUTPUT_LEN: if not output or len(output) < _MIN_OUTPUT_LEN:
@ -354,7 +355,7 @@ class ByteRoverMemoryProvider(MemoryProvider):
def _tool_curate(self, args: dict) -> str: def _tool_curate(self, args: dict) -> str:
content = args.get("content", "") content = args.get("content", "")
if not content: if not content:
return json.dumps({"error": "content is required"}) return tool_error("content is required")
result = _run_brv( result = _run_brv(
["curate", "--", content], ["curate", "--", content],
@ -362,14 +363,14 @@ class ByteRoverMemoryProvider(MemoryProvider):
) )
if not result["success"]: if not result["success"]:
return json.dumps({"error": result.get("error", "Curate failed")}) return tool_error(result.get("error", "Curate failed"))
return json.dumps({"result": "Memory curated successfully."}) return json.dumps({"result": "Memory curated successfully."})
def _tool_status(self) -> str: def _tool_status(self) -> str:
result = _run_brv(["status"], timeout=15, cwd=self._cwd) result = _run_brv(["status"], timeout=15, cwd=self._cwd)
if not result["success"]: if not result["success"]:
return json.dumps({"error": result.get("error", "Status check failed")}) return tool_error(result.get("error", "Status check failed"))
return json.dumps({"status": result.get("output", "")}) return json.dumps({"status": result.get("output", "")})

View file

@ -26,6 +26,7 @@ import threading
from typing import Any, Dict, List from typing import Any, Dict, List
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -433,12 +434,12 @@ class HindsightMemoryProvider(MemoryProvider):
client = self._get_client() client = self._get_client()
except Exception as e: except Exception as e:
logger.warning("Hindsight client init failed: %s", e) logger.warning("Hindsight client init failed: %s", e)
return json.dumps({"error": f"Hindsight client unavailable: {e}"}) return tool_error(f"Hindsight client unavailable: {e}")
if tool_name == "hindsight_retain": if tool_name == "hindsight_retain":
content = args.get("content", "") content = args.get("content", "")
if not content: if not content:
return json.dumps({"error": "Missing required parameter: content"}) return tool_error("Missing required parameter: content")
context = args.get("context") context = args.get("context")
try: try:
_run_sync(client.aretain( _run_sync(client.aretain(
@ -447,12 +448,12 @@ class HindsightMemoryProvider(MemoryProvider):
return json.dumps({"result": "Memory stored successfully."}) return json.dumps({"result": "Memory stored successfully."})
except Exception as e: except Exception as e:
logger.warning("hindsight_retain failed: %s", e) logger.warning("hindsight_retain failed: %s", e)
return json.dumps({"error": f"Failed to store memory: {e}"}) return tool_error(f"Failed to store memory: {e}")
elif tool_name == "hindsight_recall": elif tool_name == "hindsight_recall":
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "Missing required parameter: query"}) return tool_error("Missing required parameter: query")
try: try:
resp = _run_sync(client.arecall( resp = _run_sync(client.arecall(
bank_id=self._bank_id, query=query, budget=self._budget bank_id=self._bank_id, query=query, budget=self._budget
@ -463,12 +464,12 @@ class HindsightMemoryProvider(MemoryProvider):
return json.dumps({"result": "\n".join(lines)}) return json.dumps({"result": "\n".join(lines)})
except Exception as e: except Exception as e:
logger.warning("hindsight_recall failed: %s", e) logger.warning("hindsight_recall failed: %s", e)
return json.dumps({"error": f"Failed to search memory: {e}"}) return tool_error(f"Failed to search memory: {e}")
elif tool_name == "hindsight_reflect": elif tool_name == "hindsight_reflect":
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "Missing required parameter: query"}) return tool_error("Missing required parameter: query")
try: try:
resp = _run_sync(client.areflect( resp = _run_sync(client.areflect(
bank_id=self._bank_id, query=query, budget=self._budget bank_id=self._bank_id, query=query, budget=self._budget
@ -476,9 +477,9 @@ class HindsightMemoryProvider(MemoryProvider):
return json.dumps({"result": resp.text or "No relevant memories found."}) return json.dumps({"result": resp.text or "No relevant memories found."})
except Exception as e: except Exception as e:
logger.warning("hindsight_reflect failed: %s", e) logger.warning("hindsight_reflect failed: %s", e)
return json.dumps({"error": f"Failed to reflect: {e}"}) return tool_error(f"Failed to reflect: {e}")
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
def shutdown(self) -> None: def shutdown(self) -> None:
global _loop, _loop_thread global _loop, _loop_thread

View file

@ -23,6 +23,7 @@ import re
from typing import Any, Dict, List from typing import Any, Dict, List
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
from .store import MemoryStore from .store import MemoryStore
from .retrieval import FactRetriever from .retrieval import FactRetriever
@ -230,7 +231,7 @@ class HolographicMemoryProvider(MemoryProvider):
return self._handle_fact_store(args) return self._handle_fact_store(args)
elif tool_name == "fact_feedback": elif tool_name == "fact_feedback":
return self._handle_fact_feedback(args) return self._handle_fact_feedback(args)
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
def on_session_end(self, messages: List[Dict[str, Any]]) -> None: def on_session_end(self, messages: List[Dict[str, Any]]) -> None:
if not self._config.get("auto_extract", False): if not self._config.get("auto_extract", False):
@ -296,7 +297,7 @@ class HolographicMemoryProvider(MemoryProvider):
elif action == "reason": elif action == "reason":
entities = args.get("entities", []) entities = args.get("entities", [])
if not entities: if not entities:
return json.dumps({"error": "reason requires 'entities' list"}) return tool_error("reason requires 'entities' list")
results = retriever.reason( results = retriever.reason(
entities, entities,
category=args.get("category"), category=args.get("category"),
@ -334,12 +335,12 @@ class HolographicMemoryProvider(MemoryProvider):
return json.dumps({"facts": facts, "count": len(facts)}) return json.dumps({"facts": facts, "count": len(facts)})
else: else:
return json.dumps({"error": f"Unknown action: {action}"}) return tool_error(f"Unknown action: {action}")
except KeyError as exc: except KeyError as exc:
return json.dumps({"error": f"Missing required argument: {exc}"}) return tool_error(f"Missing required argument: {exc}")
except Exception as exc: except Exception as exc:
return json.dumps({"error": str(exc)}) return tool_error(str(exc))
def _handle_fact_feedback(self, args: dict) -> str: def _handle_fact_feedback(self, args: dict) -> str:
try: try:
@ -348,9 +349,9 @@ class HolographicMemoryProvider(MemoryProvider):
result = self._store.record_feedback(fact_id, helpful=helpful) result = self._store.record_feedback(fact_id, helpful=helpful)
return json.dumps(result) return json.dumps(result)
except KeyError as exc: except KeyError as exc:
return json.dumps({"error": f"Missing required argument: {exc}"}) return tool_error(f"Missing required argument: {exc}")
except Exception as exc: except Exception as exc:
return json.dumps({"error": str(exc)}) return tool_error(str(exc))
# -- Auto-extraction (on_session_end) ------------------------------------ # -- Auto-extraction (on_session_end) ------------------------------------

View file

@ -21,6 +21,7 @@ import threading
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -638,15 +639,15 @@ class HonchoMemoryProvider(MemoryProvider):
def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str: def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str:
"""Handle a Honcho tool call, with lazy session init for tools-only mode.""" """Handle a Honcho tool call, with lazy session init for tools-only mode."""
if self._cron_skipped: if self._cron_skipped:
return json.dumps({"error": "Honcho is not active (cron context)."}) return tool_error("Honcho is not active (cron context).")
# Port #1957: ensure session is initialized for tools-only mode # Port #1957: ensure session is initialized for tools-only mode
if not self._session_initialized: if not self._session_initialized:
if not self._ensure_session(): if not self._ensure_session():
return json.dumps({"error": "Honcho session could not be initialized."}) return tool_error("Honcho session could not be initialized.")
if not self._manager or not self._session_key: if not self._manager or not self._session_key:
return json.dumps({"error": "Honcho is not active for this session."}) return tool_error("Honcho is not active for this session.")
try: try:
if tool_name == "honcho_profile": if tool_name == "honcho_profile":
@ -658,7 +659,7 @@ class HonchoMemoryProvider(MemoryProvider):
elif tool_name == "honcho_search": elif tool_name == "honcho_search":
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "Missing required parameter: query"}) return tool_error("Missing required parameter: query")
max_tokens = min(int(args.get("max_tokens", 800)), 2000) max_tokens = min(int(args.get("max_tokens", 800)), 2000)
result = self._manager.search_context( result = self._manager.search_context(
self._session_key, query, max_tokens=max_tokens self._session_key, query, max_tokens=max_tokens
@ -670,7 +671,7 @@ class HonchoMemoryProvider(MemoryProvider):
elif tool_name == "honcho_context": elif tool_name == "honcho_context":
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "Missing required parameter: query"}) return tool_error("Missing required parameter: query")
peer = args.get("peer", "user") peer = args.get("peer", "user")
result = self._manager.dialectic_query( result = self._manager.dialectic_query(
self._session_key, query, peer=peer self._session_key, query, peer=peer
@ -680,17 +681,17 @@ class HonchoMemoryProvider(MemoryProvider):
elif tool_name == "honcho_conclude": elif tool_name == "honcho_conclude":
conclusion = args.get("conclusion", "") conclusion = args.get("conclusion", "")
if not conclusion: if not conclusion:
return json.dumps({"error": "Missing required parameter: conclusion"}) return tool_error("Missing required parameter: conclusion")
ok = self._manager.create_conclusion(self._session_key, conclusion) ok = self._manager.create_conclusion(self._session_key, conclusion)
if ok: if ok:
return json.dumps({"result": f"Conclusion saved: {conclusion}"}) return json.dumps({"result": f"Conclusion saved: {conclusion}"})
return json.dumps({"error": "Failed to save conclusion."}) return tool_error("Failed to save conclusion.")
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
except Exception as e: except Exception as e:
logger.error("Honcho tool %s failed: %s", tool_name, e) logger.error("Honcho tool %s failed: %s", tool_name, e)
return json.dumps({"error": f"Honcho {tool_name} failed: {e}"}) return tool_error(f"Honcho {tool_name} failed: {e}")
def shutdown(self) -> None: def shutdown(self) -> None:
for t in (self._prefetch_thread, self._sync_thread): for t in (self._prefetch_thread, self._sync_thread):

View file

@ -23,6 +23,7 @@ import time
from typing import Any, Dict, List from typing import Any, Dict, List
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -305,7 +306,7 @@ class Mem0MemoryProvider(MemoryProvider):
try: try:
client = self._get_client() client = self._get_client()
except Exception as e: except Exception as e:
return json.dumps({"error": str(e)}) return tool_error(str(e))
if tool_name == "mem0_profile": if tool_name == "mem0_profile":
try: try:
@ -317,12 +318,12 @@ class Mem0MemoryProvider(MemoryProvider):
return json.dumps({"result": "\n".join(lines), "count": len(lines)}) return json.dumps({"result": "\n".join(lines), "count": len(lines)})
except Exception as e: except Exception as e:
self._record_failure() self._record_failure()
return json.dumps({"error": f"Failed to fetch profile: {e}"}) return tool_error(f"Failed to fetch profile: {e}")
elif tool_name == "mem0_search": elif tool_name == "mem0_search":
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "Missing required parameter: query"}) return tool_error("Missing required parameter: query")
rerank = args.get("rerank", False) rerank = args.get("rerank", False)
top_k = min(int(args.get("top_k", 10)), 50) top_k = min(int(args.get("top_k", 10)), 50)
try: try:
@ -339,12 +340,12 @@ class Mem0MemoryProvider(MemoryProvider):
return json.dumps({"results": items, "count": len(items)}) return json.dumps({"results": items, "count": len(items)})
except Exception as e: except Exception as e:
self._record_failure() self._record_failure()
return json.dumps({"error": f"Search failed: {e}"}) return tool_error(f"Search failed: {e}")
elif tool_name == "mem0_conclude": elif tool_name == "mem0_conclude":
conclusion = args.get("conclusion", "") conclusion = args.get("conclusion", "")
if not conclusion: if not conclusion:
return json.dumps({"error": "Missing required parameter: conclusion"}) return tool_error("Missing required parameter: conclusion")
try: try:
client.add( client.add(
[{"role": "user", "content": conclusion}], [{"role": "user", "content": conclusion}],
@ -355,9 +356,9 @@ class Mem0MemoryProvider(MemoryProvider):
return json.dumps({"result": "Fact stored."}) return json.dumps({"result": "Fact stored."})
except Exception as e: except Exception as e:
self._record_failure() self._record_failure()
return json.dumps({"error": f"Failed to store: {e}"}) return tool_error(f"Failed to store: {e}")
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
def shutdown(self) -> None: def shutdown(self) -> None:
for t in (self._prefetch_thread, self._sync_thread): for t in (self._prefetch_thread, self._sync_thread):

View file

@ -31,6 +31,7 @@ import threading
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -461,7 +462,7 @@ class OpenVikingMemoryProvider(MemoryProvider):
def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str: def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str:
if not self._client: if not self._client:
return json.dumps({"error": "OpenViking server not connected"}) return tool_error("OpenViking server not connected")
try: try:
if tool_name == "viking_search": if tool_name == "viking_search":
@ -474,9 +475,9 @@ class OpenVikingMemoryProvider(MemoryProvider):
return self._tool_remember(args) return self._tool_remember(args)
elif tool_name == "viking_add_resource": elif tool_name == "viking_add_resource":
return self._tool_add_resource(args) return self._tool_add_resource(args)
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
except Exception as e: except Exception as e:
return json.dumps({"error": str(e)}) return tool_error(str(e))
def shutdown(self) -> None: def shutdown(self) -> None:
# Wait for background threads to finish # Wait for background threads to finish
@ -493,7 +494,7 @@ class OpenVikingMemoryProvider(MemoryProvider):
def _tool_search(self, args: dict) -> str: def _tool_search(self, args: dict) -> str:
query = args.get("query", "") query = args.get("query", "")
if not query: if not query:
return json.dumps({"error": "query is required"}) return tool_error("query is required")
payload: Dict[str, Any] = {"query": query} payload: Dict[str, Any] = {"query": query}
mode = args.get("mode", "auto") mode = args.get("mode", "auto")
@ -530,7 +531,7 @@ class OpenVikingMemoryProvider(MemoryProvider):
def _tool_read(self, args: dict) -> str: def _tool_read(self, args: dict) -> str:
uri = args.get("uri", "") uri = args.get("uri", "")
if not uri: if not uri:
return json.dumps({"error": "uri is required"}) return tool_error("uri is required")
level = args.get("level", "overview") level = args.get("level", "overview")
# Map our level names to OpenViking GET endpoints # Map our level names to OpenViking GET endpoints
@ -582,7 +583,7 @@ class OpenVikingMemoryProvider(MemoryProvider):
def _tool_remember(self, args: dict) -> str: def _tool_remember(self, args: dict) -> str:
content = args.get("content", "") content = args.get("content", "")
if not content: if not content:
return json.dumps({"error": "content is required"}) return tool_error("content is required")
# Store as a session message that will be extracted during commit. # Store as a session message that will be extracted during commit.
# The category hint helps OpenViking's extraction classify correctly. # The category hint helps OpenViking's extraction classify correctly.
@ -606,7 +607,7 @@ class OpenVikingMemoryProvider(MemoryProvider):
def _tool_add_resource(self, args: dict) -> str: def _tool_add_resource(self, args: dict) -> str:
url = args.get("url", "") url = args.get("url", "")
if not url: if not url:
return json.dumps({"error": "url is required"}) return tool_error("url is required")
payload: Dict[str, Any] = {"path": url} payload: Dict[str, Any] = {"path": url}
if args.get("reason"): if args.get("reason"):

View file

@ -34,6 +34,7 @@ from typing import Any, Dict, List
from urllib.parse import quote from urllib.parse import quote
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -649,11 +650,11 @@ class RetainDBMemoryProvider(MemoryProvider):
def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str: def handle_tool_call(self, tool_name: str, args: dict, **kwargs) -> str:
if not self._client: if not self._client:
return json.dumps({"error": "RetainDB not initialized"}) return tool_error("RetainDB not initialized")
try: try:
return json.dumps(self._dispatch(tool_name, args)) return json.dumps(self._dispatch(tool_name, args))
except Exception as exc: except Exception as exc:
return json.dumps({"error": str(exc)}) return tool_error(str(exc))
def _dispatch(self, tool_name: str, args: dict) -> Any: def _dispatch(self, tool_name: str, args: dict) -> Any:
c = self._client c = self._client

View file

@ -18,6 +18,7 @@ from pathlib import Path
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from agent.memory_provider import MemoryProvider from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -587,7 +588,7 @@ class SupermemoryMemoryProvider(MemoryProvider):
def _tool_store(self, args: dict) -> str: def _tool_store(self, args: dict) -> str:
content = str(args.get("content") or "").strip() content = str(args.get("content") or "").strip()
if not content: if not content:
return json.dumps({"error": "content is required"}) return tool_error("content is required")
metadata = args.get("metadata") or {} metadata = args.get("metadata") or {}
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
metadata = {} metadata = {}
@ -598,12 +599,12 @@ class SupermemoryMemoryProvider(MemoryProvider):
preview = content[:80] + ("..." if len(content) > 80 else "") preview = content[:80] + ("..." if len(content) > 80 else "")
return json.dumps({"saved": True, "id": result.get("id", ""), "preview": preview}) return json.dumps({"saved": True, "id": result.get("id", ""), "preview": preview})
except Exception as exc: except Exception as exc:
return json.dumps({"error": f"Failed to store memory: {exc}"}) return tool_error(f"Failed to store memory: {exc}")
def _tool_search(self, args: dict) -> str: def _tool_search(self, args: dict) -> str:
query = str(args.get("query") or "").strip() query = str(args.get("query") or "").strip()
if not query: if not query:
return json.dumps({"error": "query is required"}) return tool_error("query is required")
try: try:
limit = max(1, min(20, int(args.get("limit", 5) or 5))) limit = max(1, min(20, int(args.get("limit", 5) or 5)))
except Exception: except Exception:
@ -621,20 +622,20 @@ class SupermemoryMemoryProvider(MemoryProvider):
formatted.append(entry) formatted.append(entry)
return json.dumps({"results": formatted, "count": len(formatted)}) return json.dumps({"results": formatted, "count": len(formatted)})
except Exception as exc: except Exception as exc:
return json.dumps({"error": f"Search failed: {exc}"}) return tool_error(f"Search failed: {exc}")
def _tool_forget(self, args: dict) -> str: def _tool_forget(self, args: dict) -> str:
memory_id = str(args.get("id") or "").strip() memory_id = str(args.get("id") or "").strip()
query = str(args.get("query") or "").strip() query = str(args.get("query") or "").strip()
if not memory_id and not query: if not memory_id and not query:
return json.dumps({"error": "Provide either id or query"}) return tool_error("Provide either id or query")
try: try:
if memory_id: if memory_id:
self._client.forget_memory(memory_id) self._client.forget_memory(memory_id)
return json.dumps({"forgotten": True, "id": memory_id}) return json.dumps({"forgotten": True, "id": memory_id})
return json.dumps(self._client.forget_by_query(query)) return json.dumps(self._client.forget_by_query(query))
except Exception as exc: except Exception as exc:
return json.dumps({"error": f"Forget failed: {exc}"}) return tool_error(f"Forget failed: {exc}")
def _tool_profile(self, args: dict) -> str: def _tool_profile(self, args: dict) -> str:
query = str(args.get("query") or "").strip() or None query = str(args.get("query") or "").strip() or None
@ -651,11 +652,11 @@ class SupermemoryMemoryProvider(MemoryProvider):
"dynamic_count": len(profile["dynamic"]), "dynamic_count": len(profile["dynamic"]),
}) })
except Exception as exc: except Exception as exc:
return json.dumps({"error": f"Profile failed: {exc}"}) return tool_error(f"Profile failed: {exc}")
def handle_tool_call(self, tool_name: str, args: Dict[str, Any], **kwargs) -> str: def handle_tool_call(self, tool_name: str, args: Dict[str, Any], **kwargs) -> str:
if not self._active or not self._client: if not self._active or not self._client:
return json.dumps({"error": "Supermemory is not configured"}) return tool_error("Supermemory is not configured")
if tool_name == "supermemory_store": if tool_name == "supermemory_store":
return self._tool_store(args) return self._tool_store(args)
if tool_name == "supermemory_search": if tool_name == "supermemory_search":
@ -664,7 +665,7 @@ class SupermemoryMemoryProvider(MemoryProvider):
return self._tool_forget(args) return self._tool_forget(args)
if tool_name == "supermemory_profile": if tool_name == "supermemory_profile":
return self._tool_profile(args) return self._tool_profile(args)
return json.dumps({"error": f"Unknown tool: {tool_name}"}) return tool_error(f"Unknown tool: {tool_name}")
def register(ctx): def register(ctx):

View file

@ -259,7 +259,7 @@ def camofox_navigate(url: str, task_id: Optional[str] = None) -> str:
return json.dumps(result) return json.dumps(result)
except requests.HTTPError as e: except requests.HTTPError as e:
return json.dumps({"success": False, "error": f"Navigation failed: {e}"}) return tool_error(f"Navigation failed: {e}", success=False)
except requests.ConnectionError: except requests.ConnectionError:
return json.dumps({ return json.dumps({
"success": False, "success": False,
@ -268,7 +268,7 @@ def camofox_navigate(url: str, task_id: Optional[str] = None) -> str:
"or: docker run -p 9377:9377 -e CAMOFOX_PORT=9377 jo-inc/camofox-browser", "or: docker run -p 9377:9377 -e CAMOFOX_PORT=9377 jo-inc/camofox-browser",
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_snapshot(full: bool = False, task_id: Optional[str] = None, def camofox_snapshot(full: bool = False, task_id: Optional[str] = None,
@ -277,7 +277,7 @@ def camofox_snapshot(full: bool = False, task_id: Optional[str] = None,
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
data = _get( data = _get(
f"/tabs/{session['tab_id']}/snapshot", f"/tabs/{session['tab_id']}/snapshot",
@ -306,7 +306,7 @@ def camofox_snapshot(full: bool = False, task_id: Optional[str] = None,
"element_count": refs_count, "element_count": refs_count,
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_click(ref: str, task_id: Optional[str] = None) -> str: def camofox_click(ref: str, task_id: Optional[str] = None) -> str:
@ -314,7 +314,7 @@ def camofox_click(ref: str, task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
# Strip @ prefix if present (our tool convention) # Strip @ prefix if present (our tool convention)
clean_ref = ref.lstrip("@") clean_ref = ref.lstrip("@")
@ -329,7 +329,7 @@ def camofox_click(ref: str, task_id: Optional[str] = None) -> str:
"url": data.get("url", ""), "url": data.get("url", ""),
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_type(ref: str, text: str, task_id: Optional[str] = None) -> str: def camofox_type(ref: str, text: str, task_id: Optional[str] = None) -> str:
@ -337,7 +337,7 @@ def camofox_type(ref: str, text: str, task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
clean_ref = ref.lstrip("@") clean_ref = ref.lstrip("@")
@ -351,7 +351,7 @@ def camofox_type(ref: str, text: str, task_id: Optional[str] = None) -> str:
"element": clean_ref, "element": clean_ref,
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_scroll(direction: str, task_id: Optional[str] = None) -> str: def camofox_scroll(direction: str, task_id: Optional[str] = None) -> str:
@ -359,7 +359,7 @@ def camofox_scroll(direction: str, task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
_post( _post(
f"/tabs/{session['tab_id']}/scroll", f"/tabs/{session['tab_id']}/scroll",
@ -367,7 +367,7 @@ def camofox_scroll(direction: str, task_id: Optional[str] = None) -> str:
) )
return json.dumps({"success": True, "scrolled": direction}) return json.dumps({"success": True, "scrolled": direction})
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_back(task_id: Optional[str] = None) -> str: def camofox_back(task_id: Optional[str] = None) -> str:
@ -375,7 +375,7 @@ def camofox_back(task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
data = _post( data = _post(
f"/tabs/{session['tab_id']}/back", f"/tabs/{session['tab_id']}/back",
@ -383,7 +383,7 @@ def camofox_back(task_id: Optional[str] = None) -> str:
) )
return json.dumps({"success": True, "url": data.get("url", "")}) return json.dumps({"success": True, "url": data.get("url", "")})
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_press(key: str, task_id: Optional[str] = None) -> str: def camofox_press(key: str, task_id: Optional[str] = None) -> str:
@ -391,7 +391,7 @@ def camofox_press(key: str, task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
_post( _post(
f"/tabs/{session['tab_id']}/press", f"/tabs/{session['tab_id']}/press",
@ -399,7 +399,7 @@ def camofox_press(key: str, task_id: Optional[str] = None) -> str:
) )
return json.dumps({"success": True, "pressed": key}) return json.dumps({"success": True, "pressed": key})
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_close(task_id: Optional[str] = None) -> str: def camofox_close(task_id: Optional[str] = None) -> str:
@ -426,7 +426,7 @@ def camofox_get_images(task_id: Optional[str] = None) -> str:
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
import re import re
@ -461,7 +461,7 @@ def camofox_get_images(task_id: Optional[str] = None) -> str:
"count": len(images), "count": len(images),
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_vision(question: str, annotate: bool = False, def camofox_vision(question: str, annotate: bool = False,
@ -470,7 +470,7 @@ def camofox_vision(question: str, annotate: bool = False,
try: try:
session = _get_session(task_id) session = _get_session(task_id)
if not session["tab_id"]: if not session["tab_id"]:
return json.dumps({"success": False, "error": "No browser session. Call browser_navigate first."}) return tool_error("No browser session. Call browser_navigate first.", success=False)
# Get screenshot as binary PNG # Get screenshot as binary PNG
resp = _get_raw( resp = _get_raw(
@ -551,7 +551,7 @@ def camofox_vision(question: str, annotate: bool = False,
"screenshot_path": screenshot_path, "screenshot_path": screenshot_path,
}) })
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}) return tool_error(str(e), success=False)
def camofox_console(clear: bool = False, task_id: Optional[str] = None) -> str: def camofox_console(clear: bool = False, task_id: Optional[str] = None) -> str:

View file

@ -1618,7 +1618,7 @@ def _camofox_eval(expression: str, task_id: Optional[str] = None) -> str:
"error": "JavaScript evaluation is not supported by this Camofox server. " "error": "JavaScript evaluation is not supported by this Camofox server. "
"Use browser_snapshot or browser_vision to inspect page state.", "Use browser_snapshot or browser_vision to inspect page state.",
}) })
return json.dumps({"success": False, "error": error_msg}) return tool_error(error_msg, success=False)
def _maybe_start_recording(task_id: str): def _maybe_start_recording(task_id: str):
@ -2102,7 +2102,7 @@ if __name__ == "__main__":
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry # Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
_BROWSER_SCHEMA_MAP = {s["name"]: s for s in BROWSER_TOOL_SCHEMAS} _BROWSER_SCHEMA_MAP = {s["name"]: s for s in BROWSER_TOOL_SCHEMAS}

View file

@ -40,14 +40,14 @@ def clarify_tool(
JSON string with the user's response. JSON string with the user's response.
""" """
if not question or not question.strip(): if not question or not question.strip():
return json.dumps({"error": "Question text is required."}, ensure_ascii=False) return tool_error("Question text is required.")
question = question.strip() question = question.strip()
# Validate and trim choices # Validate and trim choices
if choices is not None: if choices is not None:
if not isinstance(choices, list): if not isinstance(choices, list):
return json.dumps({"error": "choices must be a list of strings."}, ensure_ascii=False) return tool_error("choices must be a list of strings.")
choices = [str(c).strip() for c in choices if str(c).strip()] choices = [str(c).strip() for c in choices if str(c).strip()]
if len(choices) > MAX_CHOICES: if len(choices) > MAX_CHOICES:
choices = choices[:MAX_CHOICES] choices = choices[:MAX_CHOICES]
@ -126,7 +126,7 @@ CLARIFY_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="clarify", name="clarify",

View file

@ -344,7 +344,7 @@ def _rpc_server_loop(
try: try:
request = json.loads(line.decode()) request = json.loads(line.decode())
except (json.JSONDecodeError, UnicodeDecodeError) as exc: except (json.JSONDecodeError, UnicodeDecodeError) as exc:
resp = json.dumps({"error": f"Invalid RPC request: {exc}"}) resp = tool_error(f"Invalid RPC request: {exc}")
conn.sendall((resp + "\n").encode()) conn.sendall((resp + "\n").encode())
continue continue
@ -396,7 +396,7 @@ def _rpc_server_loop(
devnull.close() devnull.close()
except Exception as exc: except Exception as exc:
logger.error("Tool call failed in sandbox: %s", exc, exc_info=True) logger.error("Tool call failed in sandbox: %s", exc, exc_info=True)
result = json.dumps({"error": str(exc)}) result = tool_error(str(exc))
tool_call_counter[0] += 1 tool_call_counter[0] += 1
call_duration = time.monotonic() - call_start call_duration = time.monotonic() - call_start
@ -648,7 +648,7 @@ def _rpc_poll_loop(
except Exception as exc: except Exception as exc:
logger.error("Tool call failed in remote sandbox: %s", logger.error("Tool call failed in remote sandbox: %s",
exc, exc_info=True) exc, exc_info=True)
tool_result = json.dumps({"error": str(exc)}) tool_result = tool_error(str(exc))
tool_call_counter[0] += 1 tool_call_counter[0] += 1
call_duration = time.monotonic() - call_start call_duration = time.monotonic() - call_start
@ -890,7 +890,7 @@ def execute_code(
}) })
if not code or not code.strip(): if not code or not code.strip():
return json.dumps({"error": "No code provided."}) return tool_error("No code provided.")
# Dispatch: remote backends use file-based RPC, local uses UDS # Dispatch: remote backends use file-based RPC, local uses UDS
from tools.terminal_tool import _get_env_config from tools.terminal_tool import _get_env_config
@ -1331,7 +1331,7 @@ EXECUTE_CODE_SCHEMA = build_execute_code_schema()
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="execute_code", name="execute_code",

View file

@ -231,20 +231,20 @@ def cronjob(
if normalized == "create": if normalized == "create":
if not schedule: if not schedule:
return json.dumps({"success": False, "error": "schedule is required for create"}, indent=2) return tool_error("schedule is required for create", success=False)
canonical_skills = _canonical_skills(skill, skills) canonical_skills = _canonical_skills(skill, skills)
if not prompt and not canonical_skills: if not prompt and not canonical_skills:
return json.dumps({"success": False, "error": "create requires either prompt or at least one skill"}, indent=2) return tool_error("create requires either prompt or at least one skill", success=False)
if prompt: if prompt:
scan_error = _scan_cron_prompt(prompt) scan_error = _scan_cron_prompt(prompt)
if scan_error: if scan_error:
return json.dumps({"success": False, "error": scan_error}, indent=2) return tool_error(scan_error, success=False)
# Validate script path before storing # Validate script path before storing
if script: if script:
script_error = _validate_cron_script_path(script) script_error = _validate_cron_script_path(script)
if script_error: if script_error:
return json.dumps({"success": False, "error": script_error}, indent=2) return tool_error(script_error, success=False)
job = create_job( job = create_job(
prompt=prompt or "", prompt=prompt or "",
@ -281,7 +281,7 @@ def cronjob(
return json.dumps({"success": True, "count": len(jobs), "jobs": jobs}, indent=2) return json.dumps({"success": True, "count": len(jobs), "jobs": jobs}, indent=2)
if not job_id: if not job_id:
return json.dumps({"success": False, "error": f"job_id is required for action '{normalized}'"}, indent=2) return tool_error(f"job_id is required for action '{normalized}'", success=False)
job = get_job(job_id) job = get_job(job_id)
if not job: if not job:
@ -293,7 +293,7 @@ def cronjob(
if normalized == "remove": if normalized == "remove":
removed = remove_job(job_id) removed = remove_job(job_id)
if not removed: if not removed:
return json.dumps({"success": False, "error": f"Failed to remove job '{job_id}'"}, indent=2) return tool_error(f"Failed to remove job '{job_id}'", success=False)
return json.dumps( return json.dumps(
{ {
"success": True, "success": True,
@ -324,7 +324,7 @@ def cronjob(
if prompt is not None: if prompt is not None:
scan_error = _scan_cron_prompt(prompt) scan_error = _scan_cron_prompt(prompt)
if scan_error: if scan_error:
return json.dumps({"success": False, "error": scan_error}, indent=2) return tool_error(scan_error, success=False)
updates["prompt"] = prompt updates["prompt"] = prompt
if name is not None: if name is not None:
updates["name"] = name updates["name"] = name
@ -345,7 +345,7 @@ def cronjob(
if script: if script:
script_error = _validate_cron_script_path(script) script_error = _validate_cron_script_path(script)
if script_error: if script_error:
return json.dumps({"success": False, "error": script_error}, indent=2) return tool_error(script_error, success=False)
updates["script"] = _normalize_optional_job_value(script) if script else None updates["script"] = _normalize_optional_job_value(script) if script else None
if repeat is not None: if repeat is not None:
# Normalize: treat 0 or negative as None (infinite) # Normalize: treat 0 or negative as None (infinite)
@ -361,14 +361,14 @@ def cronjob(
updates["state"] = "scheduled" updates["state"] = "scheduled"
updates["enabled"] = True updates["enabled"] = True
if not updates: if not updates:
return json.dumps({"success": False, "error": "No updates provided."}, indent=2) return tool_error("No updates provided.", success=False)
updated = update_job(job_id, updates) updated = update_job(job_id, updates)
return json.dumps({"success": True, "job": _format_job(updated)}, indent=2) return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)
return json.dumps({"success": False, "error": f"Unknown cron action '{action}'"}, indent=2) return tool_error(f"Unknown cron action '{action}'", success=False)
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}, indent=2) return tool_error(str(e), success=False)
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -502,7 +502,7 @@ def check_cronjob_requirements() -> bool:
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="cronjob", name="cronjob",

View file

@ -527,7 +527,7 @@ def delegate_task(
Returns JSON with results array, one entry per task. Returns JSON with results array, one entry per task.
""" """
if parent_agent is None: if parent_agent is None:
return json.dumps({"error": "delegate_task requires a parent agent context."}) return tool_error("delegate_task requires a parent agent context.")
# Depth limit # Depth limit
depth = getattr(parent_agent, '_delegate_depth', 0) depth = getattr(parent_agent, '_delegate_depth', 0)
@ -552,7 +552,7 @@ def delegate_task(
try: try:
creds = _resolve_delegation_credentials(cfg, parent_agent) creds = _resolve_delegation_credentials(cfg, parent_agent)
except ValueError as exc: except ValueError as exc:
return json.dumps({"error": str(exc)}) return tool_error(str(exc))
# Normalize to task list # Normalize to task list
if tasks and isinstance(tasks, list): if tasks and isinstance(tasks, list):
@ -560,15 +560,15 @@ def delegate_task(
elif goal and isinstance(goal, str) and goal.strip(): elif goal and isinstance(goal, str) and goal.strip():
task_list = [{"goal": goal, "context": context, "toolsets": toolsets}] task_list = [{"goal": goal, "context": context, "toolsets": toolsets}]
else: else:
return json.dumps({"error": "Provide either 'goal' (single task) or 'tasks' (batch)."}) return tool_error("Provide either 'goal' (single task) or 'tasks' (batch).")
if not task_list: if not task_list:
return json.dumps({"error": "No tasks provided."}) return tool_error("No tasks provided.")
# Validate each task has a goal # Validate each task has a goal
for i, task in enumerate(task_list): for i, task in enumerate(task_list):
if not task.get("goal", "").strip(): if not task.get("goal", "").strip():
return json.dumps({"error": f"Task {i} is missing a 'goal'."}) return tool_error(f"Task {i} is missing a 'goal'.")
overall_start = time.monotonic() overall_start = time.monotonic()
results = [] results = []
@ -958,7 +958,7 @@ DELEGATE_TASK_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="delegate_task", name="delegate_task",

View file

@ -432,7 +432,7 @@ def read_file_tool(path: str, offset: int = 1, limit: int = 500, task_id: str =
return json.dumps(result_dict, ensure_ascii=False) return json.dumps(result_dict, ensure_ascii=False)
except Exception as e: except Exception as e:
return json.dumps({"error": str(e)}, ensure_ascii=False) return tool_error(str(e))
def get_read_files_summary(task_id: str = "default") -> list: def get_read_files_summary(task_id: str = "default") -> list:
@ -560,7 +560,7 @@ def write_file_tool(path: str, content: str, task_id: str = "default") -> str:
"""Write content to a file.""" """Write content to a file."""
sensitive_err = _check_sensitive_path(path) sensitive_err = _check_sensitive_path(path)
if sensitive_err: if sensitive_err:
return json.dumps({"error": sensitive_err}, ensure_ascii=False) return tool_error(sensitive_err)
try: try:
stale_warning = _check_file_staleness(path, task_id) stale_warning = _check_file_staleness(path, task_id)
file_ops = _get_file_ops(task_id) file_ops = _get_file_ops(task_id)
@ -577,7 +577,7 @@ def write_file_tool(path: str, content: str, task_id: str = "default") -> str:
logger.debug("write_file expected denial: %s: %s", type(e).__name__, e) logger.debug("write_file expected denial: %s: %s", type(e).__name__, e)
else: else:
logger.error("write_file error: %s: %s", type(e).__name__, e, exc_info=True) logger.error("write_file error: %s: %s", type(e).__name__, e, exc_info=True)
return json.dumps({"error": str(e)}, ensure_ascii=False) return tool_error(str(e))
def patch_tool(mode: str = "replace", path: str = None, old_string: str = None, def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
@ -595,7 +595,7 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
for _p in _paths_to_check: for _p in _paths_to_check:
sensitive_err = _check_sensitive_path(_p) sensitive_err = _check_sensitive_path(_p)
if sensitive_err: if sensitive_err:
return json.dumps({"error": sensitive_err}, ensure_ascii=False) return tool_error(sensitive_err)
try: try:
# Check staleness for all files this patch will touch. # Check staleness for all files this patch will touch.
stale_warnings = [] stale_warnings = []
@ -608,16 +608,16 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
if mode == "replace": if mode == "replace":
if not path: if not path:
return json.dumps({"error": "path required"}) return tool_error("path required")
if old_string is None or new_string is None: if old_string is None or new_string is None:
return json.dumps({"error": "old_string and new_string required"}) return tool_error("old_string and new_string required")
result = file_ops.patch_replace(path, old_string, new_string, replace_all) result = file_ops.patch_replace(path, old_string, new_string, replace_all)
elif mode == "patch": elif mode == "patch":
if not patch: if not patch:
return json.dumps({"error": "patch content required"}) return tool_error("patch content required")
result = file_ops.patch_v4a(patch) result = file_ops.patch_v4a(patch)
else: else:
return json.dumps({"error": f"Unknown mode: {mode}"}) return tool_error(f"Unknown mode: {mode}")
result_dict = result.to_dict() result_dict = result.to_dict()
if stale_warnings: if stale_warnings:
@ -634,7 +634,7 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]" result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
return result_json return result_json
except Exception as e: except Exception as e:
return json.dumps({"error": str(e)}, ensure_ascii=False) return tool_error(str(e))
def search_tool(pattern: str, target: str = "content", path: str = ".", def search_tool(pattern: str, target: str = "content", path: str = ".",
@ -702,7 +702,7 @@ def search_tool(pattern: str, target: str = "content", path: str = ".",
result_json += f"\n\n[Hint: Results truncated. Use offset={next_offset} to see more, or narrow with a more specific pattern or file_glob.]" result_json += f"\n\n[Hint: Results truncated. Use offset={next_offset} to see more, or narrow with a more specific pattern or file_glob.]"
return result_json return result_json
except Exception as e: except Exception as e:
return json.dumps({"error": str(e)}, ensure_ascii=False) return tool_error(str(e))
FILE_TOOLS = [ FILE_TOOLS = [
@ -716,7 +716,7 @@ FILE_TOOLS = [
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Schemas + Registry # Schemas + Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
def _check_file_reqs(): def _check_file_reqs():

View file

@ -221,22 +221,22 @@ def _handle_list_entities(args: dict, **kw) -> str:
return json.dumps({"result": result}) return json.dumps({"result": result})
except Exception as e: except Exception as e:
logger.error("ha_list_entities error: %s", e) logger.error("ha_list_entities error: %s", e)
return json.dumps({"error": f"Failed to list entities: {e}"}) return tool_error(f"Failed to list entities: {e}")
def _handle_get_state(args: dict, **kw) -> str: def _handle_get_state(args: dict, **kw) -> str:
"""Handler for ha_get_state tool.""" """Handler for ha_get_state tool."""
entity_id = args.get("entity_id", "") entity_id = args.get("entity_id", "")
if not entity_id: if not entity_id:
return json.dumps({"error": "Missing required parameter: entity_id"}) return tool_error("Missing required parameter: entity_id")
if not _ENTITY_ID_RE.match(entity_id): if not _ENTITY_ID_RE.match(entity_id):
return json.dumps({"error": f"Invalid entity_id format: {entity_id}"}) return tool_error(f"Invalid entity_id format: {entity_id}")
try: try:
result = _run_async(_async_get_state(entity_id)) result = _run_async(_async_get_state(entity_id))
return json.dumps({"result": result}) return json.dumps({"result": result})
except Exception as e: except Exception as e:
logger.error("ha_get_state error: %s", e) logger.error("ha_get_state error: %s", e)
return json.dumps({"error": f"Failed to get state for {entity_id}: {e}"}) return tool_error(f"Failed to get state for {entity_id}: {e}")
def _handle_call_service(args: dict, **kw) -> str: def _handle_call_service(args: dict, **kw) -> str:
@ -244,7 +244,7 @@ def _handle_call_service(args: dict, **kw) -> str:
domain = args.get("domain", "") domain = args.get("domain", "")
service = args.get("service", "") service = args.get("service", "")
if not domain or not service: if not domain or not service:
return json.dumps({"error": "Missing required parameters: domain and service"}) return tool_error("Missing required parameters: domain and service")
if domain in _BLOCKED_DOMAINS: if domain in _BLOCKED_DOMAINS:
return json.dumps({ return json.dumps({
@ -254,7 +254,7 @@ def _handle_call_service(args: dict, **kw) -> str:
entity_id = args.get("entity_id") entity_id = args.get("entity_id")
if entity_id and not _ENTITY_ID_RE.match(entity_id): if entity_id and not _ENTITY_ID_RE.match(entity_id):
return json.dumps({"error": f"Invalid entity_id format: {entity_id}"}) return tool_error(f"Invalid entity_id format: {entity_id}")
data = args.get("data") data = args.get("data")
try: try:
@ -262,7 +262,7 @@ def _handle_call_service(args: dict, **kw) -> str:
return json.dumps({"result": result}) return json.dumps({"result": result})
except Exception as e: except Exception as e:
logger.error("ha_call_service error: %s", e) logger.error("ha_call_service error: %s", e)
return json.dumps({"error": f"Failed to call {domain}.{service}: {e}"}) return tool_error(f"Failed to call {domain}.{service}: {e}")
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -311,7 +311,7 @@ def _handle_list_services(args: dict, **kw) -> str:
return json.dumps({"result": result}) return json.dumps({"result": result})
except Exception as e: except Exception as e:
logger.error("ha_list_services error: %s", e) logger.error("ha_list_services error: %s", e)
return json.dumps({"error": f"Failed to list services: {e}"}) return tool_error(f"Failed to list services: {e}")
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@ -451,7 +451,7 @@ HA_CALL_SERVICE_SCHEMA = {
# Registration # Registration
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="ha_list_entities", name="ha_list_entities",

View file

@ -652,7 +652,7 @@ if __name__ == "__main__":
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry # Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
IMAGE_GENERATE_SCHEMA = { IMAGE_GENERATE_SCHEMA = {
"name": "image_generate", "name": "image_generate",
@ -679,7 +679,7 @@ IMAGE_GENERATE_SCHEMA = {
def _handle_image_generate(args, **kw): def _handle_image_generate(args, **kw):
prompt = args.get("prompt", "") prompt = args.get("prompt", "")
if not prompt: if not prompt:
return json.dumps({"error": "prompt is required for image generation"}) return tool_error("prompt is required for image generation")
return image_generate_tool( return image_generate_tool(
prompt=prompt, prompt=prompt,
aspect_ratio=args.get("aspect_ratio", "landscape"), aspect_ratio=args.get("aspect_ratio", "landscape"),

View file

@ -792,7 +792,7 @@ class MCPServerTask:
After the initial ``await`` (list_tools), all mutations are synchronous After the initial ``await`` (list_tools), all mutations are synchronous
atomic from the event loop's perspective. atomic from the event loop's perspective.
""" """
from tools.registry import registry from tools.registry import registry, tool_error
from toolsets import TOOLSETS from toolsets import TOOLSETS
async with self._refresh_lock: async with self._refresh_lock:
@ -1326,7 +1326,7 @@ def _make_read_resource_handler(server_name: str, tool_timeout: float):
uri = args.get("uri") uri = args.get("uri")
if not uri: if not uri:
return json.dumps({"error": "Missing required parameter 'uri'"}) return tool_error("Missing required parameter 'uri'")
async def _call(): async def _call():
result = await server.session.read_resource(uri) result = await server.session.read_resource(uri)
@ -1415,7 +1415,7 @@ def _make_get_prompt_handler(server_name: str, tool_timeout: float):
name = args.get("name") name = args.get("name")
if not name: if not name:
return json.dumps({"error": "Missing required parameter 'name'"}) return tool_error("Missing required parameter 'name'")
arguments = args.get("arguments", {}) arguments = args.get("arguments", {})
async def _call(): async def _call():
@ -1724,7 +1724,7 @@ def _register_server_tools(name: str, server: MCPServerTask, config: dict) -> Li
Returns: Returns:
List of registered prefixed tool names. List of registered prefixed tool names.
""" """
from tools.registry import registry from tools.registry import registry, tool_error
from toolsets import create_custom_toolset, TOOLSETS from toolsets import create_custom_toolset, TOOLSETS
registered_names: List[str] = [] registered_names: List[str] = []

View file

@ -449,30 +449,30 @@ def memory_tool(
Returns JSON string with results. Returns JSON string with results.
""" """
if store is None: if store is None:
return json.dumps({"success": False, "error": "Memory is not available. It may be disabled in config or this environment."}, ensure_ascii=False) return tool_error("Memory is not available. It may be disabled in config or this environment.", success=False)
if target not in ("memory", "user"): if target not in ("memory", "user"):
return json.dumps({"success": False, "error": f"Invalid target '{target}'. Use 'memory' or 'user'."}, ensure_ascii=False) return tool_error(f"Invalid target '{target}'. Use 'memory' or 'user'.", success=False)
if action == "add": if action == "add":
if not content: if not content:
return json.dumps({"success": False, "error": "Content is required for 'add' action."}, ensure_ascii=False) return tool_error("Content is required for 'add' action.", success=False)
result = store.add(target, content) result = store.add(target, content)
elif action == "replace": elif action == "replace":
if not old_text: if not old_text:
return json.dumps({"success": False, "error": "old_text is required for 'replace' action."}, ensure_ascii=False) return tool_error("old_text is required for 'replace' action.", success=False)
if not content: if not content:
return json.dumps({"success": False, "error": "content is required for 'replace' action."}, ensure_ascii=False) return tool_error("content is required for 'replace' action.", success=False)
result = store.replace(target, old_text, content) result = store.replace(target, old_text, content)
elif action == "remove": elif action == "remove":
if not old_text: if not old_text:
return json.dumps({"success": False, "error": "old_text is required for 'remove' action."}, ensure_ascii=False) return tool_error("old_text is required for 'remove' action.", success=False)
result = store.remove(target, old_text) result = store.remove(target, old_text)
else: else:
return json.dumps({"success": False, "error": f"Unknown action '{action}'. Use: add, replace, remove"}, ensure_ascii=False) return tool_error(f"Unknown action '{action}'. Use: add, replace, remove", success=False)
return json.dumps(result, ensure_ascii=False) return json.dumps(result, ensure_ascii=False)
@ -539,7 +539,7 @@ MEMORY_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="memory", name="memory",

View file

@ -821,7 +821,7 @@ process_registry = ProcessRegistry()
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry -- the "process" tool schema + handler # Registry -- the "process" tool schema + handler
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
PROCESS_SCHEMA = { PROCESS_SCHEMA = {
"name": "process", "name": "process",
@ -879,7 +879,7 @@ def _handle_process(args, **kw):
return _json.dumps({"processes": process_registry.list_sessions(task_id=task_id)}, ensure_ascii=False) return _json.dumps({"processes": process_registry.list_sessions(task_id=task_id)}, ensure_ascii=False)
elif action in ("poll", "log", "wait", "kill", "write", "submit"): elif action in ("poll", "log", "wait", "kill", "write", "submit"):
if not session_id: if not session_id:
return _json.dumps({"error": f"session_id is required for {action}"}, ensure_ascii=False) return tool_error(f"session_id is required for {action}")
if action == "poll": if action == "poll":
return _json.dumps(process_registry.poll(session_id), ensure_ascii=False) return _json.dumps(process_registry.poll(session_id), ensure_ascii=False)
elif action == "log": elif action == "log":
@ -893,7 +893,7 @@ def _handle_process(args, **kw):
return _json.dumps(process_registry.write_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False) return _json.dumps(process_registry.write_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False)
elif action == "submit": elif action == "submit":
return _json.dumps(process_registry.submit_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False) return _json.dumps(process_registry.submit_stdin(session_id, str(args.get("data", ""))), ensure_ascii=False)
return _json.dumps({"error": f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit"}, ensure_ascii=False) return tool_error(f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit")
registry.register( registry.register(

View file

@ -273,3 +273,48 @@ class ToolRegistry:
# Module-level singleton # Module-level singleton
registry = ToolRegistry() registry = ToolRegistry()
# ---------------------------------------------------------------------------
# Helpers for tool response serialization
# ---------------------------------------------------------------------------
# Every tool handler must return a JSON string. These helpers eliminate the
# boilerplate ``json.dumps({"error": msg}, ensure_ascii=False)`` that appears
# hundreds of times across tool files.
#
# Usage:
# from tools.registry import registry, tool_error, tool_result
#
# return tool_error("something went wrong")
# return tool_error("not found", code=404)
# return tool_result(success=True, data=payload)
# return tool_result(items) # pass a dict directly
def tool_error(message, **extra) -> str:
    """Return a JSON error string for tool handlers.

    Parameters
    ----------
    message:
        Human-readable error description; coerced to ``str``.
    **extra:
        Additional key/value pairs merged into the payload (commonly
        ``success=False``). An explicit ``error`` key here overrides
        *message*.

    Returns
    -------
    str
        A JSON object string with non-ASCII characters preserved
        (``ensure_ascii=False``), matching the serialization style used
        by tool handlers throughout the codebase.

    >>> tool_error("file not found")
    '{"error": "file not found"}'
    >>> tool_error("bad input", success=False)
    '{"error": "bad input", "success": false}'
    """
    # Single-expression build: dict-literal insertion order keeps "error"
    # first; merging ``extra`` unconditionally is a no-op when it's empty,
    # so no guard is needed.
    return json.dumps({"error": str(message), **extra}, ensure_ascii=False)
def tool_result(data=None, **kwargs) -> str:
    """Return a JSON result string for tool handlers.

    Accepts a dict (or any JSON-serializable object) as the positional
    argument *or* keyword arguments — never both at once.

    Parameters
    ----------
    data:
        Optional payload serialized as-is when provided.
    **kwargs:
        Key/value pairs serialized as an object when *data* is omitted.

    Returns
    -------
    str
        A JSON string with non-ASCII characters preserved
        (``ensure_ascii=False``).

    Raises
    ------
    TypeError
        If both *data* and keyword arguments are supplied — previously the
        kwargs were silently discarded, hiding caller bugs.

    >>> tool_result(success=True, count=42)
    '{"success": true, "count": 42}'
    >>> tool_result({"key": "value"})
    '{"key": "value"}'
    """
    if data is not None and kwargs:
        # The docstring contract is "one or the other"; fail loudly instead
        # of silently dropping the keyword arguments.
        raise TypeError(
            "tool_result() accepts a positional payload or keyword "
            "arguments, not both"
        )
    if data is not None:
        return json.dumps(data, ensure_ascii=False)
    return json.dumps(kwargs, ensure_ascii=False)

View file

@ -101,7 +101,7 @@ def _handle_send(args):
target = args.get("target", "") target = args.get("target", "")
message = args.get("message", "") message = args.get("message", "")
if not target or not message: if not target or not message:
return json.dumps({"error": "Both 'target' and 'message' are required when action='send'"}) return tool_error("Both 'target' and 'message' are required when action='send'")
parts = target.split(":", 1) parts = target.split(":", 1)
platform_name = parts[0].strip().lower() platform_name = parts[0].strip().lower()
@ -134,7 +134,7 @@ def _handle_send(args):
from tools.interrupt import is_interrupted from tools.interrupt import is_interrupted
if is_interrupted(): if is_interrupted():
return json.dumps({"error": "Interrupted"}) return tool_error("Interrupted")
try: try:
from gateway.config import load_gateway_config, Platform from gateway.config import load_gateway_config, Platform
@ -160,11 +160,11 @@ def _handle_send(args):
platform = platform_map.get(platform_name) platform = platform_map.get(platform_name)
if not platform: if not platform:
avail = ", ".join(platform_map.keys()) avail = ", ".join(platform_map.keys())
return json.dumps({"error": f"Unknown platform: {platform_name}. Available: {avail}"}) return tool_error(f"Unknown platform: {platform_name}. Available: {avail}")
pconfig = config.platforms.get(platform) pconfig = config.platforms.get(platform)
if not pconfig or not pconfig.enabled: if not pconfig or not pconfig.enabled:
return json.dumps({"error": f"Platform '{platform_name}' is not configured. Set up credentials in ~/.hermes/config.yaml or environment variables."}) return tool_error(f"Platform '{platform_name}' is not configured. Set up credentials in ~/.hermes/config.yaml or environment variables.")
from gateway.platforms.base import BasePlatformAdapter from gateway.platforms.base import BasePlatformAdapter
@ -940,7 +940,7 @@ def _check_send_message():
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="send_message", name="send_message",

View file

@ -241,7 +241,7 @@ def _list_recent_sessions(db, limit: int, current_session_id: str = None) -> str
}, ensure_ascii=False) }, ensure_ascii=False)
except Exception as e: except Exception as e:
logging.error("Error listing recent sessions: %s", e, exc_info=True) logging.error("Error listing recent sessions: %s", e, exc_info=True)
return json.dumps({"success": False, "error": f"Failed to list recent sessions: {e}"}, ensure_ascii=False) return tool_error(f"Failed to list recent sessions: {e}", success=False)
def session_search( def session_search(
@ -258,7 +258,7 @@ def session_search(
The current session is excluded from results since the agent already has that context. The current session is excluded from results since the agent already has that context.
""" """
if db is None: if db is None:
return json.dumps({"success": False, "error": "Session database not available."}, ensure_ascii=False) return tool_error("Session database not available.", success=False)
limit = min(limit, 5) # Cap at 5 sessions to avoid excessive LLM calls limit = min(limit, 5) # Cap at 5 sessions to avoid excessive LLM calls
@ -427,7 +427,7 @@ def session_search(
except Exception as e: except Exception as e:
logging.error("Session search failed: %s", e, exc_info=True) logging.error("Session search failed: %s", e, exc_info=True)
return json.dumps({"success": False, "error": f"Search failed: {str(e)}"}, ensure_ascii=False) return tool_error(f"Search failed: {str(e)}", success=False)
def check_session_search_requirements() -> bool: def check_session_search_requirements() -> bool:
@ -487,7 +487,7 @@ SESSION_SEARCH_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="session_search", name="session_search",

View file

@ -584,19 +584,19 @@ def skill_manage(
""" """
if action == "create": if action == "create":
if not content: if not content:
return json.dumps({"success": False, "error": "content is required for 'create'. Provide the full SKILL.md text (frontmatter + body)."}, ensure_ascii=False) return tool_error("content is required for 'create'. Provide the full SKILL.md text (frontmatter + body).", success=False)
result = _create_skill(name, content, category) result = _create_skill(name, content, category)
elif action == "edit": elif action == "edit":
if not content: if not content:
return json.dumps({"success": False, "error": "content is required for 'edit'. Provide the full updated SKILL.md text."}, ensure_ascii=False) return tool_error("content is required for 'edit'. Provide the full updated SKILL.md text.", success=False)
result = _edit_skill(name, content) result = _edit_skill(name, content)
elif action == "patch": elif action == "patch":
if not old_string: if not old_string:
return json.dumps({"success": False, "error": "old_string is required for 'patch'. Provide the text to find."}, ensure_ascii=False) return tool_error("old_string is required for 'patch'. Provide the text to find.", success=False)
if new_string is None: if new_string is None:
return json.dumps({"success": False, "error": "new_string is required for 'patch'. Use empty string to delete matched text."}, ensure_ascii=False) return tool_error("new_string is required for 'patch'. Use empty string to delete matched text.", success=False)
result = _patch_skill(name, old_string, new_string, file_path, replace_all) result = _patch_skill(name, old_string, new_string, file_path, replace_all)
elif action == "delete": elif action == "delete":
@ -604,14 +604,14 @@ def skill_manage(
elif action == "write_file": elif action == "write_file":
if not file_path: if not file_path:
return json.dumps({"success": False, "error": "file_path is required for 'write_file'. Example: 'references/api-guide.md'"}, ensure_ascii=False) return tool_error("file_path is required for 'write_file'. Example: 'references/api-guide.md'", success=False)
if file_content is None: if file_content is None:
return json.dumps({"success": False, "error": "file_content is required for 'write_file'."}, ensure_ascii=False) return tool_error("file_content is required for 'write_file'.", success=False)
result = _write_file(name, file_path, file_content) result = _write_file(name, file_path, file_content)
elif action == "remove_file": elif action == "remove_file":
if not file_path: if not file_path:
return json.dumps({"success": False, "error": "file_path is required for 'remove_file'."}, ensure_ascii=False) return tool_error("file_path is required for 'remove_file'.", success=False)
result = _remove_file(name, file_path) result = _remove_file(name, file_path)
else: else:
@ -722,7 +722,7 @@ SKILL_MANAGE_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="skill_manage", name="skill_manage",

View file

@ -76,7 +76,7 @@ from enum import Enum
from pathlib import Path from pathlib import Path
from typing import Dict, Any, List, Optional, Set, Tuple from typing import Dict, Any, List, Optional, Set, Tuple
from tools.registry import registry from tools.registry import registry, tool_error
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -713,7 +713,7 @@ def skills_categories(verbose: bool = False, task_id: str = None) -> str:
) )
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}, ensure_ascii=False) return tool_error(str(e), success=False)
def skills_list(category: str = None, task_id: str = None) -> str: def skills_list(category: str = None, task_id: str = None) -> str:
@ -781,7 +781,7 @@ def skills_list(category: str = None, task_id: str = None) -> str:
) )
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}, ensure_ascii=False) return tool_error(str(e), success=False)
def skill_view(name: str, file_path: str = None, task_id: str = None) -> str: def skill_view(name: str, file_path: str = None, task_id: str = None) -> str:
@ -1255,7 +1255,7 @@ def skill_view(name: str, file_path: str = None, task_id: str = None) -> str:
return json.dumps(result, ensure_ascii=False) return json.dumps(result, ensure_ascii=False)
except Exception as e: except Exception as e:
return json.dumps({"success": False, "error": str(e)}, ensure_ascii=False) return tool_error(str(e), success=False)
# Tool description for model_tools.py # Tool description for model_tools.py

View file

@ -161,7 +161,7 @@ def todo_tool(
JSON string with the full current list and summary metadata. JSON string with the full current list and summary metadata.
""" """
if store is None: if store is None:
return json.dumps({"error": "TodoStore not initialized"}, ensure_ascii=False) return tool_error("TodoStore not initialized")
if todos is not None: if todos is not None:
items = store.write(todos, merge) items = store.write(todos, merge)
@ -255,7 +255,7 @@ TODO_SCHEMA = {
# --- Registry --- # --- Registry ---
from tools.registry import registry from tools.registry import registry, tool_error
registry.register( registry.register(
name="todo", name="todo",

View file

@ -466,7 +466,7 @@ def text_to_speech_tool(
str: JSON result with success, file_path, and optionally MEDIA tag. str: JSON result with success, file_path, and optionally MEDIA tag.
""" """
if not text or not text.strip(): if not text or not text.strip():
return json.dumps({"success": False, "error": "Text is required"}, ensure_ascii=False) return tool_error("Text is required", success=False)
# Truncate very long text with a warning # Truncate very long text with a warning
if len(text) > MAX_TEXT_LENGTH: if len(text) > MAX_TEXT_LENGTH:
@ -607,17 +607,17 @@ def text_to_speech_tool(
# Configuration errors (missing API keys, etc.) # Configuration errors (missing API keys, etc.)
error_msg = f"TTS configuration error ({provider}): {e}" error_msg = f"TTS configuration error ({provider}): {e}"
logger.error("%s", error_msg) logger.error("%s", error_msg)
return json.dumps({"success": False, "error": error_msg}, ensure_ascii=False) return tool_error(error_msg, success=False)
except FileNotFoundError as e: except FileNotFoundError as e:
# Missing dependencies or files # Missing dependencies or files
error_msg = f"TTS dependency missing ({provider}): {e}" error_msg = f"TTS dependency missing ({provider}): {e}"
logger.error("%s", error_msg, exc_info=True) logger.error("%s", error_msg, exc_info=True)
return json.dumps({"success": False, "error": error_msg}, ensure_ascii=False) return tool_error(error_msg, success=False)
except Exception as e: except Exception as e:
# Unexpected errors # Unexpected errors
error_msg = f"TTS generation failed ({provider}): {e}" error_msg = f"TTS generation failed ({provider}): {e}"
logger.error("%s", error_msg, exc_info=True) logger.error("%s", error_msg, exc_info=True)
return json.dumps({"success": False, "error": error_msg}, ensure_ascii=False) return tool_error(error_msg, success=False)
# =========================================================================== # ===========================================================================
@ -950,7 +950,7 @@ if __name__ == "__main__":
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry # Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
TTS_SCHEMA = { TTS_SCHEMA = {
"name": "text_to_speech", "name": "text_to_speech",

View file

@ -320,7 +320,7 @@ async def vision_analyze_tool(
try: try:
from tools.interrupt import is_interrupted from tools.interrupt import is_interrupted
if is_interrupted(): if is_interrupted():
return json.dumps({"success": False, "error": "Interrupted"}) return tool_error("Interrupted", success=False)
logger.info("Analyzing image: %s", image_url[:60]) logger.info("Analyzing image: %s", image_url[:60])
logger.info("User prompt: %s", user_prompt[:100]) logger.info("User prompt: %s", user_prompt[:100])
@ -570,7 +570,7 @@ if __name__ == "__main__":
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry # Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
VISION_ANALYZE_SCHEMA = { VISION_ANALYZE_SCHEMA = {
"name": "vision_analyze", "name": "vision_analyze",

View file

@ -1079,7 +1079,7 @@ def web_search_tool(query: str, limit: int = 5) -> str:
try: try:
from tools.interrupt import is_interrupted from tools.interrupt import is_interrupted
if is_interrupted(): if is_interrupted():
return json.dumps({"error": "Interrupted", "success": False}) return tool_error("Interrupted", success=False)
# Dispatch to the configured backend # Dispatch to the configured backend
backend = _get_backend() backend = _get_backend()
@ -1158,7 +1158,7 @@ def web_search_tool(query: str, limit: int = 5) -> str:
_debug.log_call("web_search_tool", debug_call_data) _debug.log_call("web_search_tool", debug_call_data)
_debug.save() _debug.save()
return json.dumps({"error": error_msg}, ensure_ascii=False) return tool_error(error_msg)
async def web_extract_tool( async def web_extract_tool(
@ -1458,7 +1458,7 @@ async def web_extract_tool(
trimmed_response = {"results": trimmed_results} trimmed_response = {"results": trimmed_results}
if trimmed_response.get("results") == []: if trimmed_response.get("results") == []:
result_json = json.dumps({"error": "Content was inaccessible or not found"}, ensure_ascii=False) result_json = tool_error("Content was inaccessible or not found")
cleaned_result = clean_base64_images(result_json) cleaned_result = clean_base64_images(result_json)
@ -1484,7 +1484,7 @@ async def web_extract_tool(
_debug.log_call("web_extract_tool", debug_call_data) _debug.log_call("web_extract_tool", debug_call_data)
_debug.save() _debug.save()
return json.dumps({"error": error_msg}, ensure_ascii=False) return tool_error(error_msg)
async def web_crawl_tool( async def web_crawl_tool(
@ -1560,7 +1560,7 @@ async def web_crawl_tool(
from tools.interrupt import is_interrupted as _is_int from tools.interrupt import is_interrupted as _is_int
if _is_int(): if _is_int():
return json.dumps({"error": "Interrupted", "success": False}) return tool_error("Interrupted", success=False)
logger.info("Tavily crawl: %s", url) logger.info("Tavily crawl: %s", url)
payload: Dict[str, Any] = { payload: Dict[str, Any] = {
@ -1671,7 +1671,7 @@ async def web_crawl_tool(
from tools.interrupt import is_interrupted as _is_int from tools.interrupt import is_interrupted as _is_int
if _is_int(): if _is_int():
return json.dumps({"error": "Interrupted", "success": False}) return tool_error("Interrupted", success=False)
try: try:
crawl_result = _get_firecrawl_client().crawl( crawl_result = _get_firecrawl_client().crawl(
@ -1897,7 +1897,7 @@ async def web_crawl_tool(
_debug.log_call("web_crawl_tool", debug_call_data) _debug.log_call("web_crawl_tool", debug_call_data)
_debug.save() _debug.save()
return json.dumps({"error": error_msg}, ensure_ascii=False) return tool_error(error_msg)
# Convenience function to check Firecrawl credentials # Convenience function to check Firecrawl credentials
@ -2043,7 +2043,7 @@ if __name__ == "__main__":
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Registry # Registry
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from tools.registry import registry from tools.registry import registry, tool_error
WEB_SEARCH_SCHEMA = { WEB_SEARCH_SCHEMA = {
"name": "web_search", "name": "web_search",