mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
fix: sanitize tool schemas for llama.cpp backends; restore MCP in TUI (#15032)
Local llama.cpp servers (e.g. ggml-org/llama.cpp:full-cuda) fail the entire
request with HTTP 400 'Unable to generate parser for this template. ...
Unrecognized schema: "object"' when any tool schema contains shapes its
json-schema-to-grammar converter can't handle:
* 'type': 'object' without 'properties'
* bare string schema values ('additionalProperties: "object"')
* 'type': ['X', 'null'] arrays (nullable form)
Cloud providers accept these shapes silently, so they arrive unmodified from external MCP
servers (Atlassian, GCloud, Datadog) and from a couple of our own tools.
Changes
- tools/schema_sanitizer.py: walks the finalized tool list right before it
leaves get_tool_definitions() and repairs the hostile shapes in a deep
copy. No-op on well-formed schemas. Recurses into properties, items,
additionalProperties, anyOf/oneOf/allOf, and $defs.
- model_tools.get_tool_definitions(): invoke the sanitizer as the last
step so all paths (built-in, MCP, plugin, dynamically-rebuilt) get
covered uniformly.
- tools/browser_cdp_tool.py, tools/mcp_tool.py: fix our own bare-object
schemas so sanitization isn't load-bearing for in-repo tools.
- tui_gateway/server.py: _load_enabled_toolsets() was passing
include_default_mcp_servers=False at runtime. That's the config-editing
variant (see PR #3252) — it silently drops every default MCP server
from the TUI's enabled_toolsets, which is why the TUI didn't hit the
llama.cpp crash (no MCP tools sent at all). Switch to True so TUI
matches CLI behavior.
Tests
tests/tools/test_schema_sanitizer.py (17 tests) covers the individual
failure modes, well-formed pass-through, deep-copy isolation, and
required-field pruning.
E2E: loaded the default 'hermes-cli' toolset with MCP discovery and
confirmed all 27 resolved tool schemas pass a llama.cpp-compatibility
walk (no 'object' node missing 'properties', no bare-string schema
values).
This commit is contained in:
parent
5dda4cab41
commit
34c3e67109
6 changed files with 413 additions and 1 deletions
|
|
@ -343,6 +343,18 @@ def get_tool_definitions(
|
||||||
global _last_resolved_tool_names
|
global _last_resolved_tool_names
|
||||||
_last_resolved_tool_names = [t["function"]["name"] for t in filtered_tools]
|
_last_resolved_tool_names = [t["function"]["name"] for t in filtered_tools]
|
||||||
|
|
||||||
|
# Sanitize schemas for broad backend compatibility. llama.cpp's
|
||||||
|
# json-schema-to-grammar converter (used by its OAI server to build
|
||||||
|
# GBNF tool-call parsers) rejects some shapes that cloud providers
|
||||||
|
# silently accept — bare "type": "object" with no properties,
|
||||||
|
# string-valued schema nodes from malformed MCP servers, etc. This
|
||||||
|
# is a no-op for schemas that are already well-formed.
|
||||||
|
try:
|
||||||
|
from tools.schema_sanitizer import sanitize_tool_schemas
|
||||||
|
filtered_tools = sanitize_tool_schemas(filtered_tools)
|
||||||
|
except Exception as e: # pragma: no cover — defensive
|
||||||
|
logger.warning("Schema sanitization skipped: %s", e)
|
||||||
|
|
||||||
return filtered_tools
|
return filtered_tools
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
205
tests/tools/test_schema_sanitizer.py
Normal file
205
tests/tools/test_schema_sanitizer.py
Normal file
|
|
@ -0,0 +1,205 @@
|
||||||
|
"""Tests for tools/schema_sanitizer.py.
|
||||||
|
|
||||||
|
Targets the known llama.cpp ``json-schema-to-grammar`` failure modes that
|
||||||
|
cause ``HTTP 400: Unable to generate parser for this template. ...
|
||||||
|
Unrecognized schema: "object"`` errors on local inference backends.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
|
||||||
|
from tools.schema_sanitizer import sanitize_tool_schemas
|
||||||
|
|
||||||
|
|
||||||
|
def _tool(name: str, parameters: dict) -> dict:
|
||||||
|
return {"type": "function", "function": {"name": name, "parameters": parameters}}
|
||||||
|
|
||||||
|
|
||||||
|
def test_object_without_properties_gets_empty_properties():
    """A bare ``{"type": "object"}`` gains an empty properties dict."""
    result = sanitize_tool_schemas([_tool("t", {"type": "object"})])
    expected = {"type": "object", "properties": {}}
    assert result[0]["function"]["parameters"] == expected
|
||||||
|
|
||||||
|
|
||||||
|
def test_nested_object_without_properties_gets_empty_properties():
    """A nested bare object property is repaired; siblings are untouched."""
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "arguments": {"type": "object", "description": "free-form"},
        },
        "required": ["name"],
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    arguments = result[0]["function"]["parameters"]["properties"]["arguments"]
    assert (arguments["type"], arguments["properties"]) == ("object", {})
    assert arguments["description"] == "free-form"
|
||||||
|
|
||||||
|
|
||||||
|
def test_bare_string_object_value_replaced_with_schema_dict():
    # A property's schema is the literal string "object" — the exact shape
    # llama.cpp rejects with `Unrecognized schema: "object"`.
    broken = {
        "type": "object",
        "properties": {
            "payload": "object",  # invalid; should be {"type": "object"}
        },
    }
    result = sanitize_tool_schemas([_tool("t", broken)])
    fixed = result[0]["function"]["parameters"]["properties"]["payload"]
    assert isinstance(fixed, dict)
    assert fixed["type"] == "object"
    assert fixed["properties"] == {}
|
||||||
|
|
||||||
|
|
||||||
|
def test_bare_string_primitive_value_replaced_with_schema_dict():
    """A bare primitive type-name string becomes a proper schema dict."""
    broken = {"type": "object", "properties": {"name": "string"}}
    result = sanitize_tool_schemas([_tool("t", broken)])
    name_schema = result[0]["function"]["parameters"]["properties"]["name"]
    assert name_schema == {"type": "string"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_nullable_type_array_collapsed_to_single_string():
    """``type: ["string", "null"]`` collapses to a single string type."""
    schema = {
        "type": "object",
        "properties": {"maybe_name": {"type": ["string", "null"]}},
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    collapsed = result[0]["function"]["parameters"]["properties"]["maybe_name"]
    assert collapsed["type"] == "string"
    assert collapsed.get("nullable") is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_anyof_nested_objects_sanitized():
    """Bare object variants inside ``anyOf`` are repaired in place."""
    schema = {
        "type": "object",
        "properties": {
            "opt": {
                "anyOf": [
                    {"type": "object"},  # bare object variant
                    {"type": "string"},
                ],
            },
        },
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    first, second = result[0]["function"]["parameters"]["properties"]["opt"]["anyOf"]
    assert first == {"type": "object", "properties": {}}
    assert second == {"type": "string"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_missing_parameters_gets_default_object_schema():
    """A tool with no ``parameters`` key gets the minimal valid schema."""
    bare = {"type": "function", "function": {"name": "t"}}
    result = sanitize_tool_schemas([bare])
    assert result[0]["function"]["parameters"] == {"type": "object", "properties": {}}
|
||||||
|
|
||||||
|
|
||||||
|
def test_non_dict_parameters_gets_default_object_schema():
    """Pathological non-dict ``parameters`` is replaced wholesale."""
    result = sanitize_tool_schemas([_tool("t", "object")])
    assert result[0]["function"]["parameters"] == {"type": "object", "properties": {}}
|
||||||
|
|
||||||
|
|
||||||
|
def test_required_pruned_to_existing_properties():
    """``required`` names with no matching property are pruned."""
    schema = {
        "type": "object",
        "properties": {"name": {"type": "string"}},
        "required": ["name", "missing_field"],
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    assert result[0]["function"]["parameters"]["required"] == ["name"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_required_all_missing_is_dropped():
    """If every ``required`` entry is unknown, the key is removed entirely."""
    schema = {"type": "object", "properties": {}, "required": ["x", "y"]}
    result = sanitize_tool_schemas([_tool("t", schema)])
    assert "required" not in result[0]["function"]["parameters"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_well_formed_schema_unchanged():
    """Sanitization is a no-op on a schema that is already valid."""
    reference = {
        "type": "object",
        "properties": {
            "path": {"type": "string", "description": "File path"},
            "offset": {"type": "integer", "minimum": 1},
        },
        "required": ["path"],
    }
    result = sanitize_tool_schemas([_tool("read_file", copy.deepcopy(reference))])
    assert result[0]["function"]["parameters"] == reference
|
||||||
|
|
||||||
|
|
||||||
|
def test_additional_properties_bool_preserved():
    """Boolean ``additionalProperties`` is a valid form and survives intact."""
    schema = {
        "type": "object",
        "properties": {
            "payload": {
                "type": "object",
                "properties": {},
                "additionalProperties": True,
            },
        },
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    payload = result[0]["function"]["parameters"]["properties"]["payload"]
    assert payload["additionalProperties"] is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_additional_properties_schema_sanitized():
    """A schema-valued ``additionalProperties`` is itself sanitized."""
    schema = {
        "type": "object",
        "properties": {
            "dict_field": {
                "type": "object",
                "additionalProperties": {"type": "object"},  # bare object schema
            },
        },
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    dict_field = result[0]["function"]["parameters"]["properties"]["dict_field"]
    assert dict_field["additionalProperties"] == {"type": "object", "properties": {}}
|
||||||
|
|
||||||
|
|
||||||
|
def test_deepcopy_does_not_mutate_input():
    """The sanitizer works on a deep copy; the caller's schema is untouched."""
    source = {
        "type": "object",
        "properties": {"x": {"type": "object"}},
    }
    sanitize_tool_schemas([_tool("t", source)])
    # The original must not gain the injected empty properties dict.
    assert "properties" not in source["properties"]["x"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_items_sanitized_in_array_schema():
    """Bare object ``items`` inside an array schema is repaired."""
    schema = {
        "type": "object",
        "properties": {
            "bag": {"type": "array", "items": {"type": "object"}},
        },
    }
    result = sanitize_tool_schemas([_tool("t", schema)])
    bag_items = result[0]["function"]["parameters"]["properties"]["bag"]["items"]
    assert bag_items == {"type": "object", "properties": {}}
|
||||||
|
|
||||||
|
|
||||||
|
def test_empty_tools_list_returns_empty():
    """An empty tool list passes through unchanged."""
    result = sanitize_tool_schemas([])
    assert result == []
|
||||||
|
|
||||||
|
|
||||||
|
def test_none_tools_returns_none():
    """``None`` (no tools at all) passes through unchanged."""
    result = sanitize_tool_schemas(None)
    assert result is None
|
||||||
|
|
@ -477,6 +477,7 @@ BROWSER_CDP_SCHEMA: Dict[str, Any] = {
|
||||||
"Method-specific parameters as a JSON object. Omit or "
|
"Method-specific parameters as a JSON object. Omit or "
|
||||||
"pass {} for methods that take no parameters."
|
"pass {} for methods that take no parameters."
|
||||||
),
|
),
|
||||||
|
"properties": {},
|
||||||
"additionalProperties": True,
|
"additionalProperties": True,
|
||||||
},
|
},
|
||||||
"target_id": {
|
"target_id": {
|
||||||
|
|
|
||||||
|
|
@ -2294,6 +2294,8 @@ def _build_utility_schemas(server_name: str) -> List[dict]:
|
||||||
"arguments": {
|
"arguments": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Optional arguments to pass to the prompt",
|
"description": "Optional arguments to pass to the prompt",
|
||||||
|
"properties": {},
|
||||||
|
"additionalProperties": True,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"required": ["name"],
|
"required": ["name"],
|
||||||
|
|
|
||||||
186
tools/schema_sanitizer.py
Normal file
186
tools/schema_sanitizer.py
Normal file
|
|
@ -0,0 +1,186 @@
|
||||||
|
"""Sanitize tool JSON schemas for broad LLM-backend compatibility.
|
||||||
|
|
||||||
|
Some local inference backends (notably llama.cpp's ``json-schema-to-grammar``
|
||||||
|
converter used to build GBNF tool-call parsers) are strict about what JSON
|
||||||
|
Schema shapes they accept. Schemas that OpenAI / Anthropic / most cloud
|
||||||
|
providers silently accept can make llama.cpp fail the entire request with:
|
||||||
|
|
||||||
|
HTTP 400: Unable to generate parser for this template.
|
||||||
|
Automatic parser generation failed: JSON schema conversion failed:
|
||||||
|
Unrecognized schema: "object"
|
||||||
|
|
||||||
|
The failure modes we've seen in the wild:
|
||||||
|
|
||||||
|
* ``{"type": "object"}`` with no ``properties`` — rejected as a node the
|
||||||
|
grammar generator can't constrain.
|
||||||
|
* A schema value that is the bare string ``"object"`` instead of a dict
|
||||||
|
(malformed MCP server output, e.g. ``additionalProperties: "object"``).
|
||||||
|
* ``"type": ["string", "null"]`` array types — many converters only accept
|
||||||
|
single-string ``type``.
|
||||||
|
* Unconstrained ``additionalProperties`` on objects with empty properties.
|
||||||
|
|
||||||
|
This module walks the final tool schema tree (after MCP-level normalization
|
||||||
|
and any per-tool dynamic rebuilds) and fixes the known-hostile constructs
|
||||||
|
in-place on a deep copy. It is intentionally conservative: it only modifies
|
||||||
|
shapes the LLM backend couldn't use anyway.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import logging
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def sanitize_tool_schemas(tools: list[dict]) -> list[dict]:
    """Return a sanitized deep copy of an OpenAI-format tool list.

    Expects ``[{"type": "function", "function": {"name": ..., "parameters":
    {...}}}]`` and repairs each tool's parameter schema for strict backends
    (e.g. llama.cpp's grammar converter). Because every entry is deep-copied,
    callers may freely mutate the result without touching the originals.
    Falsy input (``[]`` or ``None``) is returned as-is.
    """
    if not tools:
        return tools
    return [_sanitize_single_tool(entry) for entry in tools]
|
||||||
|
|
||||||
|
|
||||||
|
def _sanitize_single_tool(tool: dict) -> dict:
    """Deep-copy one OpenAI-format tool entry and sanitize its parameters.

    Guarantees on return that ``function.parameters`` (when a function dict
    exists at all) is a dict with ``type: "object"`` and a dict-valued
    ``properties`` key.
    """
    result = copy.deepcopy(tool)
    function_block = result.get("function") if isinstance(result, dict) else None
    if not isinstance(function_block, dict):
        # Not a recognizable tool entry — return the copy untouched.
        return result

    raw_params = function_block.get("parameters")
    if not isinstance(raw_params, dict):
        # Missing or non-dict parameters → substitute the minimal valid shape.
        function_block["parameters"] = {"type": "object", "properties": {}}
        return result

    tool_name = function_block.get("name", "<tool>")
    cleaned = _sanitize_node(raw_params, path=tool_name)
    if not isinstance(cleaned, dict):
        # Sanitization collapsed the schema to a non-dict — fall back.
        function_block["parameters"] = {"type": "object", "properties": {}}
        return result

    # Enforce the top-level contract: an object schema with dict properties.
    if cleaned.get("type") != "object":
        cleaned["type"] = "object"
    if not isinstance(cleaned.get("properties"), dict):
        cleaned["properties"] = {}
    function_block["parameters"] = cleaned
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _sanitize_node(node: Any, path: str) -> Any:
|
||||||
|
"""Recursively sanitize a JSON-Schema fragment.
|
||||||
|
|
||||||
|
- Replaces bare-string schema values ("object", "string", ...) with
|
||||||
|
``{"type": <value>}`` so downstream consumers see a dict.
|
||||||
|
- Injects ``properties: {}`` into object-typed nodes missing it.
|
||||||
|
- Normalizes ``type: [X, "null"]`` arrays to single ``type: X`` (keeping
|
||||||
|
``nullable: true`` as a hint).
|
||||||
|
- Recurses into ``properties``, ``items``, ``additionalProperties``,
|
||||||
|
``anyOf``, ``oneOf``, ``allOf``, and ``$defs`` / ``definitions``.
|
||||||
|
"""
|
||||||
|
# Malformed: the schema position holds a bare string like "object".
|
||||||
|
if isinstance(node, str):
|
||||||
|
if node in {"object", "string", "number", "integer", "boolean", "array", "null"}:
|
||||||
|
logger.debug(
|
||||||
|
"schema_sanitizer[%s]: replacing bare-string schema %r "
|
||||||
|
"with {'type': %r}",
|
||||||
|
path, node, node,
|
||||||
|
)
|
||||||
|
return {"type": node} if node != "object" else {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {},
|
||||||
|
}
|
||||||
|
# Any other stray string is not a schema — drop it by replacing with
|
||||||
|
# a permissive object schema rather than propagate something the
|
||||||
|
# backend will reject.
|
||||||
|
logger.debug(
|
||||||
|
"schema_sanitizer[%s]: replacing non-schema string %r "
|
||||||
|
"with empty object schema", path, node,
|
||||||
|
)
|
||||||
|
return {"type": "object", "properties": {}}
|
||||||
|
|
||||||
|
if isinstance(node, list):
|
||||||
|
return [_sanitize_node(item, f"{path}[{i}]") for i, item in enumerate(node)]
|
||||||
|
|
||||||
|
if not isinstance(node, dict):
|
||||||
|
return node
|
||||||
|
|
||||||
|
out: dict = {}
|
||||||
|
for key, value in node.items():
|
||||||
|
# type: [X, "null"] → type: X (the backend's tool-call parser only
|
||||||
|
# accepts singular string types; nullable is lost but the call still
|
||||||
|
# succeeds, and the model can still pass null on its own.)
|
||||||
|
if key == "type" and isinstance(value, list):
|
||||||
|
non_null = [t for t in value if t != "null"]
|
||||||
|
if len(non_null) == 1 and isinstance(non_null[0], str):
|
||||||
|
out["type"] = non_null[0]
|
||||||
|
if "null" in value:
|
||||||
|
out.setdefault("nullable", True)
|
||||||
|
continue
|
||||||
|
# Fallback: pick the first string type, drop the rest.
|
||||||
|
first_str = next((t for t in value if isinstance(t, str) and t != "null"), None)
|
||||||
|
if first_str:
|
||||||
|
out["type"] = first_str
|
||||||
|
continue
|
||||||
|
# All-null or empty list → treat as object.
|
||||||
|
out["type"] = "object"
|
||||||
|
continue
|
||||||
|
|
||||||
|
if key in {"properties", "$defs", "definitions"} and isinstance(value, dict):
|
||||||
|
out[key] = {
|
||||||
|
sub_k: _sanitize_node(sub_v, f"{path}.{key}.{sub_k}")
|
||||||
|
for sub_k, sub_v in value.items()
|
||||||
|
}
|
||||||
|
elif key in {"items", "additionalProperties"}:
|
||||||
|
if isinstance(value, bool):
|
||||||
|
# Keep bool ``additionalProperties`` as-is — it's a valid form
|
||||||
|
# and widely accepted. ``items: true/false`` is non-standard
|
||||||
|
# but we preserve rather than drop.
|
||||||
|
out[key] = value
|
||||||
|
else:
|
||||||
|
out[key] = _sanitize_node(value, f"{path}.{key}")
|
||||||
|
elif key in {"anyOf", "oneOf", "allOf"} and isinstance(value, list):
|
||||||
|
out[key] = [
|
||||||
|
_sanitize_node(item, f"{path}.{key}[{i}]")
|
||||||
|
for i, item in enumerate(value)
|
||||||
|
]
|
||||||
|
elif key in {"required", "enum", "examples"}:
|
||||||
|
# Schema "sibling" keywords whose values are NOT schemas:
|
||||||
|
# - ``required``: list of property-name strings
|
||||||
|
# - ``enum``: list of literal values (any JSON type)
|
||||||
|
# - ``examples``: list of example values (any JSON type)
|
||||||
|
# Recursing into these with _sanitize_node() would mis-interpret
|
||||||
|
# literal strings like "path" as bare-string schemas and replace
|
||||||
|
# them with {"type": "object"} dicts. Pass through unchanged.
|
||||||
|
out[key] = copy.deepcopy(value) if isinstance(value, (list, dict)) else value
|
||||||
|
else:
|
||||||
|
out[key] = _sanitize_node(value, f"{path}.{key}") if isinstance(value, (dict, list)) else value
|
||||||
|
|
||||||
|
# Object nodes without properties: inject empty properties dict.
|
||||||
|
# llama.cpp's grammar generator can't constrain a free-form object.
|
||||||
|
if out.get("type") == "object" and not isinstance(out.get("properties"), dict):
|
||||||
|
out["properties"] = {}
|
||||||
|
|
||||||
|
# Prune ``required`` entries that don't exist in properties (defense
|
||||||
|
# against malformed MCP schemas; also caught upstream for MCP tools, but
|
||||||
|
# built-in tools or plugin tools may not have been through that path).
|
||||||
|
if out.get("type") == "object" and isinstance(out.get("required"), list):
|
||||||
|
props = out.get("properties") or {}
|
||||||
|
valid = [r for r in out["required"] if isinstance(r, str) and r in props]
|
||||||
|
if not valid:
|
||||||
|
out.pop("required", None)
|
||||||
|
elif len(valid) != len(out["required"]):
|
||||||
|
out["required"] = valid
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
@ -590,8 +590,14 @@ def _load_enabled_toolsets() -> list[str] | None:
|
||||||
from hermes_cli.config import load_config
|
from hermes_cli.config import load_config
|
||||||
from hermes_cli.tools_config import _get_platform_tools
|
from hermes_cli.tools_config import _get_platform_tools
|
||||||
|
|
||||||
|
# Runtime toolset resolution must include default MCP servers so the
|
||||||
|
# agent can actually call them. Passing ``False`` here is the
|
||||||
|
# config-editing variant — used when we need to persist a toolset
|
||||||
|
# list without baking in implicit MCP defaults. Using the wrong
|
||||||
|
# variant at agent creation time makes MCP tools silently missing
|
||||||
|
# from the TUI. See PR #3252 for the original design split.
|
||||||
enabled = sorted(
|
enabled = sorted(
|
||||||
_get_platform_tools(load_config(), "cli", include_default_mcp_servers=False)
|
_get_platform_tools(load_config(), "cli", include_default_mcp_servers=True)
|
||||||
)
|
)
|
||||||
return enabled or None
|
return enabled or None
|
||||||
except Exception:
|
except Exception:
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue