fix(tools): deduplicate tool names at API boundary for Vertex/Azure/Bedrock

Providers like Google Vertex, Azure, and Amazon Bedrock reject API
requests with duplicate tool names (HTTP 400: 'Tool names must be
unique').  The upstream injection paths in run_agent.py already dedup
after PR #17335, but two API-boundary functions pass tools through
without checking:

- agent/auxiliary_client.py: _build_call_kwargs() (all non-Anthropic
  providers in chat_completions mode)
- agent/anthropic_adapter.py: convert_tools_to_anthropic() (Anthropic
  Messages API path)

Add defensive dedup guards at both sites.  Duplicates are dropped with
a warning log, converting a hard 400 failure into a recoverable
condition.  This is intentionally conservative — the root-cause dedup
in run_agent.py is the primary defense; these guards add resilience
against future injection-path regressions.
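
For context, the guard at both sites amounts to a first-wins filter
keyed on the tool name.  A minimal sketch of the shape, assuming
OpenAI-style tool dicts and an ordinary module logger (the helper name
and logging wiring here are illustrative, not the exact implementation):

    import logging

    logger = logging.getLogger(__name__)

    def _dedup_tools_by_name(tools):
        """Keep the first occurrence of each tool name; drop and warn on the rest."""
        seen, unique = set(), []
        for tool in tools or []:
            name = tool.get("function", {}).get("name")
            if name in seen:
                logger.warning("Dropping duplicate tool definition: %s", name)
                continue
            seen.add(name)
            unique.append(tool)
        return unique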

Includes 8 new tests covering unique passthrough, duplicate removal,
and empty/None edge cases.

Closes #18478
liuhao1024 2026-05-02 03:33:13 +08:00 committed by Teknium
parent 699b3679bc
commit 9bf260472b
4 changed files with 153 additions and 2 deletions

@@ -1836,3 +1836,55 @@ class TestResolveMessagesMaxTokens:
        result = _resolve_anthropic_messages_max_tokens(0.5, "claude-opus-4-6")
        assert result > 0
        assert result != 0


# ---------------------------------------------------------------------------
# convert_tools_to_anthropic — tool dedup at API boundary
# ---------------------------------------------------------------------------


class TestConvertToolsToAnthropicDedup:
    """convert_tools_to_anthropic must deduplicate tool names.

    Anthropic rejects requests with duplicate tool names. This guard converts
    a hard failure into a warning log. See:
    https://github.com/NousResearch/hermes-agent/issues/18478
    """

    def _make_openai_tool(self, name: str) -> dict:
        return {
            "type": "function",
            "function": {
                "name": name,
                "description": f"Tool {name}",
                "parameters": {"type": "object", "properties": {}},
            },
        }

    def test_unique_tools_pass_through(self):
        tools = [self._make_openai_tool("alpha"), self._make_openai_tool("beta")]
        result = convert_tools_to_anthropic(tools)
        assert len(result) == 2
        names = [t["name"] for t in result]
        assert names == ["alpha", "beta"]

    def test_duplicate_tool_names_are_deduplicated(self):
        """RED test — must fail until dedup guard is added."""
        tools = [
            self._make_openai_tool("lcm_grep"),
            self._make_openai_tool("lcm_describe"),
            self._make_openai_tool("lcm_grep"),  # duplicate
            self._make_openai_tool("lcm_expand"),
            self._make_openai_tool("lcm_describe"),  # duplicate
        ]
        result = convert_tools_to_anthropic(tools)
        names = [t["name"] for t in result]
        assert len(names) == len(set(names)), (
            f"Duplicate tool names found: {names}"
        )
        assert len(result) == 3  # lcm_grep, lcm_describe, lcm_expand

    def test_empty_tools_returns_empty(self):
        assert convert_tools_to_anthropic([]) == []

    def test_none_tools_returns_empty(self):
        assert convert_tools_to_anthropic(None) == []