mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
Third-party gateways that speak the native Anthropic protocol (MiniMax,
Zhipu GLM, Alibaba DashScope, Kimi, LiteLLM proxies) now work end-to-end
with the same feature set as direct api.anthropic.com callers. Synthesizes
eight stale community PRs into one consolidated change.
Six fixes:
- URL detection: consolidate three inline `endswith("/anthropic")`
checks in runtime_provider.py into the shared _detect_api_mode_for_url
helper. Third-party /anthropic endpoints now auto-resolve to
api_mode=anthropic_messages via one code path instead of three.
- OAuth leak-guard: all five sites that assign `_is_anthropic_oauth`
(__init__, switch_model, _try_refresh_anthropic_client_credentials,
_swap_credential, _try_activate_fallback) now gate on
`provider == "anthropic"` so a stale ANTHROPIC_TOKEN never trips
Claude-Code identity injection on third-party endpoints. Previously
only 2 of 5 sites were guarded.
- Prompt caching: new method `_anthropic_prompt_cache_policy()` returns
`(should_cache, use_native_layout)` per endpoint. Replaces three
inline conditions and the `native_anthropic=(api_mode=='anthropic_messages')`
call-site flag. Native Anthropic and third-party Anthropic gateways
both get the native cache_control layout; OpenRouter gets envelope
layout. Layout is persisted in `_primary_runtime` so fallback
restoration preserves the per-endpoint choice.
- Auxiliary client: `_try_custom_endpoint` honors
`api_mode=anthropic_messages` and builds `AnthropicAuxiliaryClient`
instead of silently downgrading to an OpenAI-wire client. Degrades
gracefully to OpenAI-wire when the anthropic SDK isn't installed.
- Config hygiene: `_update_config_for_provider` (hermes_cli/auth.py)
clears stale `api_key`/`api_mode` when switching to a built-in
provider, so a previous MiniMax custom endpoint's credentials can't
leak into a later OpenRouter session.
- Truncation continuation: length-continuation and tool-call-truncation
retry now cover `anthropic_messages` in addition to `chat_completions`
and `bedrock_converse`. Reuses the existing `_build_assistant_message`
path via `normalize_anthropic_response()` so the interim message
shape is byte-identical to the non-truncated path.
Tests: 6 new files, 42 test cases. Targeted run + tests/run_agent,
tests/agent, tests/hermes_cli all pass (4554 passed).
Synthesized from (credits preserved via Co-authored-by trailers):
#7410 @nocoo — URL detection helper
#7393 @keyuyuan — OAuth 5-site guard
#7367 @n-WN — OAuth guard (narrower cousin, kept comment)
#8636 @sgaofen — caching helper + native-vs-proxy layout split
#10954 @Only-Code-A — caching on anthropic_messages+Claude
#7648 @zhongyueming1121 — aux client anthropic_messages branch
#6096 @hansnow — /model switch clears stale api_mode
#9691 @TroyMitchell911 — anthropic_messages truncation continuation
Closes: #7366, #8294 (third-party Anthropic identity + caching).
Supersedes: #7410, #7367, #7393, #8636, #10954, #7648, #6096, #9691.
Rejects: #9621 (OpenAI-wire caching with incomplete blocklist — risky),
#7242 (superseded by #9691, stale branch),
#8321 (targets smart_model_routing which was removed in #12732).
Co-authored-by: nocoo <nocoo@users.noreply.github.com>
Co-authored-by: Keyu Yuan <leoyuan0099@gmail.com>
Co-authored-by: Zoee <30841158+n-WN@users.noreply.github.com>
Co-authored-by: sgaofen <135070653+sgaofen@users.noreply.github.com>
Co-authored-by: Only-Code-A <bxzt2006@163.com>
Co-authored-by: zhongyueming <mygamez@163.com>
Co-authored-by: Xiaohan Li <hansnow@users.noreply.github.com>
Co-authored-by: Troy Mitchell <i@troy-y.org>
152 lines
5.7 KiB
Python
152 lines
5.7 KiB
Python
"""Tests for AIAgent._anthropic_prompt_cache_policy().
|
|
|
|
The policy returns ``(should_cache, use_native_layout)`` for five endpoint
|
|
classes. The test matrix pins the decision for each so a regression (e.g.
|
|
silently dropping caching on third-party Anthropic gateways, or applying
|
|
the native layout on OpenRouter) surfaces loudly.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from unittest.mock import MagicMock
|
|
|
|
from run_agent import AIAgent
|
|
|
|
|
|
def _make_agent(
    *,
    provider: str = "openrouter",
    base_url: str = "https://openrouter.ai/api/v1",
    api_mode: str = "chat_completions",
    model: str = "anthropic/claude-sonnet-4.6",
) -> AIAgent:
    """Build a bare AIAgent carrying only the attributes the cache policy reads.

    ``AIAgent.__new__`` deliberately bypasses ``__init__`` so no network
    clients or credential files are touched; a MagicMock stands in for the
    wire client and ``quiet_mode`` keeps the stub silent.
    """
    stub = AIAgent.__new__(AIAgent)
    for attr, value in {
        "provider": provider,
        "base_url": base_url,
        "api_mode": api_mode,
        "model": model,
        "_base_url_lower": (base_url or "").lower(),
        "client": MagicMock(),
        "quiet_mode": True,
    }.items():
        setattr(stub, attr, value)
    return stub


class TestNativeAnthropic:
    """Claude on api.anthropic.com: caching on, native cache_control layout."""

    def test_claude_on_native_anthropic_caches_with_native_layout(self):
        stub = _make_agent(
            provider="anthropic",
            base_url="https://api.anthropic.com",
            api_mode="anthropic_messages",
            model="claude-sonnet-4-6",
        )
        decision = stub._anthropic_prompt_cache_policy()
        assert decision == (True, True)

    def test_api_anthropic_host_detected_even_when_provider_label_differs(self):
        # Pool configurations sometimes label native Anthropic as
        # "anthropic-direct" or similar; the hostname fallback must keep
        # caching enabled regardless of the label.
        stub = _make_agent(
            provider="anthropic-direct",
            base_url="https://api.anthropic.com",
            api_mode="anthropic_messages",
            model="claude-opus-4.6",
        )
        decision = stub._anthropic_prompt_cache_policy()
        assert decision == (True, True)


class TestOpenRouter:
    """Claude via OpenRouter caches, but only with the envelope layout."""

    def test_claude_on_openrouter_caches_with_envelope_layout(self):
        stub = _make_agent(
            provider="openrouter",
            base_url="https://openrouter.ai/api/v1",
            api_mode="chat_completions",
            model="anthropic/claude-sonnet-4.6",
        )
        should_cache, native_layout = stub._anthropic_prompt_cache_policy()
        assert should_cache is True
        # OpenRouter uses the envelope layout, never native cache_control.
        assert native_layout is False

    def test_non_claude_on_openrouter_does_not_cache(self):
        stub = _make_agent(
            provider="openrouter",
            base_url="https://openrouter.ai/api/v1",
            api_mode="chat_completions",
            model="openai/gpt-5.4",
        )
        decision = stub._anthropic_prompt_cache_policy()
        assert decision == (False, False)


class TestThirdPartyAnthropicGateway:
    """Third-party gateways speaking the Anthropic protocol (MiniMax, Zhipu GLM, LiteLLM)."""

    def test_minimax_claude_via_anthropic_messages(self):
        stub = _make_agent(
            provider="custom",
            base_url="https://api.minimax.io/anthropic",
            api_mode="anthropic_messages",
            model="claude-sonnet-4-6",
        )
        should_cache, native_layout = stub._anthropic_prompt_cache_policy()
        assert should_cache is True, "Third-party Anthropic gateway with Claude must cache"
        assert native_layout is True, "Third-party Anthropic gateway uses native cache_control layout"

    def test_third_party_without_claude_name_does_not_cache(self):
        # A GLM-style model served over anthropic_messages transport: whether
        # it supports cache_control is unknown, so the policy stays
        # conservative and keeps caching off.
        stub = _make_agent(
            provider="custom",
            base_url="https://api.minimax.io/anthropic",
            api_mode="anthropic_messages",
            model="minimax-m2.7",
        )
        decision = stub._anthropic_prompt_cache_policy()
        assert decision == (False, False)


class TestOpenAIWireFormatOnCustomProvider:
    """A custom provider using chat_completions (OpenAI wire) should NOT get caching."""

    def test_custom_openai_wire_does_not_cache_even_with_claude_name(self):
        # The blocklist risk #9621 failed to avoid: sending cache_control
        # fields in OpenAI-wire JSON can trip strict providers that reject
        # unknown keys. Caching stays off unless the transport is explicitly
        # anthropic_messages or the aggregator is OpenRouter.
        stub = _make_agent(
            provider="custom",
            base_url="https://api.fireworks.ai/inference/v1",
            api_mode="chat_completions",
            model="claude-sonnet-4",
        )
        decision = stub._anthropic_prompt_cache_policy()
        assert decision == (False, False)


class TestExplicitOverrides:
    """Policy accepts keyword overrides for switch_model / fallback activation."""

    def test_overrides_take_precedence_over_self(self):
        stub = _make_agent(
            provider="openrouter",
            base_url="https://openrouter.ai/api/v1",
            api_mode="chat_completions",
            model="openai/gpt-5.4",
        )
        # switch_model evaluates cache policy for the Claude target before
        # self.model is mutated, so the keyword override must win.
        decision = stub._anthropic_prompt_cache_policy(
            model="anthropic/claude-sonnet-4.6",
        )
        assert decision == (True, False)

    def test_fallback_target_evaluated_independently(self):
        # Start on native Anthropic, then evaluate a fallback to OpenRouter:
        # the overrides describe the target endpoint, not the current one.
        stub = _make_agent(
            provider="anthropic",
            base_url="https://api.anthropic.com",
            api_mode="anthropic_messages",
            model="claude-opus-4.6",
        )
        decision = stub._anthropic_prompt_cache_policy(
            provider="openrouter",
            base_url="https://openrouter.ai/api/v1",
            api_mode="chat_completions",
            model="anthropic/claude-sonnet-4.6",
        )
        assert decision == (True, False)