mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
Consolidate 4 per-transport lazy singleton helpers (_get_anthropic_transport, _get_codex_transport, _get_chat_completions_transport, _get_bedrock_transport) into one generic _get_transport(api_mode) with a shared dict cache. Collapse the 65-line main normalize block (3 api_mode branches, each with its own SimpleNamespace shim) into 7 lines: one _get_transport() call + one _nr_to_assistant_message() shared shim. The shim extracts provider_data fields (codex_reasoning_items, reasoning_details, call_id, response_item_id) into the SimpleNamespace shape downstream code expects. Wire chat_completions and bedrock_converse normalize through their transports for the first time — these were previously falling into the raw response.choices[0].message else branch. Remove 8 dead codex adapter imports that have zero callers after PRs 1-6. Transport lifecycle improvements: - Eagerly warm transport cache at __init__ (surfaces import errors early) - Invalidate transport cache on api_mode change (switch_model, fallback activation, fallback restore, transport recovery) — prevents stale transport after mid-session provider switch run_agent.py: -32 net lines (11,988 -> 11,956). PR 7 of the provider transport refactor.
156 lines
5.7 KiB
Python
"""Anthropic Messages API transport.
|
|
|
|
Delegates to the existing adapter functions in agent/anthropic_adapter.py.
|
|
This transport owns format conversion and normalization — NOT client lifecycle.
|
|
"""
|
|
|
|
from typing import Any, Dict, List, Optional
|
|
|
|
from agent.transports.base import ProviderTransport
|
|
from agent.transports.types import NormalizedResponse
|
|
|
|
|
|
class AnthropicTransport(ProviderTransport):
    """Transport for api_mode='anthropic_messages'.

    Wraps the existing functions in anthropic_adapter.py behind the
    ProviderTransport ABC. Each method delegates — no logic is duplicated.
    This transport owns format conversion and normalization, NOT client
    lifecycle.
    """

    # Canonical Anthropic stop_reason -> OpenAI finish_reason mapping,
    # mirrored from the adapter so normalization and map_finish_reason agree.
    # NOTE: this is a *class* attribute shared by all instances — it is read
    # via ``self._STOP_REASON_MAP`` in map_finish_reason(). An earlier comment
    # described it as "module level", which was inaccurate.
    _STOP_REASON_MAP: Dict[str, str] = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }

    @property
    def api_mode(self) -> str:
        """Registry key this transport is registered under."""
        return "anthropic_messages"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI messages to an Anthropic (system, messages) tuple.

        kwargs:
            base_url: Optional[str] — affects thinking signature handling.
        """
        # Imported lazily — presumably to defer/avoid a circular import with
        # the adapter module; confirm before hoisting to module level.
        from agent.anthropic_adapter import convert_messages_to_anthropic

        base_url = kwargs.get("base_url")
        return convert_messages_to_anthropic(messages, base_url=base_url)

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI tool schemas to Anthropic input_schema format."""
        from agent.anthropic_adapter import convert_tools_to_anthropic

        return convert_tools_to_anthropic(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build Anthropic messages.create() kwargs.

        Calls convert_messages and convert_tools internally.

        params (all optional):
            max_tokens: int (defaults to 16384)
            reasoning_config: dict | None
            tool_choice: str | None
            is_oauth: bool
            preserve_dots: bool
            context_length: int | None
            base_url: str | None
            fast_mode: bool
        """
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 16384),
            reasoning_config=params.get("reasoning_config"),
            tool_choice=params.get("tool_choice"),
            is_oauth=params.get("is_oauth", False),
            preserve_dots=params.get("preserve_dots", False),
            context_length=params.get("context_length"),
            base_url=params.get("base_url"),
            fast_mode=params.get("fast_mode", False),
        )

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize an Anthropic response to a NormalizedResponse.

        Calls the adapter's v1 normalize and maps the
        (SimpleNamespace, finish_reason) tuple to the shared
        NormalizedResponse type.

        kwargs:
            strip_tool_prefix: bool — forwarded to the adapter normalizer
                (defaults to False).
        """
        from agent.anthropic_adapter import normalize_anthropic_response
        from agent.transports.types import build_tool_call

        strip_tool_prefix = kwargs.get("strip_tool_prefix", False)
        assistant_msg, finish_reason = normalize_anthropic_response(
            response, strip_tool_prefix
        )

        tool_calls = None
        if assistant_msg.tool_calls:
            tool_calls = [
                build_tool_call(
                    id=tc.id,
                    name=tc.function.name,
                    arguments=tc.function.arguments,
                )
                for tc in assistant_msg.tool_calls
            ]

        # Only reasoning_details is surfaced in provider_data for this mode.
        provider_data = {}
        if getattr(assistant_msg, "reasoning_details", None):
            provider_data["reasoning_details"] = assistant_msg.reasoning_details

        return NormalizedResponse(
            content=assistant_msg.content,
            tool_calls=tool_calls,
            finish_reason=finish_reason,
            reasoning=getattr(assistant_msg, "reasoning", None),
            # Usage is intentionally not populated here; cache token counts
            # are read separately via extract_cache_stats().
            usage=None,
            provider_data=provider_data or None,
        )

    def validate_response(self, response: Any) -> bool:
        """Check Anthropic response structure is valid.

        An empty content list is legitimate when ``stop_reason == "end_turn"``
        — the model's canonical way of signalling "nothing more to add" after
        a tool turn that already delivered the user-facing text. Treating it
        as invalid falsely retries a completed response.
        """
        if response is None:
            return False
        content_blocks = getattr(response, "content", None)
        if not isinstance(content_blocks, list):
            return False
        if not content_blocks:
            # Empty content + end_turn means "completed", not "broken".
            return getattr(response, "stop_reason", None) == "end_turn"
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Extract Anthropic cache_read and cache_creation token counts.

        Returns None when usage is absent or both counters are zero.
        """
        usage = getattr(response, "usage", None)
        if usage is None:
            return None
        # `or 0` guards against explicit None values in the usage object.
        cached = getattr(usage, "cache_read_input_tokens", 0) or 0
        written = getattr(usage, "cache_creation_input_tokens", 0) or 0
        if cached or written:
            return {"cached_tokens": cached, "creation_tokens": written}
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Map an Anthropic stop_reason to an OpenAI finish_reason.

        Unknown stop_reasons fall back to "stop".
        """
        return self._STOP_REASON_MAP.get(raw_reason, "stop")
|
|
|
|
|
|
# Auto-register on import: importing this module makes the transport
# discoverable under the "anthropic_messages" api_mode key. The import is
# deliberately placed after the class definition (noqa: E402) — presumably
# to avoid a circular import with the agent.transports package; confirm
# before moving it to the top of the file.
from agent.transports import register_transport  # noqa: E402

register_transport("anthropic_messages", AnthropicTransport)
|