refactor: collapse normalize_anthropic_response to return NormalizedResponse directly

3-layer chain (transport → v2 → v1) was collapsed to 2-layer in PR 7.
This collapses the remaining 2-layer (transport → v1 → NR mapping in
transport) to 1-layer: v1 now returns NormalizedResponse directly.

Before: adapter returns (SimpleNamespace, finish_reason) tuple,
  transport unpacks and maps to NormalizedResponse (22 lines).
After: adapter returns NormalizedResponse, transport is a
  1-line passthrough.

Also updates ToolCall construction — adapter now creates ToolCall
dataclass directly instead of SimpleNamespace(id, type, function).

WS1 item 1 of Cycle 2 (#14418).
This commit is contained in:
kshitijk4poor 2026-04-23 13:49:18 +05:30 committed by Teknium
parent 738d0900fd
commit f4612785a4
4 changed files with 63 additions and 91 deletions

View file

@@ -1602,15 +1602,17 @@ def build_anthropic_kwargs(
def normalize_anthropic_response(
response,
strip_tool_prefix: bool = False,
) -> Tuple[SimpleNamespace, str]:
"""Normalize Anthropic response to match the shape expected by AIAgent.
) -> "NormalizedResponse":
"""Normalize Anthropic response to NormalizedResponse.
Returns (assistant_message, finish_reason) where assistant_message has
.content, .tool_calls, and .reasoning attributes.
Returns a NormalizedResponse with content, tool_calls, finish_reason,
reasoning, and provider_data fields.
When *strip_tool_prefix* is True, removes the ``mcp_`` prefix that was
added to tool names for OAuth Claude Code compatibility.
"""
from agent.transports.types import NormalizedResponse, ToolCall
text_parts = []
reasoning_parts = []
reasoning_details = []
@@ -1629,23 +1631,13 @@ def normalize_anthropic_response(
if strip_tool_prefix and name.startswith(_MCP_TOOL_PREFIX):
name = name[len(_MCP_TOOL_PREFIX):]
tool_calls.append(
SimpleNamespace(
ToolCall(
id=block.id,
type="function",
function=SimpleNamespace(
name=name,
arguments=json.dumps(block.input),
),
name=name,
arguments=json.dumps(block.input),
)
)
# Map Anthropic stop_reason to OpenAI finish_reason.
# Newer stop reasons added in Claude 4.5+ / 4.7:
# - refusal: the model declined to answer (cyber safeguards, CSAM, etc.)
# - model_context_window_exceeded: hit context limit (not max_tokens)
# Both need distinct handling upstream — a refusal should surface to the
# user with a clear message, and a context-window overflow should trigger
# compression/truncation rather than be treated as normal end-of-turn.
stop_reason_map = {
"end_turn": "stop",
"tool_use": "tool_calls",
@@ -1656,13 +1648,15 @@ def normalize_anthropic_response(
}
finish_reason = stop_reason_map.get(response.stop_reason, "stop")
return (
SimpleNamespace(
content="\n".join(text_parts) if text_parts else None,
tool_calls=tool_calls or None,
reasoning="\n\n".join(reasoning_parts) if reasoning_parts else None,
reasoning_content=None,
reasoning_details=reasoning_details or None,
),
finish_reason,
provider_data = {}
if reasoning_details:
provider_data["reasoning_details"] = reasoning_details
return NormalizedResponse(
content="\n".join(text_parts) if text_parts else None,
tool_calls=tool_calls or None,
finish_reason=finish_reason,
reasoning="\n\n".join(reasoning_parts) if reasoning_parts else None,
usage=None,
provider_data=provider_data or None,
)

View file

@@ -78,34 +78,12 @@ class AnthropicTransport(ProviderTransport):
def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
"""Normalize Anthropic response to NormalizedResponse.
Calls the adapter's v1 normalize and maps the (SimpleNamespace, finish_reason)
tuple to the shared NormalizedResponse type.
Delegates directly to the adapter which now returns NormalizedResponse.
"""
from agent.anthropic_adapter import normalize_anthropic_response
from agent.transports.types import build_tool_call
strip_tool_prefix = kwargs.get("strip_tool_prefix", False)
assistant_msg, finish_reason = normalize_anthropic_response(response, strip_tool_prefix)
tool_calls = None
if assistant_msg.tool_calls:
tool_calls = [
build_tool_call(id=tc.id, name=tc.function.name, arguments=tc.function.arguments)
for tc in assistant_msg.tool_calls
]
provider_data = {}
if getattr(assistant_msg, "reasoning_details", None):
provider_data["reasoning_details"] = assistant_msg.reasoning_details
return NormalizedResponse(
content=assistant_msg.content,
tool_calls=tool_calls,
finish_reason=finish_reason,
reasoning=getattr(assistant_msg, "reasoning", None),
usage=None,
provider_data=provider_data or None,
)
return normalize_anthropic_response(response, strip_tool_prefix)
def validate_response(self, response: Any) -> bool:
"""Check Anthropic response structure is valid.

View file

@@ -1242,10 +1242,10 @@ class TestNormalizeResponse:
def test_text_response(self):
block = SimpleNamespace(type="text", text="Hello world")
msg, reason = normalize_anthropic_response(self._make_response([block]))
assert msg.content == "Hello world"
assert reason == "stop"
assert msg.tool_calls is None
nr = normalize_anthropic_response(self._make_response([block]))
assert nr.content == "Hello world"
assert nr.finish_reason == "stop"
assert nr.tool_calls is None
def test_tool_use_response(self):
blocks = [
@@ -1257,24 +1257,24 @@ class TestNormalizeResponse:
input={"query": "test"},
),
]
msg, reason = normalize_anthropic_response(
nr = normalize_anthropic_response(
self._make_response(blocks, "tool_use")
)
assert msg.content == "Searching..."
assert reason == "tool_calls"
assert len(msg.tool_calls) == 1
assert msg.tool_calls[0].function.name == "search"
assert json.loads(msg.tool_calls[0].function.arguments) == {"query": "test"}
assert nr.content == "Searching..."
assert nr.finish_reason == "tool_calls"
assert len(nr.tool_calls) == 1
assert nr.tool_calls[0].name == "search"
assert json.loads(nr.tool_calls[0].arguments) == {"query": "test"}
def test_thinking_response(self):
blocks = [
SimpleNamespace(type="thinking", thinking="Let me reason about this..."),
SimpleNamespace(type="text", text="The answer is 42."),
]
msg, reason = normalize_anthropic_response(self._make_response(blocks))
assert msg.content == "The answer is 42."
assert msg.reasoning == "Let me reason about this..."
assert msg.reasoning_details == [{"type": "thinking", "thinking": "Let me reason about this..."}]
nr = normalize_anthropic_response(self._make_response(blocks))
assert nr.content == "The answer is 42."
assert nr.reasoning == "Let me reason about this..."
assert nr.provider_data["reasoning_details"] == [{"type": "thinking", "thinking": "Let me reason about this..."}]
def test_thinking_response_preserves_signature(self):
blocks = [
@@ -1285,24 +1285,24 @@ class TestNormalizeResponse:
redacted=False,
),
]
msg, _ = normalize_anthropic_response(self._make_response(blocks))
assert msg.reasoning_details[0]["signature"] == "opaque_signature"
assert msg.reasoning_details[0]["thinking"] == "Let me reason about this..."
nr = normalize_anthropic_response(self._make_response(blocks))
assert nr.provider_data["reasoning_details"][0]["signature"] == "opaque_signature"
assert nr.provider_data["reasoning_details"][0]["thinking"] == "Let me reason about this..."
def test_stop_reason_mapping(self):
block = SimpleNamespace(type="text", text="x")
_, r1 = normalize_anthropic_response(
nr1 = normalize_anthropic_response(
self._make_response([block], "end_turn")
)
_, r2 = normalize_anthropic_response(
nr2 = normalize_anthropic_response(
self._make_response([block], "tool_use")
)
_, r3 = normalize_anthropic_response(
nr3 = normalize_anthropic_response(
self._make_response([block], "max_tokens")
)
assert r1 == "stop"
assert r2 == "tool_calls"
assert r3 == "length"
assert nr1.finish_reason == "stop"
assert nr2.finish_reason == "tool_calls"
assert nr3.finish_reason == "length"
def test_stop_reason_refusal_and_context_exceeded(self):
# Claude 4.5+ introduced two new stop_reason values the Messages API
@@ -1310,24 +1310,24 @@ class TestNormalizeResponse:
# handlers already understand, instead of silently collapsing to
# "stop" (old behavior).
block = SimpleNamespace(type="text", text="")
_, refusal_reason = normalize_anthropic_response(
nr_refusal = normalize_anthropic_response(
self._make_response([block], "refusal")
)
_, overflow_reason = normalize_anthropic_response(
nr_overflow = normalize_anthropic_response(
self._make_response([block], "model_context_window_exceeded")
)
assert refusal_reason == "content_filter"
assert overflow_reason == "length"
assert nr_refusal.finish_reason == "content_filter"
assert nr_overflow.finish_reason == "length"
def test_no_text_content(self):
block = SimpleNamespace(
type="tool_use", id="tc_1", name="search", input={"q": "hi"}
)
msg, reason = normalize_anthropic_response(
nr = normalize_anthropic_response(
self._make_response([block], "tool_use")
)
assert msg.content is None
assert len(msg.tool_calls) == 1
assert nr.content is None
assert len(nr.tool_calls) == 1
# ---------------------------------------------------------------------------

View file

@@ -56,18 +56,18 @@ class TestTruncatedAnthropicResponseNormalization:
response = _make_anthropic_response(
[_make_anthropic_text_block("partial response that was cut off")]
)
msg, finish = normalize_anthropic_response(response)
nr = normalize_anthropic_response(response)
# The continuation block checks these two attributes:
# assistant_message.content → appended to truncated_response_prefix
# assistant_message.tool_calls → guards the text-retry branch
assert msg.content is not None
assert "partial response" in msg.content
assert not msg.tool_calls, (
assert nr.content is not None
assert "partial response" in nr.content
assert not nr.tool_calls, (
"Pure-text truncation must have no tool_calls so the text-continuation "
"branch (not the tool-retry branch) fires"
)
assert finish == "length", "max_tokens stop_reason must map to OpenAI-style 'length'"
assert nr.finish_reason == "length", "max_tokens stop_reason must map to OpenAI-style 'length'"
def test_truncated_tool_call_produces_tool_calls(self):
"""Tool-use truncation → tool-call retry path should fire."""
@@ -79,24 +79,24 @@ class TestTruncatedAnthropicResponseNormalization:
_make_anthropic_tool_use_block(),
]
)
msg, finish = normalize_anthropic_response(response)
nr = normalize_anthropic_response(response)
assert bool(msg.tool_calls), (
assert bool(nr.tool_calls), (
"Truncation mid-tool_use must expose tool_calls so the "
"tool-call retry branch fires instead of text continuation"
)
assert finish == "length"
assert nr.finish_reason == "length"
def test_empty_content_does_not_crash(self):
"""Empty response.content — defensive: treat as a truncation with no text."""
from agent.anthropic_adapter import normalize_anthropic_response
response = _make_anthropic_response([])
msg, finish = normalize_anthropic_response(response)
nr = normalize_anthropic_response(response)
# Depending on the adapter, content may be "" or None — both are
# acceptable; what matters is no exception.
assert msg is not None
assert not msg.tool_calls
assert nr is not None
assert not nr.tool_calls
class TestContinuationLogicBranching: