fix(agent): propagate api_mode to vision provider resolution

resolve_vision_provider_client() computed resolved_api_mode from config
but never passed it to downstream resolve_provider_client() or
_get_cached_client() calls, causing custom providers with
api_mode: anthropic_messages to crash when used for vision tasks.

Also remove the for_vision special case in _normalize_aux_provider()
that incorrectly discarded named custom provider identifiers.

Fixes #8857

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
luyao618 2026-04-13 16:08:19 +08:00 committed by Teknium
parent e3ffe5b75f
commit 8ec1608642
2 changed files with 31 additions and 5 deletions

View file

@@ -71,13 +71,13 @@ _PROVIDER_ALIASES = {
}
def _normalize_aux_provider(provider: Optional[str], *, for_vision: bool = False) -> str:
def _normalize_aux_provider(provider: Optional[str]) -> str:
normalized = (provider or "auto").strip().lower()
if normalized.startswith("custom:"):
suffix = normalized.split(":", 1)[1].strip()
if not suffix:
return "custom"
normalized = suffix if not for_vision else "custom"
normalized = suffix
if normalized == "codex":
return "openai-codex"
if normalized == "main":
@@ -1603,7 +1603,7 @@ _VISION_AUTO_PROVIDER_ORDER = (
def _normalize_vision_provider(provider: Optional[str]) -> str:
    # Normalize a vision-provider identifier using the shared aux-provider rules.
    # NOTE(review): this span is diff residue — the first return below is the
    # removed (pre-change) line that passed for_vision=True; the second is its
    # replacement in the post-commit file. Only one exists in either version.
    return _normalize_aux_provider(provider, for_vision=True)
    return _normalize_aux_provider(provider)
def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]:
@@ -1686,6 +1686,7 @@ def resolve_vision_provider_client(
async_mode=async_mode,
explicit_base_url=resolved_base_url,
explicit_api_key=resolved_api_key,
api_mode=resolved_api_mode,
)
if client is None:
return "custom", None, None
@@ -1710,7 +1711,8 @@ def resolve_vision_provider_client(
# Use provider-specific vision model if available, otherwise main model.
vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model)
rpc_client, rpc_model = resolve_provider_client(
main_provider, vision_model)
main_provider, vision_model,
api_mode=resolved_api_mode)
if rpc_client is not None:
logger.info(
"Vision auto-detect: using active provider %s (%s)",
@@ -1734,7 +1736,8 @@ def resolve_vision_provider_client(
sync_client, default_model = _resolve_strict_vision_backend(requested)
return _finalize(requested, sync_client, default_model)
client, final_model = _get_cached_client(requested, resolved_model, async_mode)
client, final_model = _get_cached_client(requested, resolved_model, async_mode,
api_mode=resolved_api_mode)
if client is None:
return requested, None, None
return requested, client, final_model

View file

@@ -58,6 +58,10 @@ class TestNormalizeVisionProvider:
assert _normalize_vision_provider("beans") == "beans"
assert _normalize_vision_provider("deepseek") == "deepseek"
def test_custom_colon_named_provider_preserved(self):
    """A "custom:<name>" identifier keeps its bare provider name."""
    from agent.auxiliary_client import _normalize_vision_provider

    normalized = _normalize_vision_provider("custom:beans")
    assert normalized == "beans"
def test_codex_alias_still_works(self):
    """The "codex" alias continues to resolve to "openai-codex"."""
    from agent.auxiliary_client import _normalize_vision_provider

    normalized = _normalize_vision_provider("codex")
    assert normalized == "openai-codex"
@@ -229,3 +233,22 @@ class TestResolveVisionProviderClientModelNormalization:
assert provider == "zai"
assert client is not None
assert model == "glm-5.1"
class TestVisionPathApiMode:
    """Vision path should propagate api_mode to _get_cached_client."""

    def test_explicit_provider_passes_api_mode(self, tmp_path):
        # Configure a vision-specific api_mode so the resolver has one to forward.
        config = {
            "model": {"default": "test-model"},
            "auxiliary": {"vision": {"api_mode": "chat_completions"}},
        }
        _write_config(tmp_path, config)
        with patch("agent.auxiliary_client._get_cached_client") as cached_client_mock:
            cached_client_mock.return_value = (MagicMock(), "test-model")
            from agent.auxiliary_client import resolve_vision_provider_client
            provider, client, model = resolve_vision_provider_client(provider="deepseek")
            cached_client_mock.assert_called_once()
            # The resolved api_mode must reach the cache lookup as a kwarg.
            _, call_kwargs = cached_client_mock.call_args
            assert call_kwargs.get("api_mode") == "chat_completions"