mirror of https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
feat: provider modules — ProviderProfile ABC, 33 providers, fetch_models, transport single-path
Introduces the providers/ package — a single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else.

What this PR ships:

- providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes)
- ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model
- 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models
- chat_completions.build_kwargs: profile path via _build_kwargs_from_profile; legacy flag path retained for lmstudio/tencent-tokenhub (their session-aware reasoning probing doesn't map cleanly to hooks yet)
- run_agent.py: profile path for all registered providers; legacy-path variable scoping fixed (all flags defined before branching)
- Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER
- GeminiProfile: thinking_config translation (native + openai-compat nested)
- New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly)

Deltas vs the original PR (salvaged onto current main):

- Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (added to main since the original PR)
- Skipped profiles: lmstudio and tencent-tokenhub stay on the legacy path (their reasoning_effort probing has no clean hook equivalent yet)
- Removed the lmstudio alias from the custom profile (it's a separate provider now)
- Skipped openrouter/custom in the PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding them breaks runtime resolution)
- runtime_provider: profile.api_mode is used only as a fallback when URL detection finds nothing (it was breaking the minimax /v1 override)
- Preserved main's legacy-path improvements: deepseek reasoning_content preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc.
- Kept agent/copilot_acp_client.py in place (rejected the PR's relocation — main has 7 fixes landed since, and relocation would revert them)
- Kept the _API_KEY_PROVIDER_AUX_MODELS alias for backward compatibility with existing test imports

Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com>
Closes #14418
This commit is contained in:
parent 2b500ed68a
commit 20a4f79ed1

57 changed files with 3149 additions and 177 deletions
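As an illustration of the "one file, zero edits anywhere else" claim, a minimal provider module could look like the sketch below. This is not code from the PR: the provider name (acme), the concrete values, and the module-level PROFILE registration convention are assumptions; only the field names come from the ProviderProfile declarative fields listed in the commit message.

```python
# providers/acme.py -- hypothetical api-key provider (illustrative sketch,
# not part of this PR). Field names follow the declarative fields listed
# in the commit message; values and the module-level PROFILE convention
# are assumed for illustration.
from providers.base import ProviderProfile

PROFILE = ProviderProfile(
    name="acme",
    aliases=("acme-ai",),
    display_name="Acme AI",
    env_vars=("ACME_API_KEY",),
    base_url="https://api.acme.example/v1",
    default_max_tokens=16384,
)
```

Per the commit message, dropping such a file into providers/ auto-wires auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, and model_metadata._URL_TO_PROVIDER.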
@@ -71,17 +71,17 @@ class TestMinimaxThinkingSupport:
 
 class TestMinimaxAuxModel:
-    """Verify auxiliary model is standard (not highspeed)."""
+    """Verify auxiliary model is standard (not highspeed) — now reads from profiles."""
 
     def test_minimax_aux_is_standard(self):
-        from agent.auxiliary_client import _API_KEY_PROVIDER_AUX_MODELS
-        assert _API_KEY_PROVIDER_AUX_MODELS["minimax"] == "MiniMax-M2.7"
-        assert _API_KEY_PROVIDER_AUX_MODELS["minimax-cn"] == "MiniMax-M2.7"
+        from agent.auxiliary_client import _get_aux_model_for_provider
+        assert _get_aux_model_for_provider("minimax") == "MiniMax-M2.7"
+        assert _get_aux_model_for_provider("minimax-cn") == "MiniMax-M2.7"
 
     def test_minimax_aux_not_highspeed(self):
-        from agent.auxiliary_client import _API_KEY_PROVIDER_AUX_MODELS
-        assert "highspeed" not in _API_KEY_PROVIDER_AUX_MODELS["minimax"]
-        assert "highspeed" not in _API_KEY_PROVIDER_AUX_MODELS["minimax-cn"]
+        from agent.auxiliary_client import _get_aux_model_for_provider
+        assert "highspeed" not in _get_aux_model_for_provider("minimax")
+        assert "highspeed" not in _get_aux_model_for_provider("minimax-cn")
 
 
 class TestMinimaxBetaHeaders:
@@ -73,17 +73,21 @@ class TestChatCompletionsBuildKwargs:
         assert kw["tools"] == tools
 
     def test_openrouter_provider_prefs(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("openrouter")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
             model="gpt-4o", messages=msgs,
-            is_openrouter=True,
+            provider_profile=profile,
             provider_preferences={"only": ["openai"]},
         )
         assert kw["extra_body"]["provider"] == {"only": ["openai"]}
 
     def test_nous_tags(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("nous")
         msgs = [{"role": "user", "content": "Hi"}]
-        kw = transport.build_kwargs(model="gpt-4o", messages=msgs, is_nous=True)
+        kw = transport.build_kwargs(model="gpt-4o", messages=msgs, provider_profile=profile)
         assert kw["extra_body"]["tags"] == ["product=hermes-agent"]
 
     def test_reasoning_default(self, transport):
@@ -95,29 +99,36 @@ class TestChatCompletionsBuildKwargs:
         assert kw["extra_body"]["reasoning"] == {"enabled": True, "effort": "medium"}
 
     def test_nous_omits_disabled_reasoning(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("nous")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
             model="gpt-4o", messages=msgs,
+            provider_profile=profile,
             supports_reasoning=True,
-            is_nous=True,
             reasoning_config={"enabled": False},
         )
         # Nous rejects enabled=false; reasoning omitted entirely
         assert "reasoning" not in kw.get("extra_body", {})
 
     def test_ollama_num_ctx(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("custom")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
             model="llama3", messages=msgs,
+            provider_profile=profile,
             ollama_num_ctx=32768,
         )
         assert kw["extra_body"]["options"]["num_ctx"] == 32768
 
     def test_custom_think_false(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("custom")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
             model="qwen3", messages=msgs,
-            is_custom_provider=True,
+            provider_profile=profile,
             reasoning_config={"effort": "none"},
         )
         assert kw["extra_body"]["think"] is False
@@ -304,23 +315,29 @@ class TestChatCompletionsBuildKwargs:
         assert kw["max_tokens"] == 2048
 
     def test_nvidia_default_max_tokens(self, transport):
+        """NVIDIA max_tokens=16384 is now set via ProviderProfile, not legacy flag."""
+        from providers import get_provider_profile
+
+        profile = get_provider_profile("nvidia")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
-            model="glm-4.7", messages=msgs,
-            is_nvidia_nim=True,
+            model="nvidia/llama-3.1-405b-instruct",
+            messages=msgs,
             max_tokens_param_fn=lambda n: {"max_tokens": n},
+            provider_profile=profile,
         )
         # NVIDIA default: 16384
         assert kw["max_tokens"] == 16384
 
     def test_qwen_default_max_tokens(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("qwen-oauth")
         msgs = [{"role": "user", "content": "Hi"}]
         kw = transport.build_kwargs(
             model="qwen3-coder-plus", messages=msgs,
-            is_qwen_portal=True,
+            provider_profile=profile,
             max_tokens_param_fn=lambda n: {"max_tokens": n},
         )
-        # Qwen default: 65536
+        # Qwen default: 65536 from profile.default_max_tokens
         assert kw["max_tokens"] == 65536
 
     def test_anthropic_max_output_for_claude_on_aggregator(self, transport):
@@ -343,14 +360,23 @@ class TestChatCompletionsBuildKwargs:
         assert kw["service_tier"] == "priority"
 
     def test_fixed_temperature(self, transport):
+        """Fixed temperature is now set via ProviderProfile.fixed_temperature."""
+        from providers.base import ProviderProfile
         msgs = [{"role": "user", "content": "Hi"}]
-        kw = transport.build_kwargs(model="gpt-4o", messages=msgs, fixed_temperature=0.6)
+        kw = transport.build_kwargs(
+            model="gpt-4o", messages=msgs,
+            provider_profile=ProviderProfile(name="_t", fixed_temperature=0.6),
+        )
         assert kw["temperature"] == 0.6
 
     def test_omit_temperature(self, transport):
+        """Omit temperature is set via ProviderProfile with OMIT_TEMPERATURE sentinel."""
+        from providers.base import ProviderProfile, OMIT_TEMPERATURE
         msgs = [{"role": "user", "content": "Hi"}]
-        kw = transport.build_kwargs(model="gpt-4o", messages=msgs, omit_temperature=True, fixed_temperature=0.5)
-        # omit wins
+        kw = transport.build_kwargs(
+            model="gpt-4o", messages=msgs,
+            provider_profile=ProviderProfile(name="_t", fixed_temperature=OMIT_TEMPERATURE),
+        )
         assert "temperature" not in kw
@@ -358,18 +384,22 @@ class TestChatCompletionsKimi:
     """Regression tests for the Kimi/Moonshot quirks migrated into the transport."""
 
     def test_kimi_max_tokens_default(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("kimi-coding")
         kw = transport.build_kwargs(
             model="kimi-k2", messages=[{"role": "user", "content": "Hi"}],
-            is_kimi=True,
+            provider_profile=profile,
             max_tokens_param_fn=lambda n: {"max_tokens": n},
         )
-        # Kimi CLI default: 32000
+        # Kimi CLI default: 32000 from KimiProfile.default_max_tokens
         assert kw["max_tokens"] == 32000
 
     def test_kimi_reasoning_effort_top_level(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("kimi-coding")
         kw = transport.build_kwargs(
             model="kimi-k2", messages=[{"role": "user", "content": "Hi"}],
-            is_kimi=True,
+            provider_profile=profile,
             reasoning_config={"effort": "high"},
             max_tokens_param_fn=lambda n: {"max_tokens": n},
         )
@@ -387,17 +417,21 @@ class TestChatCompletionsKimi:
         assert "reasoning_effort" not in kw
 
     def test_kimi_thinking_enabled_extra_body(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("kimi-coding")
         kw = transport.build_kwargs(
             model="kimi-k2", messages=[{"role": "user", "content": "Hi"}],
-            is_kimi=True,
+            provider_profile=profile,
             max_tokens_param_fn=lambda n: {"max_tokens": n},
         )
         assert kw["extra_body"]["thinking"] == {"type": "enabled"}
 
     def test_kimi_thinking_disabled_extra_body(self, transport):
+        from providers import get_provider_profile
+        profile = get_provider_profile("kimi-coding")
         kw = transport.build_kwargs(
             model="kimi-k2", messages=[{"role": "user", "content": "Hi"}],
-            is_kimi=True,
+            provider_profile=profile,
             reasoning_config={"enabled": False},
             max_tokens_param_fn=lambda n: {"max_tokens": n},
         )
@@ -269,9 +269,9 @@ class TestGmiModelMetadata:
 
 class TestGmiAuxiliary:
     def test_aux_default_model(self):
-        from agent.auxiliary_client import _API_KEY_PROVIDER_AUX_MODELS
+        from agent.auxiliary_client import _get_aux_model_for_provider
 
-        assert _API_KEY_PROVIDER_AUX_MODELS["gmi"] == "google/gemini-3.1-flash-lite-preview"
+        assert _get_aux_model_for_provider("gmi") == "google/gemini-3.1-flash-lite-preview"
 
     def test_resolve_provider_client_uses_gmi_aux_default(self, monkeypatch):
         monkeypatch.setenv("GMI_API_KEY", "gmi-test-key")
0  tests/providers/__init__.py  Normal file
118  tests/providers/test_e2e_wiring.py  Normal file
@@ -0,0 +1,118 @@
"""E2E tests: verify _build_kwargs_from_profile produces correct output.

These tests call _build_kwargs_from_profile on the transport directly,
without importing run_agent (which would cause xdist worker contamination).
"""

import pytest

from agent.transports.chat_completions import ChatCompletionsTransport
from providers import get_provider_profile


@pytest.fixture
def transport():
    return ChatCompletionsTransport()


def _msgs():
    return [{"role": "user", "content": "hi"}]


class TestNvidiaProfileWiring:
    def test_nvidia_gets_default_max_tokens(self, transport):
        profile = get_provider_profile("nvidia")
        kwargs = transport.build_kwargs(
            model="nvidia/llama-3.1-nemotron-70b-instruct",
            messages=_msgs(),
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )
        # NVIDIA profile sets default_max_tokens=16384
        assert kwargs.get("max_tokens") == 16384

    def test_nvidia_nim_alias(self, transport):
        profile = get_provider_profile("nvidia-nim")
        assert profile is not None
        assert profile.name == "nvidia"
        assert profile.default_max_tokens == 16384

    def test_nvidia_model_passed(self, transport):
        profile = get_provider_profile("nvidia")
        kwargs = transport.build_kwargs(
            model="nvidia/test-model",
            messages=_msgs(),
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )
        assert kwargs["model"] == "nvidia/test-model"

    def test_nvidia_messages_passed(self, transport):
        profile = get_provider_profile("nvidia")
        msgs = _msgs()
        kwargs = transport.build_kwargs(
            model="nvidia/test",
            messages=msgs,
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )
        assert kwargs["messages"] == msgs


class TestDeepSeekProfileWiring:
    def test_deepseek_no_forced_max_tokens(self, transport):
        profile = get_provider_profile("deepseek")
        kwargs = transport.build_kwargs(
            model="deepseek-chat",
            messages=_msgs(),
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )
        # DeepSeek has no default_max_tokens
        assert kwargs["model"] == "deepseek-chat"
        assert kwargs.get("max_tokens") is None or "max_tokens" not in kwargs

    def test_deepseek_messages_passed(self, transport):
        profile = get_provider_profile("deepseek")
        msgs = _msgs()
        kwargs = transport.build_kwargs(
            model="deepseek-chat",
            messages=msgs,
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )
        assert kwargs["messages"] == msgs
290  tests/providers/test_profile_wiring.py  Normal file
@@ -0,0 +1,290 @@
"""Profile-path parity tests: verify profile path produces identical output to legacy flags.

Each test calls build_kwargs twice — once with legacy flags, once with provider_profile —
and asserts the output is identical. This catches any behavioral drift between the two paths.
"""

import pytest

from agent.transports.chat_completions import ChatCompletionsTransport
from providers import get_provider_profile


@pytest.fixture
def transport():
    return ChatCompletionsTransport()


def _msgs():
    return [{"role": "user", "content": "hello"}]


def _max_tokens_fn(n):
    return {"max_completion_tokens": n}


class TestNvidiaProfileParity:
    def test_max_tokens_match(self, transport):
        """NVIDIA profile sets max_tokens=16384; legacy flag is removed."""
        profile = transport.build_kwargs(
            model="nvidia/nemotron", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nvidia"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        assert profile["max_completion_tokens"] == 16384


class TestKimiProfileParity:
    def test_temperature_omitted(self, transport):
        legacy = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi-coding"), omit_temperature=True,
        )
        profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"),
        )
        assert "temperature" not in legacy
        assert "temperature" not in profile

    def test_max_tokens(self, transport):
        legacy = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi-coding"), max_tokens_param_fn=_max_tokens_fn,
        )
        profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        assert profile["max_completion_tokens"] == legacy["max_completion_tokens"] == 32000

    def test_thinking_enabled(self, transport):
        rc = {"enabled": True, "effort": "high"}
        legacy = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi-coding"), reasoning_config=rc,
        )
        profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"),
            reasoning_config=rc,
        )
        assert profile["extra_body"]["thinking"] == legacy["extra_body"]["thinking"]
        assert profile["reasoning_effort"] == legacy["reasoning_effort"] == "high"

    def test_thinking_disabled(self, transport):
        rc = {"enabled": False}
        legacy = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi-coding"), reasoning_config=rc,
        )
        profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"),
            reasoning_config=rc,
        )
        assert profile["extra_body"]["thinking"] == legacy["extra_body"]["thinking"]
        assert profile["extra_body"]["thinking"]["type"] == "disabled"
        assert "reasoning_effort" not in profile
        assert "reasoning_effort" not in legacy

    def test_reasoning_effort_default(self, transport):
        rc = {"enabled": True}
        legacy = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi-coding"), reasoning_config=rc,
        )
        profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"),
            reasoning_config=rc,
        )
        assert profile["reasoning_effort"] == legacy["reasoning_effort"] == "medium"


class TestOpenRouterProfileParity:
    def test_provider_preferences(self, transport):
        prefs = {"allow": ["anthropic"]}
        legacy = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"), provider_preferences=prefs,
        )
        profile = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            provider_preferences=prefs,
        )
        assert profile["extra_body"]["provider"] == legacy["extra_body"]["provider"]

    def test_reasoning_full_config(self, transport):
        rc = {"enabled": True, "effort": "high"}
        legacy = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"), supports_reasoning=True, reasoning_config=rc,
        )
        profile = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            supports_reasoning=True, reasoning_config=rc,
        )
        assert profile["extra_body"]["reasoning"] == legacy["extra_body"]["reasoning"]

    def test_default_reasoning(self, transport):
        legacy = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"), supports_reasoning=True,
        )
        profile = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            supports_reasoning=True,
        )
        assert profile["extra_body"]["reasoning"] == legacy["extra_body"]["reasoning"]


class TestNousProfileParity:
    def test_tags(self, transport):
        legacy = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None, provider_profile=get_provider_profile("nous"),
        )
        profile = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"),
        )
        assert profile["extra_body"]["tags"] == legacy["extra_body"]["tags"]

    def test_reasoning_omitted_when_disabled(self, transport):
        rc = {"enabled": False}
        legacy = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"), supports_reasoning=True, reasoning_config=rc,
        )
        profile = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"),
            supports_reasoning=True, reasoning_config=rc,
        )
        assert "reasoning" not in legacy.get("extra_body", {})
        assert "reasoning" not in profile.get("extra_body", {})


class TestQwenProfileParity:
    def test_max_tokens(self, transport):
        legacy = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen-oauth"), max_tokens_param_fn=_max_tokens_fn,
        )
        profile = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        assert profile["max_completion_tokens"] == legacy["max_completion_tokens"] == 65536

    def test_vl_high_resolution(self, transport):
        legacy = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None, provider_profile=get_provider_profile("qwen-oauth"),
        )
        profile = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen"),
        )
        assert profile["extra_body"]["vl_high_resolution_images"] == legacy["extra_body"]["vl_high_resolution_images"]

    def test_metadata_top_level(self, transport):
        meta = {"sessionId": "s123", "promptId": "p456"}
        legacy = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen-oauth"), qwen_session_metadata=meta,
        )
        profile = transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen"),
            qwen_session_metadata=meta,
        )
        assert profile["metadata"] == legacy["metadata"] == meta
        assert "metadata" not in profile.get("extra_body", {})

    def test_message_preprocessing(self, transport):
        """Qwen profile normalizes string content to list-of-parts."""
        msgs = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "hello"},
        ]
        profile = transport.build_kwargs(
            model="qwen3.5", messages=msgs, tools=None,
            provider_profile=get_provider_profile("qwen"),
        )
        out_msgs = profile["messages"]
        # System message content normalized + cache_control injected
        assert isinstance(out_msgs[0]["content"], list)
        assert out_msgs[0]["content"][0]["type"] == "text"
        assert "cache_control" in out_msgs[0]["content"][-1]
        # User message content normalized
        assert isinstance(out_msgs[1]["content"], list)
        assert out_msgs[1]["content"][0] == {"type": "text", "text": "hello"}


class TestDeveloperRoleParity:
    """Developer role swap must work on BOTH legacy and profile paths."""

    def test_legacy_path_swaps_for_gpt5(self, transport):
        msgs = [{"role": "system", "content": "Be helpful"}, {"role": "user", "content": "hi"}]
        kw = transport.build_kwargs(
            model="gpt-5.4", messages=msgs, tools=None,
        )
        assert kw["messages"][0]["role"] == "developer"

    def test_profile_path_swaps_for_gpt5(self, transport):
        msgs = [{"role": "system", "content": "Be helpful"}, {"role": "user", "content": "hi"}]
        kw = transport.build_kwargs(
            model="gpt-5.4", messages=msgs, tools=None,
            provider_profile=get_provider_profile("openrouter"),
        )
        assert kw["messages"][0]["role"] == "developer"

    def test_profile_path_no_swap_for_claude(self, transport):
        msgs = [{"role": "system", "content": "Be helpful"}, {"role": "user", "content": "hi"}]
        kw = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=msgs, tools=None,
            provider_profile=get_provider_profile("openrouter"),
        )
        assert kw["messages"][0]["role"] == "system"


class TestRequestOverridesParity:
    """request_overrides with extra_body must merge identically on both paths."""

    def test_extra_body_override_legacy(self, transport):
        kw = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            request_overrides={"extra_body": {"custom_key": "custom_val"}},
        )
        assert kw["extra_body"]["custom_key"] == "custom_val"

    def test_extra_body_override_profile(self, transport):
        kw = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            request_overrides={"extra_body": {"custom_key": "custom_val"}},
        )
        assert kw["extra_body"]["custom_key"] == "custom_val"

    def test_extra_body_override_merges_with_provider_body(self, transport):
        """Override extra_body merges WITH provider extra_body, not replaces."""
        kw = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"),
            request_overrides={"extra_body": {"custom": True}},
        )
        assert kw["extra_body"]["tags"] == ["product=hermes-agent"]  # from profile
        assert kw["extra_body"]["custom"] is True  # from override

    def test_top_level_override(self, transport):
        kw = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            request_overrides={"top_p": 0.9},
        )
        assert kw["top_p"] == 0.9
203  tests/providers/test_provider_profiles.py  Normal file
@@ -0,0 +1,203 @@
"""Tests for the provider module registry and profiles."""

import pytest

from providers import get_provider_profile, _REGISTRY
from providers.base import ProviderProfile, OMIT_TEMPERATURE


class TestRegistry:
    def test_discovery_populates_registry(self):
        p = get_provider_profile("nvidia")
        assert p is not None
        assert p.name == "nvidia"

    def test_alias_lookup(self):
        assert get_provider_profile("kimi").name == "kimi-coding"
        assert get_provider_profile("moonshot").name == "kimi-coding"
        assert get_provider_profile("kimi-coding-cn").name == "kimi-coding-cn"
        assert get_provider_profile("or").name == "openrouter"
        assert get_provider_profile("nous-portal").name == "nous"
        assert get_provider_profile("qwen").name == "qwen-oauth"
        assert get_provider_profile("qwen-portal").name == "qwen-oauth"

    def test_unknown_provider_returns_none(self):
        assert get_provider_profile("nonexistent-provider") is None

    def test_all_providers_have_name(self):
        get_provider_profile("nvidia")  # trigger discovery
        for name, profile in _REGISTRY.items():
            assert profile.name == name


class TestNvidiaProfile:
    def test_max_tokens(self):
        p = get_provider_profile("nvidia")
        assert p.default_max_tokens == 16384

    def test_no_special_temperature(self):
        p = get_provider_profile("nvidia")
        assert p.fixed_temperature is None

    def test_base_url(self):
        p = get_provider_profile("nvidia")
        assert "nvidia.com" in p.base_url


class TestKimiProfile:
    def test_temperature_omit(self):
        p = get_provider_profile("kimi")
        assert p.fixed_temperature is OMIT_TEMPERATURE

    def test_max_tokens(self):
        p = get_provider_profile("kimi")
        assert p.default_max_tokens == 32000

    def test_cn_separate_profile(self):
        p = get_provider_profile("kimi-coding-cn")
        assert p.name == "kimi-coding-cn"
        assert p.env_vars == ("KIMI_CN_API_KEY",)
        assert "moonshot.cn" in p.base_url

    def test_cn_not_alias_of_kimi(self):
        kimi = get_provider_profile("kimi-coding")
        cn = get_provider_profile("kimi-coding-cn")
        assert kimi is not cn
        assert kimi.base_url != cn.base_url

    def test_thinking_enabled(self):
        p = get_provider_profile("kimi")
        eb, tl = p.build_api_kwargs_extras(reasoning_config={"enabled": True, "effort": "high"})
        assert eb["thinking"] == {"type": "enabled"}
        assert tl["reasoning_effort"] == "high"

    def test_thinking_disabled(self):
        p = get_provider_profile("kimi")
        eb, tl = p.build_api_kwargs_extras(reasoning_config={"enabled": False})
        assert eb["thinking"] == {"type": "disabled"}
        assert "reasoning_effort" not in tl

    def test_reasoning_effort_default(self):
        p = get_provider_profile("kimi")
        eb, tl = p.build_api_kwargs_extras(reasoning_config={"enabled": True})
        assert tl["reasoning_effort"] == "medium"

    def test_no_config_defaults(self):
        p = get_provider_profile("kimi")
        eb, tl = p.build_api_kwargs_extras(reasoning_config=None)
        assert eb["thinking"] == {"type": "enabled"}
        assert tl["reasoning_effort"] == "medium"


class TestOpenRouterProfile:
    def test_extra_body_with_prefs(self):
        p = get_provider_profile("openrouter")
        body = p.build_extra_body(provider_preferences={"allow": ["anthropic"]})
        assert body["provider"] == {"allow": ["anthropic"]}

    def test_extra_body_no_prefs(self):
        p = get_provider_profile("openrouter")
        body = p.build_extra_body()
        assert body == {}

    def test_reasoning_full_config(self):
        p = get_provider_profile("openrouter")
        eb, _ = p.build_api_kwargs_extras(
            reasoning_config={"enabled": True, "effort": "high"},
            supports_reasoning=True,
        )
        assert eb["reasoning"] == {"enabled": True, "effort": "high"}

    def test_reasoning_disabled_still_passes(self):
        """OpenRouter passes disabled reasoning through (unlike Nous)."""
        p = get_provider_profile("openrouter")
        eb, _ = p.build_api_kwargs_extras(
            reasoning_config={"enabled": False},
            supports_reasoning=True,
        )
        assert eb["reasoning"] == {"enabled": False}

    def test_default_reasoning(self):
        p = get_provider_profile("openrouter")
        eb, _ = p.build_api_kwargs_extras(supports_reasoning=True)
        assert eb["reasoning"] == {"enabled": True, "effort": "medium"}


class TestNousProfile:
    def test_tags(self):
        p = get_provider_profile("nous")
        body = p.build_extra_body()
        assert body["tags"] == ["product=hermes-agent"]

    def test_auth_type(self):
        p = get_provider_profile("nous")
        assert p.auth_type == "oauth_device_code"

    def test_reasoning_enabled(self):
        p = get_provider_profile("nous")
        eb, _ = p.build_api_kwargs_extras(
            reasoning_config={"enabled": True, "effort": "medium"},
            supports_reasoning=True,
        )
        assert eb["reasoning"] == {"enabled": True, "effort": "medium"}

    def test_reasoning_omitted_when_disabled(self):
        p = get_provider_profile("nous")
        eb, _ = p.build_api_kwargs_extras(
            reasoning_config={"enabled": False},
            supports_reasoning=True,
        )
        assert "reasoning" not in eb


class TestQwenProfile:
    def test_max_tokens(self):
        p = get_provider_profile("qwen-oauth")
        assert p.default_max_tokens == 65536

    def test_auth_type(self):
        p = get_provider_profile("qwen-oauth")
        assert p.auth_type == "oauth_external"

    def test_extra_body_vl(self):
        p = get_provider_profile("qwen-oauth")
        body = p.build_extra_body()
        assert body["vl_high_resolution_images"] is True

    def test_prepare_messages_normalizes_content(self):
        p = get_provider_profile("qwen-oauth")
        msgs = [
            {"role": "system", "content": "Be helpful"},
            {"role": "user", "content": "hello"},
        ]
        result = p.prepare_messages(msgs)
        # System message: content normalized to list, cache_control on last part
        assert isinstance(result[0]["content"], list)
        assert result[0]["content"][-1].get("cache_control") == {"type": "ephemeral"}
        assert result[0]["content"][-1]["text"] == "Be helpful"
        # User message: content normalized to list
        assert isinstance(result[1]["content"], list)
        assert result[1]["content"][0]["text"] == "hello"

    def test_metadata_top_level(self):
        p = get_provider_profile("qwen-oauth")
        meta = {"sessionId": "s123", "promptId": "p456"}
        eb, tl = p.build_api_kwargs_extras(qwen_session_metadata=meta)
        assert tl["metadata"] == meta
        assert "metadata" not in eb


class TestBaseProfile:
    def test_prepare_messages_passthrough(self):
        p = ProviderProfile(name="test")
        msgs = [{"role": "user", "content": "hi"}]
        assert p.prepare_messages(msgs) is msgs

    def test_build_extra_body_empty(self):
        p = ProviderProfile(name="test")
        assert p.build_extra_body() == {}

    def test_build_api_kwargs_extras_empty(self):
        p = ProviderProfile(name="test")
        eb, tl = p.build_api_kwargs_extras()
        assert eb == {}
        assert tl == {}
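The tests above also pin the hook contract used throughout this PR: build_extra_body returns a dict destined for extra_body, and build_api_kwargs_extras returns a pair of dicts (extra_body additions, top-level api_kwargs additions). As a reading aid, here is a Kimi-style override reconstructed purely from the TestKimiProfile assertions; the actual KimiProfile shipped in providers/ may be implemented differently.

```python
# Sketch reconstructed from TestKimiProfile's assertions above; not the
# PR's actual source. KimiLikeProfile is a hypothetical name.
from providers.base import ProviderProfile


class KimiLikeProfile(ProviderProfile):
    def build_api_kwargs_extras(self, reasoning_config=None, **kwargs):
        extra_body, top_level = {}, {}
        # No reasoning_config behaves like thinking enabled at medium effort.
        enabled = True if reasoning_config is None else reasoning_config.get("enabled", True)
        if enabled:
            extra_body["thinking"] = {"type": "enabled"}
            top_level["reasoning_effort"] = (reasoning_config or {}).get("effort", "medium")
        else:
            # Disabled: thinking off, reasoning_effort omitted entirely.
            extra_body["thinking"] = {"type": "disabled"}
        return extra_body, top_level
```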
258  tests/providers/test_transport_parity.py  Normal file
@@ -0,0 +1,258 @@
"""Parity tests: pin the exact current transport behavior per provider.

These tests document the flag-based contract between run_agent.py and
ChatCompletionsTransport.build_kwargs(). When the next PR wires profiles
to replace flags, every assertion here must still pass — any failure is
a behavioral regression.
"""

import pytest

from agent.transports.chat_completions import ChatCompletionsTransport
from providers import get_provider_profile


@pytest.fixture
def transport():
    return ChatCompletionsTransport()


def _simple_messages():
    return [{"role": "user", "content": "hello"}]


def _max_tokens_fn(n):
    return {"max_completion_tokens": n}


class TestNvidiaParity:
    """NVIDIA NIM: default max_tokens=16384."""

    def test_default_max_tokens(self, transport):
        """NVIDIA default max_tokens=16384 comes from profile, not legacy is_nvidia_nim flag."""
        from providers import get_provider_profile

        profile = get_provider_profile("nvidia")
        kw = transport.build_kwargs(
            model="nvidia/llama-3.1-nemotron-70b-instruct",
            messages=_simple_messages(),
            tools=None,
            max_tokens_param_fn=_max_tokens_fn,
            provider_profile=profile,
        )
        assert kw["max_completion_tokens"] == 16384

    def test_user_max_tokens_overrides(self, transport):
        from providers import get_provider_profile

        profile = get_provider_profile("nvidia")
        kw = transport.build_kwargs(
            model="nvidia/llama-3.1-nemotron-70b-instruct",
            messages=_simple_messages(),
            tools=None,
            max_tokens=4096,
            max_tokens_param_fn=_max_tokens_fn,
            provider_profile=profile,
        )
        assert kw["max_completion_tokens"] == 4096  # user overrides default


class TestKimiParity:
    """Kimi: OMIT temperature, max_tokens=32000, thinking + reasoning_effort."""

    def test_temperature_omitted(self, transport):
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            omit_temperature=True,
        )
        assert "temperature" not in kw

    def test_default_max_tokens(self, transport):
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        assert kw["max_completion_tokens"] == 32000

    def test_thinking_enabled(self, transport):
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            reasoning_config={"enabled": True, "effort": "high"},
        )
        assert kw["extra_body"]["thinking"] == {"type": "enabled"}

    def test_thinking_disabled(self, transport):
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            reasoning_config={"enabled": False},
        )
        assert kw["extra_body"]["thinking"] == {"type": "disabled"}

    def test_reasoning_effort_top_level(self, transport):
        """Kimi reasoning_effort is a TOP-LEVEL api_kwargs key, NOT in extra_body."""
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            reasoning_config={"enabled": True, "effort": "high"},
        )
        assert kw.get("reasoning_effort") == "high"
        assert "reasoning_effort" not in kw.get("extra_body", {})

    def test_reasoning_effort_default_medium(self, transport):
        kw = transport.build_kwargs(
            model="kimi-k2",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("kimi-coding"),
            reasoning_config={"enabled": True},
        )
        assert kw.get("reasoning_effort") == "medium"


class TestOpenRouterParity:
    """OpenRouter: provider preferences, reasoning in extra_body."""

    def test_provider_preferences(self, transport):
        prefs = {"allow": ["anthropic"], "sort": "price"}
        kw = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("openrouter"),
            provider_preferences=prefs,
        )
        assert kw["extra_body"]["provider"] == prefs

    def test_reasoning_passes_full_config(self, transport):
        """OpenRouter passes the FULL reasoning_config dict, not just effort."""
        rc = {"enabled": True, "effort": "high"}
        kw = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("openrouter"),
            supports_reasoning=True,
            reasoning_config=rc,
        )
        assert kw["extra_body"]["reasoning"] == rc

    def test_default_reasoning_when_no_config(self, transport):
        """When supports_reasoning=True but no config, adds default."""
        kw = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("openrouter"),
            supports_reasoning=True,
        )
        assert kw["extra_body"]["reasoning"] == {"enabled": True, "effort": "medium"}


class TestNousParity:
    """Nous: product tags, reasoning, omit when disabled."""

    def test_tags(self, transport):
        kw = transport.build_kwargs(
            model="hermes-3-llama-3.1-405b",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("nous"),
        )
        assert kw["extra_body"]["tags"] == ["product=hermes-agent"]

    def test_reasoning_omitted_when_disabled(self, transport):
        """Nous special case: reasoning omitted entirely when disabled."""
        kw = transport.build_kwargs(
            model="hermes-3-llama-3.1-405b",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("nous"),
            supports_reasoning=True,
            reasoning_config={"enabled": False},
        )
        assert "reasoning" not in kw.get("extra_body", {})

    def test_reasoning_enabled(self, transport):
        rc = {"enabled": True, "effort": "high"}
        kw = transport.build_kwargs(
            model="hermes-3-llama-3.1-405b",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("nous"),
            supports_reasoning=True,
            reasoning_config=rc,
        )
        assert kw["extra_body"]["reasoning"] == rc


class TestQwenParity:
    """Qwen: max_tokens=65536, vl_high_resolution, metadata top-level."""

    def test_default_max_tokens(self, transport):
        kw = transport.build_kwargs(
            model="qwen3.5-plus",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("qwen-oauth"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        assert kw["max_completion_tokens"] == 65536

    def test_vl_high_resolution(self, transport):
        kw = transport.build_kwargs(
            model="qwen3.5-plus",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("qwen-oauth"),
        )
        assert kw["extra_body"]["vl_high_resolution_images"] is True

    def test_metadata_top_level(self, transport):
        """Qwen metadata goes to top-level api_kwargs, NOT extra_body."""
        meta = {"sessionId": "s123", "promptId": "p456"}
        kw = transport.build_kwargs(
            model="qwen3.5-plus",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("qwen-oauth"),
            qwen_session_metadata=meta,
        )
        assert kw["metadata"] == meta
        assert "metadata" not in kw.get("extra_body", {})


class TestCustomOllamaParity:
    """Custom/Ollama: num_ctx, think=false — now tested via profile."""

    def test_ollama_num_ctx(self, transport):
        kw = transport.build_kwargs(
            model="llama3.1",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("custom"),
            ollama_num_ctx=131072,
        )
        assert kw["extra_body"]["options"]["num_ctx"] == 131072

    def test_think_false_when_disabled(self, transport):
        kw = transport.build_kwargs(
            model="qwen3:72b",
            messages=_simple_messages(),
            tools=None,
            provider_profile=get_provider_profile("custom"),
            reasoning_config={"enabled": False, "effort": "none"},
        )
        assert kw["extra_body"]["think"] is False
@@ -1117,6 +1117,7 @@ class TestBuildApiKwargs:
         assert "temperature" not in kwargs
 
     def test_kimi_coding_endpoint_omits_temperature(self, agent):
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.kimi.com/coding/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-k2.5"

@@ -1129,6 +1130,7 @@ class TestBuildApiKwargs:
     def test_kimi_coding_endpoint_sends_max_tokens_and_reasoning(self, agent):
         """Kimi endpoint should send max_tokens=32000 and reasoning_effort as
         top-level params, matching Kimi CLI's default behavior."""
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.kimi.com/coding/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-for-coding"

@@ -1141,6 +1143,7 @@ class TestBuildApiKwargs:
 
     def test_kimi_coding_endpoint_respects_custom_effort(self, agent):
         """reasoning_effort should reflect reasoning_config.effort when set."""
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.kimi.com/coding/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-for-coding"

@@ -1154,6 +1157,7 @@ class TestBuildApiKwargs:
     def test_kimi_coding_endpoint_sends_thinking_extra_body(self, agent):
         """Kimi endpoint should send extra_body.thinking={"type":"enabled"}
         to activate reasoning mode, mirroring Kimi CLI's with_thinking()."""
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.kimi.com/coding/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-for-coding"

@@ -1167,6 +1171,7 @@ class TestBuildApiKwargs:
         """When reasoning_config.enabled=False, thinking should be disabled
         and reasoning_effort should be omitted entirely — mirroring Kimi
         CLI's with_thinking("off") which maps to reasoning_effort=None."""
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.kimi.com/coding/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-for-coding"

@@ -1180,6 +1185,7 @@ class TestBuildApiKwargs:
 
     def test_moonshot_endpoint_sends_max_tokens_and_reasoning(self, agent):
         """api.moonshot.ai should get the same Kimi-compatible params."""
         agent.provider = "kimi-coding"
         agent.base_url = "https://api.moonshot.ai/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-k2.5"

@@ -1193,6 +1199,7 @@ class TestBuildApiKwargs:
 
     def test_moonshot_cn_endpoint_sends_max_tokens_and_reasoning(self, agent):
         """api.moonshot.cn (China endpoint) should get the same params."""
         agent.provider = "kimi-coding-cn"
         agent.base_url = "https://api.moonshot.cn/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.model = "kimi-k2.5"

@@ -1205,6 +1212,7 @@ class TestBuildApiKwargs:
         assert kwargs["extra_body"]["thinking"] == {"type": "enabled"}
 
     def test_provider_preferences_injected(self, agent):
+        agent.provider = "openrouter"
         agent.base_url = "https://openrouter.ai/api/v1"
         agent.providers_allowed = ["Anthropic"]
         messages = [{"role": "user", "content": "hi"}]

@@ -1213,6 +1221,7 @@ class TestBuildApiKwargs:
 
     def test_reasoning_config_default_openrouter(self, agent):
         """Default reasoning config for OpenRouter should be medium."""
+        agent.provider = "openrouter"
         agent.base_url = "https://openrouter.ai/api/v1"
         agent.model = "anthropic/claude-sonnet-4-20250514"
         messages = [{"role": "user", "content": "hi"}]

@@ -1222,6 +1231,7 @@ class TestBuildApiKwargs:
         assert reasoning["effort"] == "medium"
 
     def test_reasoning_config_custom(self, agent):
+        agent.provider = "openrouter"
         agent.base_url = "https://openrouter.ai/api/v1"
         agent.model = "anthropic/claude-sonnet-4-20250514"
         agent.reasoning_config = {"enabled": False}

@@ -1237,6 +1247,7 @@ class TestBuildApiKwargs:
         assert "reasoning" not in kwargs.get("extra_body", {})
 
     def test_reasoning_sent_for_supported_openrouter_model(self, agent):
+        agent.provider = "openrouter"
         agent.base_url = "https://openrouter.ai/api/v1"
         agent.model = "qwen/qwen3.5-plus-02-15"
         messages = [{"role": "user", "content": "hi"}]

@@ -1244,6 +1255,7 @@ class TestBuildApiKwargs:
         assert kwargs["extra_body"]["reasoning"]["effort"] == "medium"
 
     def test_reasoning_sent_for_nous_route(self, agent):
+        agent.provider = "nous"
         agent.base_url = "https://inference-api.nousresearch.com/v1"
         agent.model = "minimax/minimax-m2.5"
         messages = [{"role": "user", "content": "hi"}]

@@ -1251,18 +1263,38 @@ class TestBuildApiKwargs:
         assert kwargs["extra_body"]["reasoning"]["effort"] == "medium"
 
     def test_reasoning_sent_for_copilot_gpt5(self, agent):
-        agent.base_url = "https://api.githubcopilot.com"
-        agent.model = "gpt-5.4"
-        messages = [{"role": "user", "content": "hi"}]
-        kwargs = agent._build_api_kwargs(messages)
+        """Copilot/GitHub Models: GPT-5 reasoning goes in extra_body.reasoning."""
+        from agent.transports import get_transport
+        from providers import get_provider_profile
+
+        transport = get_transport("chat_completions")
+        profile = get_provider_profile("copilot")
+        msgs = [{"role": "user", "content": "hi"}]
+        kwargs = transport.build_kwargs(
+            model="gpt-5.4",
+            messages=msgs,
+            tools=None,
+            supports_reasoning=True,
+            provider_profile=profile,
+        )
         assert kwargs["extra_body"]["reasoning"] == {"effort": "medium"}
 
     def test_reasoning_xhigh_normalized_for_copilot(self, agent):
-        agent.base_url = "https://api.githubcopilot.com"
-        agent.model = "gpt-5.4"
-        agent.reasoning_config = {"enabled": True, "effort": "xhigh"}
-        messages = [{"role": "user", "content": "hi"}]
-        kwargs = agent._build_api_kwargs(messages)
+        """xhigh effort should normalize to high for Copilot GitHub Models."""
+        from agent.transports import get_transport
+        from providers import get_provider_profile
+
+        transport = get_transport("chat_completions")
+        profile = get_provider_profile("copilot")
+        msgs = [{"role": "user", "content": "hi"}]
+        kwargs = transport.build_kwargs(
+            model="gpt-5.4",
+            messages=msgs,
+            tools=None,
+            supports_reasoning=True,
+            reasoning_config={"enabled": True, "effort": "xhigh"},
+            provider_profile=profile,
+        )
         assert kwargs["extra_body"]["reasoning"] == {"effort": "high"}
 
     def test_reasoning_omitted_for_non_reasoning_copilot_model(self, agent):

@@ -1280,6 +1312,7 @@ class TestBuildApiKwargs:
 
 
     def test_qwen_portal_formats_messages_and_metadata(self, agent):
         agent.provider = "qwen-oauth"
         agent.base_url = "https://portal.qwen.ai/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.session_id = "sess-123"

@@ -1296,6 +1329,7 @@ class TestBuildApiKwargs:
         assert kwargs["messages"][2]["content"][0]["text"] == "hi"
 
     def test_qwen_portal_normalizes_bare_string_content_parts(self, agent):
         agent.provider = "qwen-oauth"
         agent.base_url = "https://portal.qwen.ai/v1"
+        agent._base_url_lower = agent.base_url.lower()
         messages = [

@@ -1308,6 +1342,7 @@ class TestBuildApiKwargs:
         assert user_content[1] == {"type": "text", "text": "world"}
 
     def test_qwen_portal_no_system_message(self, agent):
         agent.provider = "qwen-oauth"
         agent.base_url = "https://portal.qwen.ai/v1"
+        agent._base_url_lower = agent.base_url.lower()
         messages = [{"role": "user", "content": "hi"}]

@@ -1328,6 +1363,7 @@ class TestBuildApiKwargs:
     def test_qwen_portal_default_max_tokens(self, agent):
         """When max_tokens is None, Qwen Portal gets a default of 65536
         to prevent reasoning models from exhausting their output budget."""
         agent.provider = "qwen-oauth"
         agent.base_url = "https://portal.qwen.ai/v1"
+        agent._base_url_lower = agent.base_url.lower()
         agent.max_tokens = None
|||
Loading…
Add table
Add a link
Reference in a new issue