Mirror of https://github.com/NousResearch/hermes-agent.git, synced 2026-04-25 00:51:20 +00:00.
Cycle 2 PR 1 (#14418). Introduces providers/ package with ProviderProfile ABC and auto-discovery registry, then wires ChatCompletionsTransport to delegate to profiles via a clean single-path method. Provider profiles (8 providers): - nvidia: default_max_tokens=16384 - kimi + kimi-cn: OMIT_TEMPERATURE, thinking + top-level reasoning_effort - openrouter: provider_preferences, full reasoning_config passthrough - nous: product tags, reasoning with Nous-specific disabled omission - deepseek: base_url + env_vars - qwen-oauth: vl_high_resolution extra_body, metadata top-level api_kwargs Transport integration: - _build_kwargs_from_profile() replaces the entire legacy flag-based assembly when provider_profile param is passed - Single path: no dual-execution, no overwrites, no legacy fallthrough - build_api_kwargs_extras() returns (extra_body, top_level) tuple to handle Kimi's top-level reasoning_effort vs OpenRouter's extra_body Auth types: api_key | oauth_device_code | oauth_external | copilot | aws (expanded from the lossy 'oauth' to match real Hermes auth modes). 64 new tests: - 30 profile unit tests (registry, all 8 profiles, auth types) - 19 transport parity tests (pin legacy flag-based behavior) - 15 profile wiring tests (verify profile path = legacy path)
293 lines · 12 KiB · Python
"""Profile-path parity tests: verify profile path produces identical output to legacy flags.
|
|
|
|
Each test calls build_kwargs twice — once with legacy flags, once with provider_profile —
|
|
and asserts the output is identical. This catches any behavioral drift between the two paths.
|
|
"""
|
|
|
|
import pytest
|
|
from agent.transports.chat_completions import ChatCompletionsTransport
|
|
from providers import get_provider_profile
|
|
|
|
|
|
@pytest.fixture
def transport():
    """Provide a fresh ChatCompletionsTransport instance per test."""
    return ChatCompletionsTransport()
|
|
|
|
|
|
def _msgs():
|
|
return [{"role": "user", "content": "hello"}]
|
|
|
|
|
|
def _max_tokens_fn(n):
|
|
return {"max_completion_tokens": n}
|
|
|
|
|
|
class TestNvidiaProfileParity:
    """The nvidia profile must apply the same default max tokens as the legacy flag."""

    def test_max_tokens_match(self, transport):
        via_flag = transport.build_kwargs(
            model="nvidia/nemotron", messages=_msgs(), tools=None,
            is_nvidia_nim=True, max_tokens_param_fn=_max_tokens_fn,
        )
        via_profile = transport.build_kwargs(
            model="nvidia/nemotron", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nvidia"),
            max_tokens_param_fn=_max_tokens_fn,
        )
        # Both paths must land on the nvidia default of 16384.
        assert via_profile["max_completion_tokens"] == 16384
        assert via_flag["max_completion_tokens"] == 16384
|
|
|
|
|
|
class TestKimiProfileParity:
    """Kimi profile parity with the legacy is_kimi flag path."""

    def _run_both(self, transport, legacy_extras=None, **shared):
        """Build kwargs twice with identical inputs: legacy is_kimi flag vs kimi profile.

        legacy_extras holds kwargs only the flag-based path receives (e.g.
        omit_temperature, which the profile encodes internally).
        """
        flag_kwargs = dict(shared, **(legacy_extras or {}))
        via_flag = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            is_kimi=True, **flag_kwargs,
        )
        via_profile = transport.build_kwargs(
            model="kimi-k2", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("kimi"), **shared,
        )
        return via_flag, via_profile

    def test_temperature_omitted(self, transport):
        via_flag, via_profile = self._run_both(
            transport, legacy_extras={"omit_temperature": True},
        )
        assert "temperature" not in via_flag
        assert "temperature" not in via_profile

    def test_max_tokens(self, transport):
        via_flag, via_profile = self._run_both(
            transport, max_tokens_param_fn=_max_tokens_fn,
        )
        assert via_profile["max_completion_tokens"] == 32000
        assert via_flag["max_completion_tokens"] == 32000

    def test_thinking_enabled(self, transport):
        cfg = {"enabled": True, "effort": "high"}
        via_flag, via_profile = self._run_both(transport, reasoning_config=cfg)
        assert via_profile["extra_body"]["thinking"] == via_flag["extra_body"]["thinking"]
        # Kimi places reasoning_effort at the top level, not in extra_body.
        assert via_profile["reasoning_effort"] == "high"
        assert via_flag["reasoning_effort"] == "high"

    def test_thinking_disabled(self, transport):
        cfg = {"enabled": False}
        via_flag, via_profile = self._run_both(transport, reasoning_config=cfg)
        assert via_profile["extra_body"]["thinking"] == via_flag["extra_body"]["thinking"]
        assert via_profile["extra_body"]["thinking"]["type"] == "disabled"
        # Disabled reasoning must not emit a top-level reasoning_effort on either path.
        assert "reasoning_effort" not in via_profile
        assert "reasoning_effort" not in via_flag

    def test_reasoning_effort_default(self, transport):
        cfg = {"enabled": True}
        via_flag, via_profile = self._run_both(transport, reasoning_config=cfg)
        # When no effort is given, both paths default to "medium".
        assert via_profile["reasoning_effort"] == "medium"
        assert via_flag["reasoning_effort"] == "medium"
|
|
|
|
|
|
class TestOpenRouterProfileParity:
    """OpenRouter profile parity with the legacy is_openrouter flag path."""

    def _build(self, transport, use_profile, **extras):
        """Build kwargs via either the profile path or the legacy flag path."""
        if use_profile:
            routing = {"provider_profile": get_provider_profile("openrouter")}
        else:
            routing = {"is_openrouter": True}
        return transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6", messages=_msgs(), tools=None,
            **routing, **extras,
        )

    def test_provider_preferences(self, transport):
        prefs = {"allow": ["anthropic"]}
        via_flag = self._build(transport, False, provider_preferences=prefs)
        via_profile = self._build(transport, True, provider_preferences=prefs)
        assert via_profile["extra_body"]["provider"] == via_flag["extra_body"]["provider"]

    def test_reasoning_full_config(self, transport):
        cfg = {"enabled": True, "effort": "high"}
        via_flag = self._build(
            transport, False, supports_reasoning=True, reasoning_config=cfg,
        )
        via_profile = self._build(
            transport, True, supports_reasoning=True, reasoning_config=cfg,
        )
        assert via_profile["extra_body"]["reasoning"] == via_flag["extra_body"]["reasoning"]

    def test_default_reasoning(self, transport):
        via_flag = self._build(transport, False, supports_reasoning=True)
        via_profile = self._build(transport, True, supports_reasoning=True)
        assert via_profile["extra_body"]["reasoning"] == via_flag["extra_body"]["reasoning"]
|
|
|
|
|
|
class TestNousProfileParity:
    """Nous profile parity with the legacy is_nous flag path."""

    def test_tags(self, transport):
        via_flag = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None, is_nous=True,
        )
        via_profile = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"),
        )
        assert via_profile["extra_body"]["tags"] == via_flag["extra_body"]["tags"]

    def test_reasoning_omitted_when_disabled(self, transport):
        cfg = {"enabled": False}
        shared = dict(supports_reasoning=True, reasoning_config=cfg)
        via_flag = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            is_nous=True, **shared,
        )
        via_profile = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"), **shared,
        )
        # Nous omits the reasoning key entirely when disabled, on both paths.
        for built in (via_flag, via_profile):
            assert "reasoning" not in built.get("extra_body", {})
|
|
|
|
|
|
class TestQwenProfileParity:
    """Qwen portal profile parity with the legacy is_qwen_portal flag path."""

    @staticmethod
    def _legacy_kwargs(transport, **extras):
        """Build kwargs via the legacy is_qwen_portal flag."""
        return transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            is_qwen_portal=True, **extras,
        )

    @staticmethod
    def _profile_kwargs(transport, **extras):
        """Build kwargs via the qwen provider profile."""
        return transport.build_kwargs(
            model="qwen3.5", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("qwen"), **extras,
        )

    def test_max_tokens(self, transport):
        via_flag = self._legacy_kwargs(transport, max_tokens_param_fn=_max_tokens_fn)
        via_profile = self._profile_kwargs(transport, max_tokens_param_fn=_max_tokens_fn)
        assert via_profile["max_completion_tokens"] == 65536
        assert via_flag["max_completion_tokens"] == 65536

    def test_vl_high_resolution(self, transport):
        via_flag = self._legacy_kwargs(transport)
        via_profile = self._profile_kwargs(transport)
        assert (
            via_profile["extra_body"]["vl_high_resolution_images"]
            == via_flag["extra_body"]["vl_high_resolution_images"]
        )

    def test_metadata_top_level(self, transport):
        session_meta = {"sessionId": "s123", "promptId": "p456"}
        via_flag = self._legacy_kwargs(transport, qwen_session_metadata=session_meta)
        via_profile = self._profile_kwargs(transport, qwen_session_metadata=session_meta)
        # Metadata is a top-level api kwarg, never nested under extra_body.
        assert via_profile["metadata"] == session_meta
        assert via_flag["metadata"] == session_meta
        assert "metadata" not in via_profile.get("extra_body", {})

    def test_message_preprocessing(self, transport):
        """Qwen profile normalizes string content to list-of-parts."""
        conversation = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "hello"},
        ]
        built = transport.build_kwargs(
            model="qwen3.5", messages=conversation, tools=None,
            provider_profile=get_provider_profile("qwen"),
        )
        system_out = built["messages"][0]
        user_out = built["messages"][1]
        # System message content normalized + cache_control injected
        assert isinstance(system_out["content"], list)
        assert system_out["content"][0]["type"] == "text"
        assert "cache_control" in system_out["content"][-1]
        # User message content normalized
        assert isinstance(user_out["content"], list)
        assert user_out["content"][0] == {"type": "text", "text": "hello"}
|
|
|
|
|
|
class TestDeveloperRoleParity:
    """Developer role swap must work on BOTH legacy and profile paths."""

    @staticmethod
    def _system_plus_user():
        """Conversation whose leading system turn is the swap candidate."""
        return [
            {"role": "system", "content": "Be helpful"},
            {"role": "user", "content": "hi"},
        ]

    def test_legacy_path_swaps_for_gpt5(self, transport):
        built = transport.build_kwargs(
            model="gpt-5.4", messages=self._system_plus_user(), tools=None,
        )
        assert built["messages"][0]["role"] == "developer"

    def test_profile_path_swaps_for_gpt5(self, transport):
        built = transport.build_kwargs(
            model="gpt-5.4", messages=self._system_plus_user(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
        )
        assert built["messages"][0]["role"] == "developer"

    def test_profile_path_no_swap_for_claude(self, transport):
        built = transport.build_kwargs(
            model="anthropic/claude-sonnet-4.6",
            messages=self._system_plus_user(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
        )
        assert built["messages"][0]["role"] == "system"
|
|
|
|
|
|
class TestRequestOverridesParity:
    """request_overrides with extra_body must merge identically on both paths."""

    def test_extra_body_override_legacy(self, transport):
        built = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            is_openrouter=True,
            request_overrides={"extra_body": {"custom_key": "custom_val"}},
        )
        assert built["extra_body"]["custom_key"] == "custom_val"

    def test_extra_body_override_profile(self, transport):
        built = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            request_overrides={"extra_body": {"custom_key": "custom_val"}},
        )
        assert built["extra_body"]["custom_key"] == "custom_val"

    def test_extra_body_override_merges_with_provider_body(self, transport):
        """Override extra_body merges WITH provider extra_body, not replaces."""
        built = transport.build_kwargs(
            model="hermes-3", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("nous"),
            request_overrides={"extra_body": {"custom": True}},
        )
        body = built["extra_body"]
        assert body["tags"] == ["product=hermes-agent"]  # from profile
        assert body["custom"] is True  # from override

    def test_top_level_override(self, transport):
        built = transport.build_kwargs(
            model="gpt-5.4", messages=_msgs(), tools=None,
            provider_profile=get_provider_profile("openrouter"),
            request_overrides={"top_p": 0.9},
        )
        assert built["top_p"] == 0.9
|