hermes-agent/tests/providers/test_e2e_wiring.py
kshitijk4poor 84d1673e2f feat: provider modules — ProviderProfile ABC, 29 providers, fetch_models, transport single-path
feat: provider modules — ProviderProfile ABC, 29 providers, fetch_models, transport single-path

Introduces providers/ as the single source of truth for every inference
provider. All 29 providers declared with correct data cross-checked against
auth.py, runtime_provider.py and auxiliary_client.py.

Rebased onto main (30307a980). Incorporates post-salvage fixes from
56724147e (gmi aux model google/gemini-3.1-flash-lite-preview, already set in providers/gmi.py).
2026-04-29 20:10:09 +05:30

118 lines
3.9 KiB
Python

"""E2E tests: verify _build_kwargs_from_profile produces correct output.
These tests call _build_kwargs_from_profile on the transport directly,
without importing run_agent (which would cause xdist worker contamination).
"""
import pytest
from agent.transports.chat_completions import ChatCompletionsTransport
from providers import get_provider_profile
@pytest.fixture
def transport():
    """Provide a fresh ChatCompletionsTransport instance per test."""
    return ChatCompletionsTransport()
def _msgs():
return [{"role": "user", "content": "hi"}]
class TestNvidiaProfileWiring:
    """Verify the NVIDIA provider profile is wired through build_kwargs."""

    @staticmethod
    def _build(transport, profile, model, messages):
        # Single call site for build_kwargs so each test only varies the
        # inputs it actually exercises; all other arguments are the shared
        # no-op defaults used across this module.
        return transport.build_kwargs(
            model=model,
            messages=messages,
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )

    def test_nvidia_gets_default_max_tokens(self, transport):
        profile = get_provider_profile("nvidia")
        kwargs = self._build(
            transport, profile, "nvidia/llama-3.1-nemotron-70b-instruct", _msgs()
        )
        # NVIDIA profile sets default_max_tokens=16384
        assert kwargs.get("max_tokens") == 16384

    def test_nvidia_nim_alias(self, transport):
        # "nvidia-nim" must resolve to the canonical nvidia profile.
        profile = get_provider_profile("nvidia-nim")
        assert profile is not None
        assert profile.name == "nvidia"
        assert profile.default_max_tokens == 16384

    def test_nvidia_model_passed(self, transport):
        profile = get_provider_profile("nvidia")
        kwargs = self._build(transport, profile, "nvidia/test-model", _msgs())
        assert kwargs["model"] == "nvidia/test-model"

    def test_nvidia_messages_passed(self, transport):
        profile = get_provider_profile("nvidia")
        msgs = _msgs()
        kwargs = self._build(transport, profile, "nvidia/test", msgs)
        assert kwargs["messages"] == msgs
class TestDeepSeekProfileWiring:
    """Verify the DeepSeek provider profile is wired through build_kwargs."""

    @staticmethod
    def _build(transport, profile, messages):
        # Shared build_kwargs invocation for the deepseek-chat model with
        # the module's standard no-op defaults.
        return transport.build_kwargs(
            model="deepseek-chat",
            messages=messages,
            tools=None,
            provider_profile=profile,
            max_tokens=None,
            max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
            timeout=300,
            reasoning_config=None,
            request_overrides=None,
            session_id="test",
            ollama_num_ctx=None,
        )

    def test_deepseek_no_forced_max_tokens(self, transport):
        profile = get_provider_profile("deepseek")
        kwargs = self._build(transport, profile, _msgs())
        assert kwargs["model"] == "deepseek-chat"
        # DeepSeek has no default_max_tokens, so none should be injected.
        # dict.get returns None both when the key is absent and when it is
        # explicitly None, so one check covers both cases (the original's
        # extra `or "max_tokens" not in kwargs` clause was unreachable).
        assert kwargs.get("max_tokens") is None

    def test_deepseek_messages_passed(self, transport):
        profile = get_provider_profile("deepseek")
        msgs = _msgs()
        kwargs = self._build(transport, profile, msgs)
        assert kwargs["messages"] == msgs