Mirror of https://github.com/NousResearch/hermes-agent.git (synced 2026-05-08 03:01:47 +00:00)
Introduces the providers/ package: a single source of truth for every inference provider. Adding a simple API-key provider now requires one providers/<name>.py file, with zero edits anywhere else. A hedged sketch of such a file follows this description.

What this PR ships:
- providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes)
- ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model
- 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models
- chat_completions.build_kwargs: profile path via _build_kwargs_from_profile; the legacy flag path is retained for lmstudio/tencent-tokenhub, whose session-aware reasoning probing doesn't map cleanly to hooks yet
- run_agent.py: profile path for all registered providers; legacy-path variable scoping fixed (all flags defined before branching)
- Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER
- GeminiProfile: thinking_config translation (native + openai-compat nested)
- New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, and e2e kwargs assembly)

Deltas vs. the original PR (salvaged onto current main):
- Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (these were added to main after the original PR)
- Skipped profiles: lmstudio and tencent-tokenhub stay on the legacy path (their reasoning_effort probing has no clean hook equivalent yet)
- Removed the lmstudio alias from the custom profile (it is a separate provider now)
- Skipped openrouter/custom in the PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding them breaks runtime resolution)
- runtime_provider: profile.api_mode is used only as a fallback when URL detection finds nothing (it was breaking the minimax /v1 override)
- Preserved main's legacy-path improvements: deepseek reasoning_content preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc.
- Kept agent/copilot_acp_client.py in place (rejected the PR's relocation: main has 7 fixes landed since, and relocating would revert them)
- Kept the _API_KEY_PROVIDER_AUX_MODELS alias for backward compat with existing test imports

Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com>
Closes #14418
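To make the one-file claim concrete, a minimal API-key profile might look like the sketch below. The declarative field names come straight from the description above; the base-class import path, the subclass-with-class-attributes shape, and the provider itself (examplecloud) are assumptions for illustration, not the repo's actual layout.

```python
# providers/examplecloud.py -- hypothetical provider; a sketch only.
# Field names (name, api_mode, aliases, env_vars, base_url, ...) come
# from the PR text; the import path and class shape are assumed.
from providers.base import ProviderProfile  # assumed module layout


class ExampleCloudProfile(ProviderProfile):
    name = "examplecloud"
    api_mode = "chat_completions"
    aliases = ("example",)
    display_name = "ExampleCloud"
    env_vars = ("EXAMPLECLOUD_API_KEY",)
    base_url = "https://api.examplecloud.invalid/v1"
    models_url = "https://api.examplecloud.invalid/v1/models"
    auth_type = "api_key"
    fallback_models = ("examplecloud/chat-large",)
    default_max_tokens = 8192
```

Per the PR, dropping a file like this into providers/ auto-wires the provider into auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, the doctor health checks, config.OPTIONAL_ENV_VARS, and model_metadata._URL_TO_PROVIDER, with no other edits.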
118 lines
3.9 KiB
Python
"""E2E tests: verify _build_kwargs_from_profile produces correct output.
|
|
|
|
These tests call _build_kwargs_from_profile on the transport directly,
|
|
without importing run_agent (which would cause xdist worker contamination).
|
|
"""
|
|
|
|
import pytest
|
|
from agent.transports.chat_completions import ChatCompletionsTransport
|
|
from providers import get_provider_profile
|
|
|
|
|
|
@pytest.fixture
|
|
def transport():
|
|
return ChatCompletionsTransport()
|
|
|
|
|
|
def _msgs():
|
|
return [{"role": "user", "content": "hi"}]
|
|
|
|
|
|
class TestNvidiaProfileWiring:
|
|
def test_nvidia_gets_default_max_tokens(self, transport):
|
|
profile = get_provider_profile("nvidia")
|
|
kwargs = transport.build_kwargs(
|
|
model="nvidia/llama-3.1-nemotron-70b-instruct",
|
|
messages=_msgs(),
|
|
tools=None,
|
|
provider_profile=profile,
|
|
max_tokens=None,
|
|
max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
|
|
timeout=300,
|
|
reasoning_config=None,
|
|
request_overrides=None,
|
|
session_id="test",
|
|
ollama_num_ctx=None,
|
|
)
|
|
# NVIDIA profile sets default_max_tokens=16384
|
|
assert kwargs.get("max_tokens") == 16384
|
|
|
|
def test_nvidia_nim_alias(self, transport):
|
|
profile = get_provider_profile("nvidia-nim")
|
|
assert profile is not None
|
|
assert profile.name == "nvidia"
|
|
assert profile.default_max_tokens == 16384
|
|
|
|
def test_nvidia_model_passed(self, transport):
|
|
profile = get_provider_profile("nvidia")
|
|
kwargs = transport.build_kwargs(
|
|
model="nvidia/test-model",
|
|
messages=_msgs(),
|
|
tools=None,
|
|
provider_profile=profile,
|
|
max_tokens=None,
|
|
max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
|
|
timeout=300,
|
|
reasoning_config=None,
|
|
request_overrides=None,
|
|
session_id="test",
|
|
ollama_num_ctx=None,
|
|
)
|
|
assert kwargs["model"] == "nvidia/test-model"
|
|
|
|
def test_nvidia_messages_passed(self, transport):
|
|
profile = get_provider_profile("nvidia")
|
|
msgs = _msgs()
|
|
kwargs = transport.build_kwargs(
|
|
model="nvidia/test",
|
|
messages=msgs,
|
|
tools=None,
|
|
provider_profile=profile,
|
|
max_tokens=None,
|
|
max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
|
|
timeout=300,
|
|
reasoning_config=None,
|
|
request_overrides=None,
|
|
session_id="test",
|
|
ollama_num_ctx=None,
|
|
)
|
|
assert kwargs["messages"] == msgs
|
|
|
|
|
|
class TestDeepSeekProfileWiring:
|
|
def test_deepseek_no_forced_max_tokens(self, transport):
|
|
profile = get_provider_profile("deepseek")
|
|
kwargs = transport.build_kwargs(
|
|
model="deepseek-chat",
|
|
messages=_msgs(),
|
|
tools=None,
|
|
provider_profile=profile,
|
|
max_tokens=None,
|
|
max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
|
|
timeout=300,
|
|
reasoning_config=None,
|
|
request_overrides=None,
|
|
session_id="test",
|
|
ollama_num_ctx=None,
|
|
)
|
|
# DeepSeek has no default_max_tokens
|
|
assert kwargs["model"] == "deepseek-chat"
|
|
assert kwargs.get("max_tokens") is None or "max_tokens" not in kwargs
|
|
|
|
def test_deepseek_messages_passed(self, transport):
|
|
profile = get_provider_profile("deepseek")
|
|
msgs = _msgs()
|
|
kwargs = transport.build_kwargs(
|
|
model="deepseek-chat",
|
|
messages=msgs,
|
|
tools=None,
|
|
provider_profile=profile,
|
|
max_tokens=None,
|
|
max_tokens_param_fn=lambda x: {"max_tokens": x} if x else {},
|
|
timeout=300,
|
|
reasoning_config=None,
|
|
request_overrides=None,
|
|
session_id="test",
|
|
ollama_num_ctx=None,
|
|
)
|
|
assert kwargs["messages"] == msgs
|
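The suite above exercises only the declarative fields. The PR also names four overridable hooks (prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models); a sketch of how a profile might use one of them is below. Only the hook names come from the PR text: the signature, the merge-into-request-body semantics, and the ThinkingProfile class are assumptions for illustration, loosely mirroring the GeminiProfile thinking_config translation the PR describes.

```python
# A sketch, not part of the repo: a hypothetical profile overriding
# build_extra_body. The hook name comes from the PR description; the
# signature and return-value handling assumed here are illustrative.
from providers.base import ProviderProfile  # assumed module layout


class ThinkingProfile(ProviderProfile):
    name = "examplecloud-thinking"
    api_mode = "chat_completions"
    env_vars = ("EXAMPLECLOUD_API_KEY",)
    base_url = "https://api.examplecloud.invalid/v1"

    def build_extra_body(self, model, reasoning_config=None):
        # Translate a generic reasoning config into a provider-specific
        # body field, in the spirit of GeminiProfile's thinking_config
        # translation; assume the dict is merged into the request body.
        if reasoning_config:
            budget = reasoning_config.get("budget", 1024)
            return {"thinking_config": {"budget": budget}}
        return {}
```

A test for such a profile would follow the same pattern as the classes above: resolve the profile with get_provider_profile, call transport.build_kwargs with it, and assert the translated field appears in the assembled kwargs.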