mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-14 04:02:26 +00:00
feat: provider modules — ProviderProfile ABC, 33 providers, fetch_models, transport single-path
Introduces providers/ package — single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else. What this PR ships: - providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes) - ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model - 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models - chat_completions.build_kwargs: profile path via _build_kwargs_from_profile, legacy flag path retained for lmstudio/tencent-tokenhub (which have session-aware reasoning probing that doesn't map cleanly to hooks yet) - run_agent.py: profile path for all registered providers; legacy path variable scoping fixed (all flags defined before branching) - Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER - GeminiProfile: thinking_config translation (native + openai-compat nested) - New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly) Deltas vs original PR (salvaged onto current main): - Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (were added to main since original PR) - Skipped profiles: lmstudio, tencent-tokenhub stay on legacy path (their reasoning_effort probing has no clean hook equivalent yet) - Removed lmstudio alias from custom profile (it's a separate provider now) - Skipped openrouter/custom from PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding breaks runtime resolution) - runtime_provider: profile.api_mode only as fallback when URL detection finds nothing (was breaking minimax /v1 override) - Preserved main's legacy-path improvements: deepseek reasoning_content 
preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc. - Kept agent/copilot_acp_client.py in place (rejected PR's relocation — main has 7 fixes landed since; relocation would revert them) - _API_KEY_PROVIDER_AUX_MODELS alias kept for backward compat with existing test imports Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com> Closes #14418
This commit is contained in:
parent
2b500ed68a
commit
20a4f79ed1
57 changed files with 3149 additions and 177 deletions
86
providers/openrouter.py
Normal file
86
providers/openrouter.py
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
"""OpenRouter provider profile."""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from providers import register_provider
|
||||
from providers.base import ProviderProfile
|
||||
|
||||
logger = logging.getLogger(__name__)

# Process-lifetime cache of the model-id list fetched from the public
# OpenRouter catalog. Filled lazily by OpenRouterProfile.fetch_models and
# never invalidated for the life of the process.
_CACHE: list[str] | None = None
||||
class OpenRouterProfile(ProviderProfile):
    """OpenRouter aggregator — provider preferences, reasoning config passthrough."""

    def fetch_models(
        self,
        *,
        api_key: str | None = None,
        timeout: float = 8.0,
    ) -> list[str] | None:
        """Return the model ids from the public OpenRouter catalog (no auth).

        The ``api_key`` argument is accepted for interface parity but ignored —
        the catalog endpoint is public, so the base fetch is invoked with
        ``api_key=None``.

        Note: Tool-call capability filtering is applied by hermes_cli/models.py
        via fetch_openrouter_models() → _openrouter_model_supports_tools(), not
        here. The picker early-returns via the dedicated openrouter path before
        reaching this method, so filtering here would be unreachable.
        """
        global _CACHE  # noqa: PLW0603
        cached = _CACHE
        if cached is not None:
            # Catalog already fetched this process — serve it without a request.
            return cached
        try:
            models = super().fetch_models(api_key=None, timeout=timeout)
        except Exception as exc:
            # Best-effort: a failed catalog fetch degrades to "no list".
            logger.debug("fetch_models(openrouter): %s", exc)
            return None
        if models is not None:
            # Only successful (non-None) results are cached, so a transient
            # failure does not pin an empty answer for the whole process.
            _CACHE = models
        return models

    def build_extra_body(
        self, *, session_id: str | None = None, **context: Any
    ) -> dict[str, Any]:
        """Forward caller-supplied provider preferences as extra_body.provider."""
        preferences = context.get("provider_preferences")
        return {"provider": preferences} if preferences else {}

    def build_api_kwargs_extras(
        self,
        *,
        reasoning_config: dict | None = None,
        supports_reasoning: bool = False,
        **context: Any,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """OpenRouter passes the full reasoning_config dict as extra_body.reasoning."""
        if not supports_reasoning:
            return {}, {}
        reasoning = (
            dict(reasoning_config)
            if reasoning_config is not None
            # No explicit config: enable reasoning with a medium-effort default.
            else {"enabled": True, "effort": "medium"}
        )
        return {"reasoning": reasoning}, {}
||||
|
||||
|
||||
# Module-level singleton profile, registered with the provider registry at
# import time so adding this file is the only change needed for the provider.
openrouter = OpenRouterProfile(
    name="openrouter",
    display_name="OpenRouter",
    description="OpenRouter — unified API for 200+ models",
    aliases=("or",),
    env_vars=("OPENROUTER_API_KEY",),
    signup_url="https://openrouter.ai/keys",
    base_url="https://openrouter.ai/api/v1",
    models_url="https://openrouter.ai/api/v1/models",
    # Shown when the live catalog fetch is unavailable or fails.
    fallback_models=(
        "anthropic/claude-sonnet-4.6",
        "openai/gpt-5.4",
        "deepseek/deepseek-chat",
        "google/gemini-3-flash-preview",
        "qwen/qwen3-plus",
    ),
)

register_provider(openrouter)
|
||||
Loading…
Add table
Add a link
Reference in a new issue