mirror of https://github.com/NousResearch/hermes-agent.git
synced 2026-05-13 03:52:00 +00:00
feat: provider modules — ProviderProfile ABC, 33 providers, fetch_models, transport single-path
Introduces the providers/ package — a single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else.

What this PR ships:

- providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes)
- ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model
- 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models
- chat_completions.build_kwargs: profile path via _build_kwargs_from_profile; legacy flag path retained for lmstudio/tencent-tokenhub (which have session-aware reasoning probing that doesn't map cleanly to hooks yet)
- run_agent.py: profile path for all registered providers; legacy-path variable scoping fixed (all flags defined before branching)
- Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER
- GeminiProfile: thinking_config translation (native + openai-compat nested)
- New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly)

Deltas vs original PR (salvaged onto current main):

- Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (these were added to main since the original PR)
- Skipped profiles: lmstudio and tencent-tokenhub stay on the legacy path (their reasoning_effort probing has no clean hook equivalent yet)
- Removed the lmstudio alias from the custom profile (it's a separate provider now)
- Skipped openrouter/custom in the PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding them breaks runtime resolution)
- runtime_provider: profile.api_mode is used only as a fallback when URL detection finds nothing (using it unconditionally was breaking the minimax /v1 override)
- Preserved main's legacy-path improvements: deepseek reasoning_content preservation, the gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc.
- Kept agent/copilot_acp_client.py in place (rejected the PR's relocation — main has 7 fixes landed since, which relocating would revert)
- Kept the _API_KEY_PROVIDER_AUX_MODELS alias for backward compat with existing test imports

Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com>

Closes #14418
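To make the "one providers/<name>.py file, zero edits elsewhere" claim concrete, a hypothetical profile under this scheme might look like the sketch below. Only the field and hook names are taken from the commit message above; the provider name "acme", the base-class import path, the hook signature, and all values are invented for illustration.

# providers/acme.py: hypothetical sketch, not code from this PR.
from typing import Any, Dict, Optional

from providers.base import ProviderProfile  # assumed import path


class AcmeProfile(ProviderProfile):
    # Declarative fields (names from the commit message; values invented).
    name = "acme"
    api_mode = "openai"            # one of the 4 api_modes (exact mode names assumed)
    aliases = ("acme-ai",)
    display_name = "Acme AI"
    env_vars = ("ACME_API_KEY",)
    base_url = "https://api.acme.example/v1"
    models_url = "https://api.acme.example/v1/models"
    auth_type = "api_key"
    fallback_models = ("acme-large", "acme-mini")
    default_headers = {"User-Agent": "hermes-agent"}
    default_max_tokens = 8192

    # One of the 4 overridable hooks; the signature and context keys are assumed.
    def build_extra_body(self, model: str, **ctx: Any) -> Optional[Dict[str, Any]]:
        # Only send a reasoning block when the transport reports support.
        if ctx.get("supports_reasoning"):
            return {"reasoning": {"effort": "medium"}}
        return None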
parent 2b500ed68a
commit 20a4f79ed1
57 changed files with 3149 additions and 177 deletions
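The run_agent.py hunks below resolve profiles through get_provider_profile and fall back to the legacy flag path when it returns None. As a rough sketch of what that lookup could look like, assuming the aliases field from the commit message (the registry layout and name normalization here are guesses, not the repo's actual code):

# providers/__init__.py: illustrative sketch only.
from typing import Dict, Optional

from providers.base import ProviderProfile  # assumed import path

_PROFILES: Dict[str, ProviderProfile] = {}


def register(profile: ProviderProfile) -> None:
    # Index each profile under its canonical name and every alias.
    _PROFILES[profile.name] = profile
    for alias in getattr(profile, "aliases", ()) or ():
        _PROFILES[alias] = profile


def get_provider_profile(provider: Optional[str]) -> Optional[ProviderProfile]:
    # Unknown providers resolve to None, which sends callers down the
    # legacy flag path (see the hunks below).
    if not provider:
        return None
    return _PROFILES.get(provider.strip().lower())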
run_agent.py: 73 changed lines

--- a/run_agent.py
+++ b/run_agent.py
@@ -1461,6 +1461,17 @@ class AIAgent:
         elif base_url_host_matches(effective_base, "chatgpt.com"):
             from agent.auxiliary_client import _codex_cloudflare_headers
             client_kwargs["default_headers"] = _codex_cloudflare_headers(api_key)
+        elif "default_headers" not in client_kwargs:
+            # Fall back to profile.default_headers for providers that
+            # declare custom headers (e.g. Vercel AI Gateway attribution,
+            # Kimi User-Agent on non-kimi.com endpoints).
+            try:
+                from providers import get_provider_profile as _gpf
+                _ph = _gpf(self.provider)
+                if _ph and _ph.default_headers:
+                    client_kwargs["default_headers"] = dict(_ph.default_headers)
+            except Exception:
+                pass
         else:
             # No explicit creds — use the centralized provider router
             from agent.auxiliary_client import resolve_provider_client
@@ -6261,7 +6272,19 @@ class AIAgent:
                 self._client_kwargs.get("api_key", "")
             )
         else:
-            self._client_kwargs.pop("default_headers", None)
+            # No URL-specific headers — check profile.default_headers before clearing.
+            _ph_headers = None
+            try:
+                from providers import get_provider_profile as _gpf2
+                _ph2 = _gpf2(self.provider)
+                if _ph2 and _ph2.default_headers:
+                    _ph_headers = dict(_ph2.default_headers)
+            except Exception:
+                pass
+            if _ph_headers:
+                self._client_kwargs["default_headers"] = _ph_headers
+            else:
+                self._client_kwargs.pop("default_headers", None)
 
     def _swap_credential(self, entry) -> None:
         runtime_key = getattr(entry, "runtime_api_key", None) or getattr(entry, "access_token", "")
@@ -8494,7 +8517,7 @@ class AIAgent:
         _omit_temp = False
         _fixed_temp = None
 
-        # Provider preferences (OpenRouter-specific)
+        # Provider preferences (OpenRouter-style)
         _prefs: Dict[str, Any] = {}
         if self.providers_allowed:
             _prefs["only"] = self.providers_allowed
@@ -8509,16 +8532,16 @@ class AIAgent:
         if self.provider_data_collection:
             _prefs["data_collection"] = self.provider_data_collection
 
-        # Anthropic max output for Claude on OpenRouter/Nous
+        # Claude max-output override on aggregators
         _ant_max = None
         if (_is_or or _is_nous) and "claude" in (self.model or "").lower():
             try:
                 from agent.anthropic_adapter import _get_anthropic_max_output
                 _ant_max = _get_anthropic_max_output(self.model)
             except Exception:
-                pass  # fail open — let the proxy pick its default
+                pass
 
-        # Qwen session metadata precomputed here (promptId is per-call random)
+        # Qwen session metadata
         _qwen_meta = None
         if _is_qwen:
             _qwen_meta = {
@@ -8526,8 +8549,44 @@ class AIAgent:
                 "promptId": str(uuid.uuid4()),
             }
 
-        # Ephemeral max output override — consume immediately so the next
-        # turn doesn't inherit it.
+        # ── Provider profile path (registered providers) ───────────────────
+        # Profiles handle per-provider quirks via hooks. When a profile is
+        # found, delegate fully; otherwise fall through to the legacy flag path.
+        try:
+            from providers import get_provider_profile
+            _profile = get_provider_profile(self.provider)
+        except Exception:
+            _profile = None
+
+        if _profile:
+            _ephemeral_out = getattr(self, "_ephemeral_max_output_tokens", None)
+            if _ephemeral_out is not None:
+                self._ephemeral_max_output_tokens = None
+
+            return _ct.build_kwargs(
+                model=self.model,
+                messages=api_messages,
+                tools=self.tools,
+                base_url=self.base_url,
+                timeout=self._resolved_api_call_timeout(),
+                max_tokens=self.max_tokens,
+                ephemeral_max_output_tokens=_ephemeral_out,
+                max_tokens_param_fn=self._max_tokens_param,
+                reasoning_config=self.reasoning_config,
+                request_overrides=self.request_overrides,
+                session_id=getattr(self, "session_id", None),
+                provider_profile=_profile,
+                ollama_num_ctx=self._ollama_num_ctx,
+                # Context forwarded to profile hooks:
+                provider_preferences=_prefs or None,
+                anthropic_max_output=_ant_max,
+                supports_reasoning=self._supports_reasoning_extra_body(),
+                qwen_session_metadata=_qwen_meta,
+            )
+
+        # ── Legacy flag path ────────────────────────────────────────────
+        # Reached only when get_provider_profile() returns None — i.e. a
+        # completely unknown provider not in providers/ registry.
         _ephemeral_out = getattr(self, "_ephemeral_max_output_tokens", None)
         if _ephemeral_out is not None:
             self._ephemeral_max_output_tokens = None