mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
Introduces providers/ package — single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else. What this PR ships: - providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes) - ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model - 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models - chat_completions.build_kwargs: profile path via _build_kwargs_from_profile, legacy flag path retained for lmstudio/tencent-tokenhub (which have session-aware reasoning probing that doesn't map cleanly to hooks yet) - run_agent.py: profile path for all registered providers; legacy path variable scoping fixed (all flags defined before branching) - Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER - GeminiProfile: thinking_config translation (native + openai-compat nested) - New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly) Deltas vs original PR (salvaged onto current main): - Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (were added to main since original PR) - Skipped profiles: lmstudio, tencent-tokenhub stay on legacy path (their reasoning_effort probing has no clean hook equivalent yet) - Removed lmstudio alias from custom profile (it's a separate provider now) - Skipped openrouter/custom from PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding breaks runtime resolution) - runtime_provider: profile.api_mode only as fallback when URL detection finds nothing (was breaking minimax /v1 override) - Preserved main's legacy-path improvements: deepseek reasoning_content 
preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc. - Kept agent/copilot_acp_client.py in place (rejected PR's relocation — main has 7 fixes landed since; relocation would revert them) - _API_KEY_PROVIDER_AUX_MODELS alias kept for backward compat with existing test imports Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com> Closes #14418
72 lines
2.6 KiB
Python
72 lines
2.6 KiB
Python
"""Google Gemini provider profiles.
|
|
|
|
gemini: Google AI Studio (API key) — uses GeminiNativeClient
|
|
google-gemini-cli: Google Cloud Code Assist (OAuth) — uses GeminiCloudCodeClient
|
|
|
|
Both report api_mode="chat_completions" but use custom native clients
|
|
that bypass the standard OpenAI transport. The profile captures auth
|
|
and endpoint metadata for auth.py / runtime_provider.py migration, and
|
|
carries the thinking_config translation hook so the transport's profile
|
|
path produces the same extra_body shape the legacy flag path did.
|
|
"""
|
|
|
|
from typing import Any

from providers import register_provider
from providers.base import ProviderProfile

|
class GeminiProfile(ProviderProfile):
    """Gemini — translate reasoning_config to thinking_config in extra_body."""

    def build_extra_body(
        self, *, session_id: str | None = None, **context: Any
    ) -> dict[str, Any]:
        """Emit extra_body.thinking_config (native) or extra_body.extra_body.google.thinking_config
        (OpenAI-compat /openai subpath), mirroring the legacy path's behavior.
        """
        # Imported lazily: the transport module imports providers at load
        # time, so a top-level import here would create a cycle.
        from agent.transports.chat_completions import (
            _build_gemini_thinking_config,
            _is_gemini_openai_compat_base_url,
            _snake_case_gemini_thinking_config,
        )

        raw_config = _build_gemini_thinking_config(
            context.get("model") or "", context.get("reasoning_config")
        )
        if not raw_config:
            # Nothing to translate — no thinking settings for this model/config.
            return {}

        effective_base = context.get("base_url") or self.base_url
        compat = self.name == "gemini" and _is_gemini_openai_compat_base_url(
            effective_base
        )
        if not compat:
            # Native endpoint: thinking_config sits at the top level.
            return {"thinking_config": raw_config}

        # OpenAI-compat /openai subpath expects snake_case keys nested
        # under extra_body.google; an empty conversion yields no body.
        snake_config = _snake_case_gemini_thinking_config(raw_config)
        if not snake_config:
            return {}
        return {"extra_body": {"google": {"thinking_config": snake_config}}}
|
# Google AI Studio: public generative-language API, API-key auth.
gemini = GeminiProfile(
    name="gemini",
    api_mode="chat_completions",
    auth_type="api_key",
    aliases=("google", "google-gemini", "google-ai-studio"),
    env_vars=("GOOGLE_API_KEY", "GEMINI_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta",
    default_aux_model="gemini-3-flash-preview",
)
|
# Google Cloud Code Assist: external OAuth flow, so no API-key env vars.
google_gemini_cli = GeminiProfile(
    name="google-gemini-cli",
    api_mode="chat_completions",
    auth_type="oauth_external",
    aliases=("gemini-cli", "gemini-oauth"),
    env_vars=(),  # OAuth — no API key
    base_url="cloudcode-pa://google",  # Cloud Code Assist internal scheme
)
|
# Wire both profiles into the global provider registry (gemini first).
for _profile in (gemini, google_gemini_cli):
    register_provider(_profile)