mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
Introduces providers/ package — single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else. What this PR ships: - providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes) - ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model - 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models - chat_completions.build_kwargs: profile path via _build_kwargs_from_profile, legacy flag path retained for lmstudio/tencent-tokenhub (which have session-aware reasoning probing that doesn't map cleanly to hooks yet) - run_agent.py: profile path for all registered providers; legacy path variable scoping fixed (all flags defined before branching) - Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER - GeminiProfile: thinking_config translation (native + openai-compat nested) - New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly) Deltas vs original PR (salvaged onto current main): - Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (were added to main since original PR) - Skipped profiles: lmstudio, tencent-tokenhub stay on legacy path (their reasoning_effort probing has no clean hook equivalent yet) - Removed lmstudio alias from custom profile (it's a separate provider now) - Skipped openrouter/custom from PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding breaks runtime resolution) - runtime_provider: profile.api_mode only as fallback when URL detection finds nothing (was breaking minimax /v1 override) - Preserved main's legacy-path improvements: deepseek reasoning_content 
preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc. - Kept agent/copilot_acp_client.py in place (rejected PR's relocation — main has 7 fixes landed since; relocation would revert them) - _API_KEY_PROVIDER_AUX_MODELS alias kept for backward compat with existing test imports Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com> Closes #14418
52 lines
1.6 KiB
Python
"""Native Anthropic provider profile."""
|
|
|
|
import json
|
|
import logging
|
|
import urllib.request
|
|
|
|
from providers import register_provider
|
|
from providers.base import ProviderProfile
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class AnthropicProfile(ProviderProfile):
    """Native Anthropic — uses x-api-key header, not Bearer."""

    def fetch_models(
        self,
        *,
        api_key: str | None = None,
        timeout: float = 8.0,
    ) -> list[str] | None:
        """Return available model ids, or None if the lookup fails.

        Anthropic authenticates with the x-api-key header plus an
        anthropic-version header rather than a Bearer token.
        """
        # Without a key the endpoint would reject us anyway; bail early.
        if not api_key:
            return None

        headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            "Accept": "application/json",
        }
        try:
            request = urllib.request.Request(
                "https://api.anthropic.com/v1/models",
                headers=headers,
            )
            with urllib.request.urlopen(request, timeout=timeout) as response:
                payload = json.loads(response.read().decode())
            # Keep only well-formed entries that actually carry an id.
            return [
                entry["id"]
                for entry in payload.get("data", [])
                if isinstance(entry, dict) and "id" in entry
            ]
        except Exception as exc:
            # Best-effort: model discovery is optional, so any failure
            # (network, auth, malformed body) is logged and swallowed.
            logger.debug("fetch_models(anthropic): %s", exc)
            return None
|
|
|
|
|
|
# Singleton profile instance; registering it at import time lets the
# providers package discover Anthropic with no edits elsewhere.
anthropic = AnthropicProfile(
    # Identity and routing.
    name="anthropic",
    api_mode="anthropic_messages",
    aliases=("claude", "claude-oauth", "claude-code"),
    # Transport and credentials.
    base_url="https://api.anthropic.com",
    auth_type="api_key",
    env_vars=("ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN"),
    # Lightweight model used for auxiliary tasks.
    default_aux_model="claude-haiku-4-5-20251001",
)

register_provider(anthropic)
|