mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 03:01:47 +00:00
Every provider profile is now a self-contained plugin under plugins/model-providers/<name>/, mirroring the plugins/platforms/ pattern established for IRC and Teams. The ProviderProfile ABC stays in providers/; the per-provider profile data moves out. - plugins/model-providers/<name>/__init__.py calls register_provider() - plugins/model-providers/<name>/plugin.yaml declares kind: model-provider - providers/__init__.py._discover_providers() lazily scans bundled plugins then $HERMES_HOME/plugins/model-providers/<name>/ (user override path) - User plugins with the same name override bundled ones (last-writer-wins in register_provider) - Legacy providers/<name>.py layout still supported for back-compat with out-of-tree editable installs - Hermes PluginManager: new kind=model-provider; skipped like memory plugins (providers/ discovery owns them); standalone plugins with register_provider+ProviderProfile in their __init__.py auto-coerce to this kind (same heuristic as memory providers) - skip_names extended to include 'model-providers' so the general PluginManager doesn't double-scan the category - 4 new tests in tests/providers/test_plugin_discovery.py covering bundled discovery, user override, and general-loader isolation - Docs updated: website/docs/developer-guide/adding-providers.md, provider-runtime.md, providers/README.md, plugins/model-providers/README.md No API break: auth.py / config.py / doctor.py / models.py / runtime_provider.py / model_metadata.py / auxiliary_client.py / chat_completions.py / run_agent.py all still consume providers via get_provider_profile() / list_providers() — they just now see plugin-discovered entries instead of pkgutil-iterated ones. Third parties can now drop a single directory into ~/.hermes/plugins/model-providers/<name>/ to add or override an inference provider without touching the repo.
43 lines
1.3 KiB
Python
43 lines
1.3 KiB
Python
"""Vercel AI Gateway provider profile.
|
|
|
|
AI Gateway routes to multiple backends. Hermes sends attribution
|
|
headers and full reasoning config passthrough.
|
|
"""

from typing import Any

from providers import register_provider
from providers.base import ProviderProfile


class VercelAIGatewayProfile(ProviderProfile):
    """Vercel AI Gateway — attribution headers + reasoning passthrough."""

    def build_api_kwargs_extras(
        self,
        *,
        reasoning_config: dict | None = None,
        supports_reasoning: bool = True,
        **ctx: Any,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Build the (extra_body, extra_headers) pair for a chat request.

        Reasoning-capable models receive either the caller's reasoning
        config verbatim (shallow-copied so the caller's dict is never
        mutated downstream) or a default of medium effort. Models without
        reasoning support get no extras at all. The second element of the
        returned tuple (extra headers) is always empty for this provider.
        """
        if not supports_reasoning:
            return {}, {}
        reasoning = (
            dict(reasoning_config)
            if reasoning_config is not None
            # No explicit config: enable reasoning at a middle-of-the-road effort.
            else {"enabled": True, "effort": "medium"}
        )
        return {"reasoning": reasoning}, {}
|
|
|
|
|
|
# Attribution headers sent on every request so the gateway can credit
# traffic back to the Hermes agent project.
_ATTRIBUTION_HEADERS: dict[str, str] = {
    "HTTP-Referer": "https://hermes-agent.nousresearch.com",
    "X-Title": "Hermes Agent",
}

# Canonical profile instance. Aliases cover the common spelling variants
# users type for this provider; auth comes from AI_GATEWAY_API_KEY.
vercel = VercelAIGatewayProfile(
    name="ai-gateway",
    aliases=("vercel", "vercel-ai-gateway", "ai_gateway", "aigateway"),
    env_vars=("AI_GATEWAY_API_KEY",),
    base_url="https://ai-gateway.vercel.sh/v1",
    default_headers=_ATTRIBUTION_HEADERS,
    default_aux_model="google/gemini-3-flash",
)

# Make the profile discoverable via get_provider_profile()/list_providers().
register_provider(vercel)