mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-13 03:52:00 +00:00
feat(providers): make all 33 providers pluggable under plugins/model-providers/
Every provider profile is now a self-contained plugin under plugins/model-providers/<name>/, mirroring the plugins/platforms/ pattern established for IRC and Teams. The ProviderProfile ABC stays in providers/; the per-provider profile data moves out. - plugins/model-providers/<name>/__init__.py calls register_provider() - plugins/model-providers/<name>/plugin.yaml declares kind: model-provider - providers/__init__.py._discover_providers() lazily scans bundled plugins then $HERMES_HOME/plugins/model-providers/<name>/ (user override path) - User plugins with the same name override bundled ones (last-writer-wins in register_provider) - Legacy providers/<name>.py layout still supported for back-compat with out-of-tree editable installs - Hermes PluginManager: new kind=model-provider; skipped like memory plugins (providers/ discovery owns them); standalone plugins with register_provider+ProviderProfile in their __init__.py auto-coerce to this kind (same heuristic as memory providers) - skip_names extended to include 'model-providers' so the general PluginManager doesn't double-scan the category - 4 new tests in tests/providers/test_plugin_discovery.py covering bundled discovery, user override, and general-loader isolation - Docs updated: website/docs/developer-guide/adding-providers.md, provider-runtime.md, providers/README.md, plugins/model-providers/README.md No API break: auth.py / config.py / doctor.py / models.py / runtime_provider.py / model_metadata.py / auxiliary_client.py / chat_completions.py / run_agent.py all still consume providers via get_provider_profile() / list_providers() — they just now see plugin-discovered entries instead of pkgutil-iterated ones. Third parties can now drop a single directory into ~/.hermes/plugins/model-providers/<name>/ to add or override an inference provider without touching the repo.
This commit is contained in:
parent
20a4f79ed1
commit
9022804d78
63 changed files with 585 additions and 309 deletions
68
plugins/model-providers/custom/__init__.py
Normal file
68
plugins/model-providers/custom/__init__.py
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
"""Custom / Ollama (local) provider profile.
|
||||
|
||||
Covers any endpoint registered as provider="custom", including local
|
||||
Ollama instances. Key quirks:
|
||||
- ollama_num_ctx → extra_body.options.num_ctx (local context window)
|
||||
- reasoning_config disabled → extra_body.think = False
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from providers import register_provider
|
||||
from providers.base import ProviderProfile
|
||||
|
||||
|
||||
class CustomProfile(ProviderProfile):
    """Profile for user-configured endpoints registered as provider="custom".

    Covers local runtimes such as Ollama. Known quirks handled here:
    - ``ollama_num_ctx`` is forwarded as ``extra_body.options.num_ctx``
      (local context-window size)
    - a disabled reasoning config is forwarded as ``extra_body.think = False``
    """

    def build_api_kwargs_extras(
        self,
        *,
        reasoning_config: dict | None = None,
        ollama_num_ctx: int | None = None,
        **ctx: Any,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Translate Hermes options into Ollama-compatible ``extra_body`` fields.

        Returns a ``(extra_body, extra_kwargs)`` pair; this profile never
        contributes top-level kwargs, so the second element is always empty.
        """
        body: dict[str, Any] = {}

        # Forward the local context-window size when one was requested.
        if ollama_num_ctx:
            body.setdefault("options", {})["num_ctx"] = ollama_num_ctx

        # Ollama's "think" flag is switched off when reasoning is disabled,
        # either explicitly (enabled=False) or via effort="none".
        if isinstance(reasoning_config, dict) and reasoning_config:
            effort = (reasoning_config.get("effort") or "").strip().lower()
            if effort == "none" or reasoning_config.get("enabled", True) is False:
                body["think"] = False

        return body, {}

    def fetch_models(
        self,
        *,
        api_key: str | None = None,
        timeout: float = 8.0,
    ) -> list[str] | None:
        """Custom/Ollama: base_url is user-configured; fetch if set."""
        # Without a user-supplied endpoint there is nothing to query.
        if not self.base_url:
            return None
        return super().fetch_models(api_key=api_key, timeout=timeout)
|
||||
|
||||
|
||||
# Module-level registration: importing this plugin package registers the
# catch-all "custom" profile so local/self-hosted runtimes (Ollama, vLLM,
# llama.cpp, ...) resolve to it under any of the aliases below.
custom = CustomProfile(
    name="custom",
    aliases=("ollama", "local", "vllm", "llamacpp", "llama.cpp", "llama-cpp"),
    env_vars=(),  # No fixed key — custom endpoint
    base_url="",  # User-configured
)

register_provider(custom)
|
||||
Loading…
Add table
Add a link
Reference in a new issue