mirror of https://github.com/NousResearch/hermes-agent.git
synced 2026-05-02 02:01:47 +00:00
feat: provider modules — ProviderProfile ABC, 29 providers, fetch_models, transport single-path

Introduces providers/ as the single source of truth for every inference provider. All 29 providers are declared with data cross-checked against auth.py, runtime_provider.py, and auxiliary_client.py. Rebased onto main (30307a980). Incorporates the post-salvage fix from 56724147e (gmi aux model google/gemini-3.1-flash-lite-preview, already set in providers/gmi.py).
71 lines
2 KiB
Python
"""Custom / Ollama (local) provider profile.
|
|
|
|
Covers any endpoint registered as provider="custom", including local
|
|
Ollama instances. Key quirks:
|
|
- ollama_num_ctx → extra_body.options.num_ctx (local context window)
|
|
- reasoning_config disabled → extra_body.think = False
|
|
"""
|
|
|
|
from typing import Any
|
|
|
|
from providers import register_provider
|
|
from providers.base import ProviderProfile
|
|
|
|
|
|
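
# Illustration of the mapping described in the module docstring (values are
# hypothetical, chosen only to make the resulting extra_body shapes concrete):
#   ollama_num_ctx=8192                 -> {"options": {"num_ctx": 8192}}
#   reasoning_config={"enabled": False} -> {"think": False}
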
class CustomProfile(ProviderProfile):
    """Custom/Ollama local provider — think=false and num_ctx support."""

    def build_api_kwargs_extras(
        self,
        *,
        reasoning_config: dict | None = None,
        ollama_num_ctx: int | None = None,
        **ctx: Any,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        extra_body: dict[str, Any] = {}

        # Ollama context window
        if ollama_num_ctx:
            options = extra_body.get("options", {})
            options["num_ctx"] = ollama_num_ctx
            extra_body["options"] = options

        # Disable thinking when reasoning is turned off
        if reasoning_config and isinstance(reasoning_config, dict):
            _effort = (reasoning_config.get("effort") or "").strip().lower()
            _enabled = reasoning_config.get("enabled", True)
            if _effort == "none" or _enabled is False:
                extra_body["think"] = False

        return extra_body, {}

    def fetch_models(
        self,
        *,
        api_key: str | None = None,
        timeout: float = 8.0,
    ) -> list[str] | None:
        """Custom/Ollama: base_url is user-configured; fetch if set."""
        if not self.base_url:
            return None
        return super().fetch_models(api_key=api_key, timeout=timeout)

custom = CustomProfile(
    name="custom",
    aliases=(
        "ollama",
        "local",
        "lmstudio",
        "lm-studio",
        "lm_studio",
        "vllm",
        "llamacpp",
        "llama.cpp",
        "llama-cpp",
    ),
    env_vars=(),  # No fixed key — custom endpoint
    base_url="",  # User-configured
)

register_provider(custom)
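
# A minimal sketch of exercising the kwargs hook above; the config values
# are hypothetical and the surrounding runtime wiring is assumed, not shown.
if __name__ == "__main__":
    extra_body, headers = custom.build_api_kwargs_extras(
        reasoning_config={"enabled": False},
        ollama_num_ctx=8192,
    )
    # Expected: {'options': {'num_ctx': 8192}, 'think': False} and {}
    print(extra_body, headers)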