mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-30 01:41:43 +00:00
feat(agent): add lmstudio integration
This commit is contained in:
parent
7d4648461a
commit
214ca943ac
26 changed files with 1137 additions and 40 deletions
|
|
@@ -110,6 +110,12 @@ SERVICE_PROVIDER_NAMES: Dict[str, str] = {
|
|||
DEFAULT_GEMINI_CLOUDCODE_BASE_URL = "cloudcode-pa://google"
|
||||
GEMINI_OAUTH_ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 60 # refresh 60s before expiry
|
||||
|
||||
# LM Studio's default no-auth mode still requires *some* non-empty bearer for
|
||||
# the API-key code paths (auxiliary_client, runtime resolver) to treat the
|
||||
# provider as configured. This sentinel is sent only to LM Studio, never to
|
||||
# any remote service.
|
||||
LMSTUDIO_NOAUTH_PLACEHOLDER = "dummy-lm-api-key"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Provider Registry
|
||||
|
|
@@ -160,6 +166,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
|
|||
auth_type="oauth_external",
|
||||
inference_base_url=DEFAULT_GEMINI_CLOUDCODE_BASE_URL,
|
||||
),
|
||||
"lmstudio": ProviderConfig(
|
||||
id="lmstudio",
|
||||
name="LM Studio",
|
||||
auth_type="api_key",
|
||||
inference_base_url="http://127.0.0.1:1234/v1",
|
||||
api_key_env_vars=("LM_API_KEY",),
|
||||
base_url_env_var="LM_BASE_URL",
|
||||
),
|
||||
"copilot": ProviderConfig(
|
||||
id="copilot",
|
||||
name="GitHub Copilot",
|
||||
|
|
@@ -1155,8 +1169,8 @@ def resolve_provider(
|
|||
"aws": "bedrock", "aws-bedrock": "bedrock", "amazon-bedrock": "bedrock", "amazon": "bedrock",
|
||||
"go": "opencode-go", "opencode-go-sub": "opencode-go",
|
||||
"kilo": "kilocode", "kilo-code": "kilocode", "kilo-gateway": "kilocode",
|
||||
+    "lmstudio": "lmstudio", "lm-studio": "lmstudio", "lm_studio": "lmstudio",
     # Local server aliases — route through the generic custom provider
-    "lmstudio": "custom", "lm-studio": "custom", "lm_studio": "custom",
|
||||
"ollama": "custom", "ollama_cloud": "ollama-cloud",
|
||||
"vllm": "custom", "llamacpp": "custom",
|
||||
"llama.cpp": "custom", "llama-cpp": "custom",
|
||||
|
|
@@ -1203,8 +1217,11 @@ def resolve_provider(
|
|||
continue
|
||||
# GitHub tokens are commonly present for repo/tool access but should not
|
||||
# hijack inference auto-selection unless the user explicitly chooses
|
||||
# Copilot/GitHub Models as the provider.
|
||||
if pid == "copilot":
|
||||
# Copilot/GitHub Models as the provider. LM Studio is a local server
|
||||
# whose availability isn't implied by LM_API_KEY presence (it may be
|
||||
# offline, and the no-auth setup uses a placeholder value), so it
|
||||
# also requires explicit selection.
|
||||
if pid in ("copilot", "lmstudio"):
|
||||
continue
|
||||
for env_var in pconfig.api_key_env_vars:
|
||||
if has_usable_secret(os.getenv(env_var, "")):
|
||||
|
|
@@ -3482,6 +3499,13 @@ def resolve_api_key_provider_credentials(provider_id: str) -> Dict[str, Any]:
|
|||
key_source = ""
|
||||
api_key, key_source = _resolve_api_key_provider_secret(provider_id, pconfig)
|
||||
|
||||
# No-auth LM Studio: substitute a placeholder so runtime / auxiliary_client
|
||||
# see the local server as configured. doctor still reports unconfigured
|
||||
# because get_api_key_provider_status uses the raw secret resolver.
|
||||
if not api_key and provider_id == "lmstudio":
|
||||
api_key = LMSTUDIO_NOAUTH_PLACEHOLDER
|
||||
key_source = key_source or "default"
|
||||
|
||||
env_url = ""
|
||||
if pconfig.base_url_env_var:
|
||||
env_url = os.getenv(pconfig.base_url_env_var, "").strip()
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue