mirror of https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
fix: resolve ollama provider alias mismatch
This commit is contained in:
parent 1ccd063786
commit 80c3cffbff
3 changed files with 75 additions and 3 deletions
@@ -291,6 +291,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
         api_key_env_vars=(),
         base_url_env_var="BEDROCK_BASE_URL",
     ),
+    "ollama": ProviderConfig(
+        id="ollama",
+        name="Ollama",
+        auth_type="api_key",
+        inference_base_url="http://127.0.0.1:11434/v1",
+        api_key_env_vars=("OLLAMA_API_KEY",),
+        base_url_env_var="OLLAMA_BASE_URL",
+    ),
 }
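The new registry entry gives the local Ollama daemon first-class defaults: an OpenAI-compatible localhost base URL, OLLAMA_API_KEY as the credential source, and OLLAMA_BASE_URL as the override hook. A minimal sketch of how such an entry might resolve to a runtime endpoint, assuming the usual env-var-beats-default precedence (the field names mirror the diff; the helper itself is illustrative, not code from this repo):

import os
from dataclasses import dataclass

@dataclass
class Cfg:  # minimal stand-in for ProviderConfig; only the fields used here
    inference_base_url: str
    base_url_env_var: str

def endpoint_for(cfg: Cfg) -> str:
    # An explicit env override wins; otherwise fall back to the registry default.
    return os.environ.get(cfg.base_url_env_var, "") or cfg.inference_base_url

ollama = Cfg("http://127.0.0.1:11434/v1", "OLLAMA_BASE_URL")
print(endpoint_for(ollama))  # local default unless OLLAMA_BASE_URL is set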

@@ -945,9 +953,10 @@ def resolve_provider(
        "aws": "bedrock", "aws-bedrock": "bedrock", "amazon-bedrock": "bedrock", "amazon": "bedrock",
        "go": "opencode-go", "opencode-go-sub": "opencode-go",
        "kilo": "kilocode", "kilo-code": "kilocode", "kilo-gateway": "kilocode",
-       # Local server aliases — route through the generic custom provider
+       # Local server aliases — route named built-ins directly and keep the
+       # generic custom provider for endpoints without first-class registry entries.
        "lmstudio": "custom", "lm-studio": "custom", "lm_studio": "custom",
-       "ollama": "custom", "ollama_cloud": "ollama-cloud",
+       "ollama": "ollama", "ollama_cloud": "ollama-cloud",
        "vllm": "custom", "llamacpp": "custom",
        "llama.cpp": "custom", "llama-cpp": "custom",
    }
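The actual fix is one line in this alias table: bare "ollama" now canonicalizes to the new built-in instead of being lumped in with the generic custom endpoints. A before/after spot-check (alias values copied from the diff; the lookup helper is illustrative):

ALIASES_BEFORE = {"ollama": "custom", "ollama_cloud": "ollama-cloud"}
ALIASES_AFTER = {"ollama": "ollama", "ollama_cloud": "ollama-cloud"}

def resolve(name: str, aliases: dict) -> str:
    # Fall back to the name itself when no alias entry exists.
    return aliases.get(name.strip().lower(), name)

assert resolve("ollama", ALIASES_BEFORE) == "custom"   # old: generic custom route
assert resolve("ollama", ALIASES_AFTER) == "ollama"    # new: first-class built-in
assert resolve("ollama_cloud", ALIASES_AFTER) == "ollama-cloud"  # cloud unchanged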

@@ -145,6 +145,12 @@ HERMES_OVERLAYS: Dict[str, HermesOverlay] = {
         transport="openai_chat",
         base_url_env_var="OLLAMA_BASE_URL",
     ),
+    "ollama": HermesOverlay(
+        transport="openai_chat",
+        extra_env_vars=("OLLAMA_API_KEY",),
+        base_url_override="http://127.0.0.1:11434/v1",
+        base_url_env_var="OLLAMA_BASE_URL",
+    ),
 }
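The overlay mirrors the registry entry on the Hermes side: chat-completions transport, OLLAMA_API_KEY as an extra credential source, and the same local base URL with an env override. A hypothetical sketch of how these fields might feed endpoint and key selection (only the field values come from the diff; the merge logic is an assumption):

import os

overlay = {  # values from the new "ollama" HermesOverlay entry
    "extra_env_vars": ("OLLAMA_API_KEY",),
    "base_url_override": "http://127.0.0.1:11434/v1",
    "base_url_env_var": "OLLAMA_BASE_URL",
}

# Base URL: the env var beats the hard-coded override.
base_url = os.environ.get(overlay["base_url_env_var"], "") or overlay["base_url_override"]
# API key: the first extra env var that is actually set, if any.
api_key = next((os.environ[v] for v in overlay["extra_env_vars"] if v in os.environ), None)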

@@ -255,7 +261,7 @@ ALIASES: Dict[str, str] = {
     "lmstudio": "lmstudio",
     "lm-studio": "lmstudio",
     "lm_studio": "lmstudio",
-    "ollama": "custom",  # bare "ollama" = local; use "ollama-cloud" for cloud
+    "ollama": "ollama",
     "vllm": "local",
     "llamacpp": "local",
     "llama.cpp": "local",

@@ -275,6 +281,7 @@ _LABEL_OVERRIDES: Dict[str, str] = {
     "local": "Local endpoint",
     "bedrock": "AWS Bedrock",
     "ollama-cloud": "Ollama Cloud",
+    "ollama": "Ollama",
 }
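With "ollama" now canonical, the label override is what makes the UI print "Ollama" rather than a generic fallback. A sketch of the presumable two-step lookup (table values from the diff; the title-case fallback is an assumption):

ALIASES = {"ollama": "ollama", "lm-studio": "lmstudio"}
_LABEL_OVERRIDES = {"ollama-cloud": "Ollama Cloud", "ollama": "Ollama"}

def label(name: str) -> str:
    canonical = ALIASES.get(name, name)  # normalize the user-facing alias
    return _LABEL_OVERRIDES.get(canonical, canonical.title())

assert label("ollama") == "Ollama"  # matches result.provider_label in the new test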

@@ -21,6 +21,62 @@ from unittest.mock import patch, MagicMock
 class TestOllamaCloudCredentials:
     """runtime_provider should use OLLAMA_API_KEY for ollama.com endpoints."""
 
+
+class TestOllamaProviderAliasRegression:
+    """Regression tests for the built-in ollama provider alias."""
+
+    def test_resolve_provider_full_keeps_ollama_canonical(self):
+        """`ollama` should resolve to the built-in ollama provider, not ollama-cloud."""
+        from hermes_cli.providers import resolve_provider_full
+        from hermes_cli.auth import resolve_provider
+
+        resolved = resolve_provider_full("ollama", user_providers={}, custom_providers=[])
+
+        assert resolved is not None
+        assert resolved.id == "ollama"
+        assert resolved.name == "Ollama"
+        assert resolve_provider("ollama") == "ollama"
+
+    def test_switch_model_explicit_ollama_provider_does_not_map_to_ollama_cloud(self, monkeypatch):
+        """`/model ... --provider ollama` should stay on the built-in ollama provider."""
+        from hermes_cli.model_switch import switch_model
+
+        monkeypatch.setattr(
+            "hermes_cli.runtime_provider.resolve_runtime_provider",
+            lambda requested: {
+                "provider": requested,
+                "api_key": "ollama-test-key",
+                "base_url": "http://127.0.0.1:11434/v1",
+                "api_mode": "chat_completions",
+            },
+        )
+        monkeypatch.setattr("hermes_cli.models.validate_requested_model", lambda *a, **k: {
+            "accepted": True,
+            "persist": True,
+            "recognized": True,
+            "message": None,
+        })
+        monkeypatch.setattr("hermes_cli.model_switch.get_model_info", lambda *a, **k: None)
+        monkeypatch.setattr("hermes_cli.model_switch.get_model_capabilities", lambda *a, **k: None)
+
+        result = switch_model(
+            raw_input="qwen3.5:cloud",
+            current_provider="openai-codex",
+            current_model="gpt-5.4",
+            current_base_url="https://chatgpt.com/backend-api/codex",
+            current_api_key="dummy",
+            explicit_provider="ollama",
+            user_providers={},
+            custom_providers=[],
+        )
+
+        assert result.success is True
+        assert result.target_provider == "ollama"
+        assert result.provider_label == "Ollama"
+        assert result.new_model == "qwen3.5:cloud"
+        assert result.base_url == "http://127.0.0.1:11434/v1"
+
+
     def test_ollama_api_key_used_for_ollama_endpoint(self, monkeypatch, tmp_path):
         """When base_url contains ollama.com, OLLAMA_API_KEY is in the candidate chain."""
         monkeypatch.setenv("OLLAMA_API_KEY", "test-ollama-key-12345")
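The regression class can presumably be run on its own with pytest's keyword filter:

python -m pytest -k OllamaProviderAliasRegression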