mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-28 01:21:43 +00:00
feat: add managed tool gateway and Nous subscription support
- add managed modal and gateway-backed tool integrations
- improve CLI setup, auth, and configuration for subscriber flows
- expand tests and docs for managed tool support
This commit is contained in:
parent
cbf195e806
commit
95dc9aaa75
44 changed files with 4567 additions and 423 deletions
|
|
@ -1,4 +1,6 @@
|
|||
import json
|
||||
import sys
|
||||
import types
|
||||
|
||||
from hermes_cli.auth import _update_config_for_provider, get_active_provider
|
||||
from hermes_cli.config import load_config, save_config
|
||||
|
|
@ -136,6 +138,8 @@ def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, mon
|
|||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 2 # OpenAI Codex
|
||||
if question == "Configure vision:":
|
||||
return len(choices) - 1
|
||||
if question == "Select default model:":
|
||||
return 0
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
|
|
@ -176,3 +180,171 @@ def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, mon
|
|||
assert reloaded["model"]["provider"] == "openai-codex"
|
||||
assert reloaded["model"]["default"] == "gpt-5.2-codex"
|
||||
assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"
|
||||
|
||||
|
||||
def test_nous_setup_sets_managed_openai_tts_when_unconfigured(tmp_path, monkeypatch, capsys):
    """Choosing the Nous provider with no TTS configured should enable managed OpenAI TTS."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)

    config = load_config()

    # Scripted answers for each interactive menu, keyed by prompt text.
    choice_script = {
        "Select your inference provider:": lambda choices: 1,
        "Configure vision:": lambda choices: len(choices) - 1,
        "Select default model:": lambda choices: len(choices) - 1,
    }

    def scripted_prompt_choice(question, choices, default=0):
        answer = choice_script.get(question)
        if answer is None:
            raise AssertionError(f"Unexpected prompt_choice call: {question}")
        return answer(choices)

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", scripted_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])

    def stub_login_nous(*args, **kwargs):
        # Simulate a successful Nous login by writing auth state and updating config.
        auth_path = tmp_path / "auth.json"
        auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {"nous": {"access_token": "nous-token"}}}))
        _update_config_for_provider("nous", "https://inference.example.com/v1")

    monkeypatch.setattr("hermes_cli.auth._login_nous", stub_login_nous)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["gemini-3-flash"],
    )

    setup_model_provider(config)

    captured = capsys.readouterr().out
    assert config["tts"]["provider"] == "openai"
    assert "Nous subscription enables managed web tools" in captured
    assert "OpenAI TTS via your Nous subscription" in captured
|
||||
|
||||
|
||||
def test_nous_setup_preserves_existing_tts_provider(tmp_path, monkeypatch):
    """An already-configured TTS provider must survive Nous provider setup untouched."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)

    config = load_config()
    config["tts"] = {"provider": "elevenlabs"}

    # Canned menu answers, keyed by the exact prompt text.
    menu_answers = {
        "Select your inference provider:": lambda choices: 1,
        "Configure vision:": lambda choices: len(choices) - 1,
        "Select default model:": lambda choices: len(choices) - 1,
    }

    def scripted_prompt_choice(question, choices, default=0):
        try:
            pick = menu_answers[question]
        except KeyError:
            raise AssertionError(f"Unexpected prompt_choice call: {question}") from None
        return pick(choices)

    def stub_login_nous(*args, **kwargs):
        # Write the auth file a real Nous login would produce.
        payload = {"active_provider": "nous", "providers": {"nous": {"access_token": "nous-token"}}}
        (tmp_path / "auth.json").write_text(json.dumps(payload))

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", scripted_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth._login_nous", stub_login_nous)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["gemini-3-flash"],
    )

    setup_model_provider(config)

    assert config["tts"]["provider"] == "elevenlabs"
|
||||
|
||||
|
||||
def test_modal_setup_can_use_nous_subscription_without_modal_creds(tmp_path, monkeypatch, capsys):
    """Subscribers can pick managed Modal billing without ever being asked for Modal tokens."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    config = load_config()

    # Menu answers: Modal backend (index 2), then managed billing (index 0).
    backend_answers = {
        "Select terminal backend:": 2,
        "Select how Modal execution should be billed:": 0,
    }

    def scripted_prompt_choice(question, choices, default=0):
        if question not in backend_answers:
            raise AssertionError(f"Unexpected prompt_choice call: {question}")
        return backend_answers[question]

    def rejecting_prompt(message, *args, **kwargs):
        # No free-text prompt is expected at all; a Modal token prompt is the
        # specific regression this test guards against.
        assert "Modal Token" not in message
        raise AssertionError(f"Unexpected prompt call: {message}")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", scripted_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", rejecting_prompt)
    monkeypatch.setattr("hermes_cli.setup._prompt_container_resources", lambda config: None)
    monkeypatch.setattr(
        "hermes_cli.setup.get_nous_subscription_features",
        lambda config: type("Features", (), {"nous_auth_present": True})(),
    )
    gateway_stub = types.SimpleNamespace(
        is_managed_tool_gateway_ready=lambda vendor: vendor == "modal",
        resolve_managed_tool_gateway=lambda vendor: None,
    )
    monkeypatch.setitem(sys.modules, "tools.managed_tool_gateway", gateway_stub)

    from hermes_cli.setup import setup_terminal_backend

    setup_terminal_backend(config)

    captured = capsys.readouterr().out
    assert config["terminal"]["backend"] == "modal"
    assert config["terminal"]["modal_mode"] == "managed"
    assert "bill to your subscription" in captured
|
||||
|
||||
|
||||
def test_modal_setup_persists_direct_mode_when_user_chooses_their_own_account(tmp_path, monkeypatch):
    """Choosing self-billed Modal stores direct mode and collects the user's own tokens."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.delenv("MODAL_TOKEN_ID", raising=False)
    monkeypatch.delenv("MODAL_TOKEN_SECRET", raising=False)
    config = load_config()

    # Menu answers: Modal backend (index 2), then the user's own account (index 1).
    billing_answers = {
        "Select terminal backend:": 2,
        "Select how Modal execution should be billed:": 1,
    }

    def scripted_prompt_choice(question, choices, default=0):
        if question not in billing_answers:
            raise AssertionError(f"Unexpected prompt_choice call: {question}")
        return billing_answers[question]

    # Free-text answers consumed in order: token id, token secret, then blank.
    typed_answers = iter(["token-id", "token-secret", ""])

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", scripted_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: next(typed_answers))
    monkeypatch.setattr("hermes_cli.setup._prompt_container_resources", lambda config: None)
    monkeypatch.setattr(
        "hermes_cli.setup.get_nous_subscription_features",
        lambda config: type("Features", (), {"nous_auth_present": True})(),
    )
    gateway_stub = types.SimpleNamespace(
        is_managed_tool_gateway_ready=lambda vendor: vendor == "modal",
        resolve_managed_tool_gateway=lambda vendor: None,
    )
    monkeypatch.setitem(sys.modules, "tools.managed_tool_gateway", gateway_stub)
    monkeypatch.setitem(sys.modules, "swe_rex", object())

    from hermes_cli.setup import setup_terminal_backend

    setup_terminal_backend(config)

    assert config["terminal"]["backend"] == "modal"
    assert config["terminal"]["modal_mode"] == "direct"
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue