mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-26 01:01:40 +00:00
fix(config): add stale timeout settings
This commit is contained in:
parent
440764e013
commit
03e3c22e86
6 changed files with 267 additions and 31 deletions
|
|
@ -2,7 +2,10 @@ from __future__ import annotations
|
|||
|
||||
import textwrap
|
||||
|
||||
from hermes_cli.timeouts import get_provider_request_timeout
|
||||
from hermes_cli.timeouts import (
|
||||
get_provider_request_timeout,
|
||||
get_provider_stale_timeout,
|
||||
)
|
||||
|
||||
|
||||
def _write_config(tmp_path, body: str) -> None:
|
||||
|
|
@ -40,6 +43,37 @@ def test_provider_timeout_used_when_no_model_override(monkeypatch, tmp_path):
|
|||
assert get_provider_request_timeout("ollama-local", "qwen3:32b") == 300.0
|
||||
|
||||
|
||||
def test_model_stale_timeout_override_wins(monkeypatch, tmp_path):
    """A per-model stale_timeout_seconds overrides the provider-level value."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # NOTE(review): YAML indentation reconstructed from a mangled source —
    # confirm nesting (models under the provider key) against the real file.
    config_body = """\
providers:
  openai-codex:
    stale_timeout_seconds: 600
    models:
      gpt-5.4:
        stale_timeout_seconds: 1800
"""
    _write_config(tmp_path, config_body)

    # Model-specific override (1800) must win over the provider default (600).
    assert get_provider_stale_timeout("openai-codex", "gpt-5.4") == 1800.0
||||
def test_provider_stale_timeout_used_when_no_model_override(monkeypatch, tmp_path):
    """Without a model entry, the provider-level stale timeout applies."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # NOTE(review): YAML indentation reconstructed from a mangled source.
    config_body = """\
providers:
  openai-codex:
    stale_timeout_seconds: 900
"""
    _write_config(tmp_path, config_body)

    # No models section at all, so the provider value is the answer.
    assert get_provider_stale_timeout("openai-codex", "gpt-5.4") == 900.0
||||
def test_missing_timeout_returns_none(monkeypatch, tmp_path):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_write_config(
|
||||
|
|
@ -78,6 +112,24 @@ def test_invalid_timeout_values_return_none(monkeypatch, tmp_path):
|
|||
assert get_provider_request_timeout("ollama-local") is None
|
||||
|
||||
|
||||
def test_invalid_stale_timeout_values_return_none(monkeypatch, tmp_path):
    """Non-numeric and non-positive stale timeouts are both rejected as None."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    # NOTE(review): YAML indentation reconstructed from a mangled source.
    config_body = """\
providers:
  openai-codex:
    stale_timeout_seconds: "slow"
    models:
      gpt-5.4:
        stale_timeout_seconds: -1
"""
    _write_config(tmp_path, config_body)

    # Model override is -1 (non-positive) -> None; falling back for a model
    # with no override hits the provider's "slow" (non-numeric) -> None too.
    assert get_provider_stale_timeout("openai-codex", "gpt-5.4") is None
    assert get_provider_stale_timeout("openai-codex", "gpt-5.5") is None
||||
def test_anthropic_adapter_honors_timeout_kwarg():
|
||||
"""build_anthropic_client(timeout=X) overrides the 900s default read timeout."""
|
||||
pytest = __import__("pytest")
|
||||
|
|
@ -158,3 +210,99 @@ def test_resolved_api_call_timeout_priority(monkeypatch, tmp_path):
|
|||
# Case C: no config, no env → 1800.0 default
|
||||
monkeypatch.delenv("HERMES_API_TIMEOUT", raising=False)
|
||||
assert agent2._resolved_api_call_timeout() == 1800.0
|
||||
|
||||
|
||||
def test_resolved_api_call_stale_timeout_priority(monkeypatch, tmp_path):
    """AIAgent stale timeout honors config > env > default priority."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")

    # Case A/B setup: config provides both a model override and a provider
    # default, and the env var is also set — config must win over env.
    # NOTE(review): YAML indentation reconstructed from a mangled source.
    _write_config(tmp_path, """\
providers:
  openai-codex:
    stale_timeout_seconds: 600
    models:
      gpt-5.4:
        stale_timeout_seconds: 1800
""")
    monkeypatch.setenv("HERMES_API_CALL_STALE_TIMEOUT", "999")

    from run_agent import AIAgent
    agent = AIAgent(
        model="gpt-5.4",
        provider="openai-codex",
        api_key="sk-dummy",
        base_url="https://chatgpt.com/backend-api/codex",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    # Case A: model-level config override beats everything.
    assert agent._resolved_api_call_stale_timeout_base() == (1800.0, False)

    # Case B: switch to a model with no override -> provider-level config.
    agent.model = "gpt-5.5"
    assert agent._resolved_api_call_stale_timeout_base() == (600.0, False)

    # Case C setup: wipe the config so only the env var remains, then reload
    # the module chain (config -> timeouts -> run_agent) so cached config
    # state is rebuilt from the now-empty file.
    _write_config(tmp_path, "")
    import importlib
    from hermes_cli import config as cfg_mod
    importlib.reload(cfg_mod)
    from hermes_cli import timeouts as to_mod
    importlib.reload(to_mod)
    import run_agent as ra_mod
    importlib.reload(ra_mod)

    agent2 = ra_mod.AIAgent(
        model="gpt-5.4",
        provider="openai-codex",
        api_key="sk-dummy",
        base_url="https://chatgpt.com/backend-api/codex",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    # Case C: no config -> env var value is used.
    assert agent2._resolved_api_call_stale_timeout_base() == (999.0, False)

    # Case D: no config, no env -> built-in default (300.0, flagged True).
    monkeypatch.delenv("HERMES_API_CALL_STALE_TIMEOUT", raising=False)
    assert agent2._resolved_api_call_stale_timeout_base() == (300.0, True)
||||
def test_default_non_stream_stale_timeout_auto_disables_for_local_endpoints(monkeypatch, tmp_path):
    """With no explicit stale timeout, a local base_url disables the limit."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")
    monkeypatch.delenv("HERMES_API_CALL_STALE_TIMEOUT", raising=False)

    from run_agent import AIAgent

    local_agent_kwargs = dict(
        model="qwen3:32b",
        provider="ollama-local",
        api_key="sk-dummy",
        base_url="http://127.0.0.1:11434/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    agent = AIAgent(**local_agent_kwargs)

    # No config, no env -> the default stale timeout is auto-disabled
    # (infinite) for a loopback endpoint.
    assert agent._compute_non_stream_stale_timeout([]) == float("inf")
||||
def test_explicit_non_stream_stale_timeout_is_honored_for_local_endpoints(monkeypatch, tmp_path):
    """An explicitly configured stale timeout applies even to local endpoints."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")
    monkeypatch.setenv("HERMES_API_CALL_STALE_TIMEOUT", "300")

    from run_agent import AIAgent

    local_agent_kwargs = dict(
        model="qwen3:32b",
        provider="ollama-local",
        api_key="sk-dummy",
        base_url="http://127.0.0.1:11434/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    agent = AIAgent(**local_agent_kwargs)

    # Explicit env value wins over the local-endpoint auto-disable behavior.
    assert agent._compute_non_stream_stale_timeout([]) == 300.0
|||
Loading…
Add table
Add a link
Reference in a new issue