This commit is contained in:
Yanbo 2026-04-24 19:24:27 -05:00 committed by GitHub
commit 8f4a6fbef6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 149 additions and 0 deletions

View file

@ -18,6 +18,7 @@ suppress delivery.
"""
import logging
import os
import threading
logger = logging.getLogger("hooks.boot-md")
@ -42,17 +43,74 @@ def _build_boot_prompt(content: str) -> str:
)
def _load_boot_model() -> str:
    """Return the model name configured for boot agent runs, or "" if unavailable.

    Accepts either a plain string ``model`` entry or a dict with a
    ``default``/``model`` key; any failure to read the config is logged
    at debug level and treated as "no model configured".
    """
    try:
        from hermes_cli.config import load_config

        configured = (load_config() or {}).get("model", {})
        if isinstance(configured, str):
            return configured
        if isinstance(configured, dict):
            # Prefer the explicit default, then a generic "model" entry.
            for key in ("default", "model"):
                value = configured.get(key)
                if value:
                    return value
            return ""
    except Exception as exc:
        logger.debug("boot-md could not load configured model: %s", exc)
    return ""
def _resolve_boot_agent_kwargs() -> dict:
    """Resolve model/provider credentials for the boot agent.

    Gateway-created agents resolve runtime provider settings before
    constructing AIAgent. The boot hook runs outside normal message
    handling, so it performs the same resolution explicitly instead of
    relying on AIAgent's legacy constructor defaults.

    Returns:
        A dict of AIAgent keyword arguments (model, api_key, base_url,
        provider, api_mode, command, args, credential_pool). Missing
        settings come through as ""/None/[] rather than being omitted.
    """
    model = _load_boot_model()

    runtime = {}
    try:
        from hermes_cli.runtime_provider import resolve_runtime_provider

        requested = os.getenv("HERMES_INFERENCE_PROVIDER")
        runtime = resolve_runtime_provider(requested=requested) or {}
    except Exception as exc:
        logger.warning("boot-md could not resolve runtime provider: %s", exc)

    provider = runtime.get("provider")
    if not model and provider:
        # No explicit model configured: fall back to the provider's own
        # default, best-effort.
        try:
            from hermes_cli.models import get_default_model_for_provider

            model = get_default_model_for_provider(runtime["provider"]) or ""
        except Exception:
            pass

    return {
        "model": model,
        "api_key": runtime.get("api_key"),
        "base_url": runtime.get("base_url"),
        "provider": provider,
        "api_mode": runtime.get("api_mode"),
        "command": runtime.get("command"),
        "args": list(runtime.get("args") or []),
        "credential_pool": runtime.get("credential_pool"),
    }
def _run_boot_agent(content: str) -> None:
"""Spawn a one-shot agent session to execute the boot instructions."""
try:
from run_agent import AIAgent
prompt = _build_boot_prompt(content)
runtime_kwargs = _resolve_boot_agent_kwargs()
agent = AIAgent(
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
max_iterations=20,
**runtime_kwargs,
)
result = agent.run_conversation(prompt)
response = result.get("final_response", "")

View file

@ -0,0 +1,91 @@
"""Tests for the built-in BOOT.md gateway hook."""
from unittest.mock import patch
from gateway.builtin_hooks import boot_md
def test_resolve_boot_agent_kwargs_uses_configured_runtime_provider(monkeypatch):
    """BOOT.md agents should use the same configured model/provider as gateway chat.

    The hook runs outside normal message handling, so it must explicitly resolve
    runtime provider settings before constructing AIAgent.
    """
    from hermes_cli import config as hermes_config
    from hermes_cli import runtime_provider

    monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "openai-codex")
    monkeypatch.setattr(
        hermes_config, "load_config", lambda: {"model": {"default": "gpt-5.5"}}
    )

    def fake_resolve_runtime_provider(requested=None):
        # The hook must forward the env-requested provider verbatim.
        assert requested == "openai-codex"
        return {
            "provider": "openai-codex",
            "api_key": "test-key",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_mode": "codex_responses",
            "command": None,
            "args": (),
            "credential_pool": object(),
        }

    monkeypatch.setattr(
        runtime_provider, "resolve_runtime_provider", fake_resolve_runtime_provider
    )

    resolved = boot_md._resolve_boot_agent_kwargs()

    expected = {
        "model": "gpt-5.5",
        "provider": "openai-codex",
        "api_key": "test-key",
        "base_url": "https://chatgpt.com/backend-api/codex",
        "api_mode": "codex_responses",
    }
    for key, value in expected.items():
        assert resolved[key] == value
    # Tuple args are normalized to a list; the pool object passes through.
    assert resolved["args"] == []
    assert resolved["credential_pool"] is not None
def test_run_boot_agent_passes_runtime_kwargs_to_ai_agent(monkeypatch):
    """The boot hook should not rely on AIAgent constructor defaults."""
    seen = {}

    class StubAgent:
        """Records constructor kwargs and the prompt handed to the agent."""

        def __init__(self, **kwargs):
            seen.update(kwargs)

        def run_conversation(self, prompt):
            seen["prompt"] = prompt
            return {"final_response": "[SILENT]"}

    runtime_kwargs = {
        "model": "gpt-5.5",
        "provider": "openai-codex",
        "api_key": "test-key",
        "base_url": "https://chatgpt.com/backend-api/codex",
        "api_mode": "codex_responses",
        "command": None,
        "args": [],
        "credential_pool": "pool",
    }
    monkeypatch.setattr(
        boot_md, "_resolve_boot_agent_kwargs", lambda: dict(runtime_kwargs)
    )

    with patch("run_agent.AIAgent", StubAgent):
        boot_md._run_boot_agent("Send a startup report.")

    # Fixed boot-agent settings.
    assert seen["quiet_mode"] is True
    assert seen["skip_context_files"] is True
    assert seen["skip_memory"] is True
    assert seen["max_iterations"] == 20
    # Resolved runtime settings must be forwarded untouched.
    assert seen["model"] == "gpt-5.5"
    assert seen["provider"] == "openai-codex"
    assert seen["api_mode"] == "codex_responses"
    assert seen["credential_pool"] == "pool"
    assert "Send a startup report." in seen["prompt"]