hermes-agent/hermes_cli/oneshot.py
brooklyn! 5e6e8b6af3
fix(tui): honor launch toolsets (#17623)
* fix(tui): honor launch toolsets

Carry chat --toolsets through the TUI launcher so TUI sessions use the same per-session tool scope as the classic CLI.

* fix(tui): parse top-level toolsets flag

Allow top-level hermes --tui --toolsets to reach the implicit chat session, matching chat subcommand behavior.

* fix(tui): validate launch toolsets

Filter invalid HERMES_TUI_TOOLSETS entries and fall back to configured CLI toolsets when the override contains no valid toolsets.

* fix(tui): avoid config load for builtin toolsets

Honor built-in HERMES_TUI_TOOLSETS values before loading config and treat all/* as the all-toolsets sentinel.

* fix(cli): honor toolsets in oneshot mode

Forward top-level --toolsets into oneshot agent construction so the flag is not silently ignored outside the TUI path.

* fix(cli): validate oneshot toolsets

Reject invalid-only oneshot toolset overrides before output redirection and clarify TUI fallback warnings.

* fix(cli): preserve all-toolsets sentinel

Map explicit all/* oneshot toolset overrides to the all-toolsets sentinel and replace locals() checks in TUI toolset loading.

* fix(cli): warn on extra all-toolset entries

Warn when all/* toolset overrides include additional ignored entries so typos are still visible.

* fix(tui): honor plugin toolset overrides

Discover plugin toolsets before rejecting unresolved explicit toolset overrides and read raw config for MCP name validation.

* fix(tui): reuse toolset argument normalizer

Share top-level TUI toolset argument parsing with the oneshot path to avoid duplicate normalization logic.

* fix(cli): reject disabled mcp toolsets

Validate explicit toolset overrides against enabled MCP servers only and clarify top-level toolset flag help.

* fix(cli): distinguish disabled mcp from unknown toolsets

Report disabled MCP servers separately from unknown toolset entries and stub plugin discovery in invalid-name tests for determinism.
2026-04-29 16:55:27 -07:00

331 lines
13 KiB
Python

"""Oneshot (-z) mode: send a prompt, get the final content block, exit.
Bypasses cli.py entirely. No banner, no spinner, no session_id line,
no stderr chatter. Just the agent's final text to stdout.
Toolsets = explicit --toolsets when provided, otherwise whatever the user has
configured for "cli" in `hermes tools`.
Rules / memory / AGENTS.md / preloaded skills = same as a normal chat turn.
Approvals = auto-bypassed (HERMES_YOLO_MODE=1 is set for the call).
Working directory = the user's CWD (AGENTS.md etc. resolve from there as usual).
Model / provider selection mirrors `hermes chat`:
- Both optional. If omitted, use the user's configured default.
- If both given, pair them exactly as given.
- If only --model given, auto-detect the provider that serves it.
- If only --provider given, error out (ambiguous — caller must pick a model).
Env var fallbacks (used when the corresponding arg is not passed):
- HERMES_INFERENCE_MODEL
- HERMES_INFERENCE_PROVIDER (already read by resolve_runtime_provider)
"""
from __future__ import annotations
import logging
import os
import sys
from contextlib import redirect_stderr, redirect_stdout
from typing import Optional
def _normalize_toolsets(toolsets: object = None) -> list[str] | None:
if not toolsets:
return None
raw_items = [toolsets] if isinstance(toolsets, str) else toolsets
if not isinstance(raw_items, (list, tuple)):
raw_items = [raw_items]
normalized: list[str] = []
for item in raw_items:
if isinstance(item, str):
normalized.extend(part.strip() for part in item.split(","))
else:
normalized.append(str(item).strip())
return [item for item in normalized if item] or None
def _validate_explicit_toolsets(toolsets: object = None) -> tuple[list[str] | None, str | None]:
    """Resolve an explicit --toolsets override into usable toolset names.

    Returns ``(valid_names, error_message)``. ``valid_names`` is None both
    when no override was given and when the all/* sentinel was requested
    (meaning: enable every toolset). ``error_message`` is None on success;
    when set, the caller should print it and abort. Unknown or disabled
    entries are only warned about on stderr, as long as at least one entry
    resolves.
    """
    requested = _normalize_toolsets(toolsets)
    if requested is None:
        return None, None
    try:
        from toolsets import validate_toolset
    except Exception as exc:
        return None, f"hermes -z: failed to validate --toolsets: {exc}\n"
    # Pass 1: built-in toolsets that validate without any extra discovery.
    resolved = [name for name in requested if validate_toolset(name)]
    pending = [name for name in requested if name not in resolved]
    # Pass 2: plugin toolsets only validate after discover_plugins() has
    # registered them, so run discovery before declaring anything unresolved.
    if pending:
        try:
            from hermes_cli.plugins import discover_plugins
            discover_plugins()
            from_plugins = [name for name in pending if validate_toolset(name)]
        except Exception:
            from_plugins = []
        if from_plugins:
            resolved.extend(from_plugins)
            pending = [name for name in pending if name not in from_plugins]
    # The all/* sentinel enables everything; any extra entries are redundant
    # but still warned about so typos stay visible.
    if any(name in {"all", "*"} for name in resolved):
        extras = [name for name in requested if name not in {"all", "*"}]
        if extras:
            sys.stderr.write(
                "hermes -z: --toolsets all enables every toolset; "
                f"ignoring additional entries: {', '.join(extras)}\n"
            )
        return None, None
    # Pass 3: whatever is still pending may name an MCP server. Only
    # enabled servers count; disabled ones get a dedicated warning below.
    enabled_mcp: set[str] = set()
    disabled_mcp: set[str] = set()
    if pending:
        try:
            from hermes_cli.config import read_raw_config
            from hermes_cli.tools_config import _parse_enabled_flag
            raw_cfg = read_raw_config()
            servers = raw_cfg.get("mcp_servers") if isinstance(raw_cfg.get("mcp_servers"), dict) else {}
            for server_name, server_cfg in servers.items():
                if not isinstance(server_cfg, dict):
                    continue
                if _parse_enabled_flag(server_cfg.get("enabled", True), default=True):
                    enabled_mcp.add(str(server_name))
                else:
                    disabled_mcp.add(str(server_name))
        except Exception:
            enabled_mcp = set()
            disabled_mcp = set()
    from_mcp = [name for name in pending if name in enabled_mcp]
    disabled = [name for name in pending if name in disabled_mcp]
    unknown = [name for name in pending if name not in enabled_mcp and name not in disabled_mcp]
    usable = resolved + from_mcp
    if unknown:
        sys.stderr.write(f"hermes -z: ignoring unknown --toolsets entries: {', '.join(unknown)}\n")
    if disabled:
        sys.stderr.write(
            "hermes -z: ignoring disabled MCP servers (set enabled: true in config.yaml to use): "
            f"{', '.join(disabled)}\n"
        )
    if not usable:
        return None, "hermes -z: --toolsets did not contain any valid toolsets.\n"
    return usable, None
def run_oneshot(
    prompt: str,
    model: Optional[str] = None,
    provider: Optional[str] = None,
    toolsets: object = None,
) -> int:
    """Execute a single prompt and print only the final content block.

    Args:
        prompt: The user message to send.
        model: Optional model override. Falls back to HERMES_INFERENCE_MODEL
            env var, then config.yaml's model.default / model.model.
        provider: Optional provider override. Falls back to
            HERMES_INFERENCE_PROVIDER env var, then config.yaml's model.provider,
            then "auto".
        toolsets: Optional comma-separated string or iterable of toolsets.

    Returns the exit code. Caller should sys.exit() with the return.
    """
    # Silence every stdlib logger for the duration. AIAgent, tools, and
    # provider adapters all log to stderr through the root logger; file
    # handlers added by setup_logging() keep working (they're attached to
    # the root logger's handler list, not affected by level), but no
    # bytes reach the terminal.
    logging.disable(logging.CRITICAL)
    # --provider without --model is ambiguous: carrying the user's configured
    # model across to a different provider is usually wrong (that provider may
    # not host it), and silently picking the provider's catalog default hides
    # the mismatch. Require the caller to be explicit. Validate BEFORE the
    # stderr redirect so the message actually reaches the terminal.
    env_model_early = os.getenv("HERMES_INFERENCE_MODEL", "").strip()
    if provider and not ((model or "").strip() or env_model_early):
        sys.stderr.write(
            "hermes -z: --provider requires --model (or HERMES_INFERENCE_MODEL). "
            "Pass both explicitly, or neither to use your configured defaults.\n"
        )
        return 2
    # Validate explicit toolsets (also before the redirect, so warnings and
    # errors reach the terminal).
    explicit_toolsets, toolsets_error = _validate_explicit_toolsets(toolsets)
    if toolsets_error:
        sys.stderr.write(toolsets_error)
        return 2
    # Any explicit override — including the all/* sentinel, which validates
    # to None — suppresses the config-derived fallback inside _run_agent.
    use_config_toolsets = _normalize_toolsets(toolsets) is None
    # Auto-approve any shell / tool approvals. Non-interactive by
    # definition — a prompt would hang forever.
    os.environ["HERMES_YOLO_MODE"] = "1"
    os.environ["HERMES_ACCEPT_HOOKS"] = "1"
    # Redirect stderr AND stdout to devnull for the entire call tree.
    # We'll print the final response to the real stdout at the end.
    # The with-statement guarantees the devnull handle is closed even if
    # _run_agent raises (replaces the old manual open/try/finally that
    # swallowed close() errors).
    real_stdout = sys.stdout
    with open(os.devnull, "w") as devnull, \
            redirect_stdout(devnull), redirect_stderr(devnull):
        response = _run_agent(
            prompt,
            model=model,
            provider=provider,
            toolsets=explicit_toolsets,
            use_config_toolsets=use_config_toolsets,
        )
    if response:
        real_stdout.write(response)
        if not response.endswith("\n"):
            real_stdout.write("\n")
        real_stdout.flush()
    return 0
def _run_agent(
    prompt: str,
    model: Optional[str] = None,
    provider: Optional[str] = None,
    toolsets: object = None,
    use_config_toolsets: bool = True,
) -> str:
    """Build an AIAgent exactly like a normal CLI chat turn would, then
    run a single conversation. Returns the final response string.

    Args:
        prompt: The single user message to send.
        model: Optional model override; the explicit arg beats the
            HERMES_INFERENCE_MODEL env var, which beats config.yaml.
        provider: Optional provider override; when omitted but a model
            was explicitly requested (arg or env), the serving provider
            is auto-detected below.
        toolsets: Optional explicit toolsets (string or iterable); run
            through _normalize_toolsets before use.
        use_config_toolsets: When True and no explicit toolsets were
            given, fall back to the user's configured "cli" toolsets.
            When False, enabled_toolsets stays None — presumably the
            all-toolsets sentinel for AIAgent (TODO confirm against
            run_agent.AIAgent).
    """
    # Imports are local so they don't run when hermes is invoked for
    # other commands (keeps top-level CLI startup cheap).
    from hermes_cli.config import load_config
    from hermes_cli.models import detect_provider_for_model
    from hermes_cli.runtime_provider import resolve_runtime_provider
    from hermes_cli.tools_config import _get_platform_tools
    from run_agent import AIAgent
    cfg = load_config()
    # Resolve effective model: explicit arg → env var → config.
    # config's "model" entry may be a bare string or a mapping with
    # "default" / "model" keys — handle both shapes.
    model_cfg = cfg.get("model") or {}
    if isinstance(model_cfg, str):
        cfg_model = model_cfg
    else:
        cfg_model = model_cfg.get("default") or model_cfg.get("model") or ""
    env_model = os.getenv("HERMES_INFERENCE_MODEL", "").strip()
    effective_model = (model or "").strip() or env_model or cfg_model
    # Resolve effective provider: explicit arg → (auto-detect from model if
    # model was explicit) → env / config (handled inside resolve_runtime_provider).
    #
    # When --model is given without --provider, auto-detect the provider that
    # serves that model — same semantic as `/model <name>` in an interactive
    # session. Without this, resolve_runtime_provider() would fall back to
    # the user's configured default provider, which may not host the model
    # the caller just asked for.
    effective_provider = (provider or "").strip() or None
    explicit_base_url_from_alias: Optional[str] = None
    if effective_provider is None and (model or env_model):
        # Only auto-detect when the model was explicitly requested via arg or
        # env var (not when it came from config — that's the "use my defaults"
        # path and the configured provider is already correct).
        explicit_model = (model or "").strip() or env_model
        if explicit_model:
            # First check DIRECT_ALIASES populated from config.yaml `model_aliases:`.
            # These map a user-defined alias to (model, provider, base_url) for
            # endpoints not in any catalog (local servers, custom proxies, etc.).
            try:
                from hermes_cli import model_switch as _ms
                _ms._ensure_direct_aliases()
                direct = _ms.DIRECT_ALIASES.get(explicit_model.strip().lower())
            except Exception:
                # Alias machinery is best-effort; fall through to catalog
                # detection on any failure.
                direct = None
            if direct is not None:
                effective_model = direct.model
                effective_provider = direct.provider
                if direct.base_url:
                    explicit_base_url_from_alias = direct.base_url.rstrip("/")
            else:
                # No alias matched: detect the provider from the model name,
                # seeding the search with the configured/env provider.
                cfg_provider = ""
                if isinstance(model_cfg, dict):
                    cfg_provider = str(model_cfg.get("provider") or "").strip().lower()
                current_provider = (
                    cfg_provider
                    or os.getenv("HERMES_INFERENCE_PROVIDER", "").strip().lower()
                    or "auto"
                )
                detected = detect_provider_for_model(explicit_model, current_provider)
                if detected:
                    effective_provider, effective_model = detected
    runtime = resolve_runtime_provider(
        requested=effective_provider,
        target_model=effective_model or None,
        explicit_base_url=explicit_base_url_from_alias,
    )
    # Pull in explicit toolsets when provided; otherwise use whatever the user
    # has enabled for "cli". sorted() gives stable ordering for config-derived
    # sets; explicit values preserve user order.
    toolsets_list = _normalize_toolsets(toolsets)
    if toolsets_list is None and use_config_toolsets:
        toolsets_list = sorted(_get_platform_tools(cfg, "cli"))
    agent = AIAgent(
        api_key=runtime.get("api_key"),
        base_url=runtime.get("base_url"),
        provider=runtime.get("provider"),
        api_mode=runtime.get("api_mode"),
        model=effective_model,
        enabled_toolsets=toolsets_list,
        quiet_mode=True,
        platform="cli",
        credential_pool=runtime.get("credential_pool"),
        # Interactive callbacks are intentionally NOT wired beyond this
        # one. In oneshot mode there's no user sitting at a terminal:
        # - clarify → returns a synthetic "pick a default" instruction
        #   so the agent continues instead of stalling on
        #   the tool's built-in "not available" error
        # - sudo password prompt → terminal_tool gates on
        #   HERMES_INTERACTIVE which we never set
        # - shell-hook approval → auto-approved via HERMES_ACCEPT_HOOKS=1
        #   (set above); also falls back to deny on non-tty
        # - dangerous-command approval → bypassed via HERMES_YOLO_MODE=1
        # - skill secret capture → returns gracefully when no callback set
        clarify_callback=_oneshot_clarify_callback,
    )
    # Belt-and-braces: make sure AIAgent doesn't invoke any streaming
    # display callbacks that would bypass our stdout capture.
    agent.suppress_status_output = True
    agent.stream_delta_callback = None
    agent.tool_gen_callback = None
    return agent.chat(prompt) or ""
def _oneshot_clarify_callback(question: str, choices=None) -> str:
"""Clarify is disabled in oneshot mode — tell the agent to pick a
default and proceed instead of stalling or erroring."""
if choices:
return (
f"[oneshot mode: no user available. Pick the best option from "
f"{choices} using your own judgment and continue.]"
)
return (
"[oneshot mode: no user available. Make the most reasonable "
"assumption you can and continue.]"
)