feat: add Codex fast mode toggle (/fast command)

Add /fast slash command to toggle OpenAI Codex service_tier between
normal and priority ('fast') inference. Only exposed for models
registered in _FAST_MODE_BACKEND_CONFIG (currently gpt-5.4).

- Registry-based backend config for extensibility
- Dynamic command visibility (hidden from help/autocomplete for
  non-supported models) via command_filter on SlashCommandCompleter
- service_tier flows through request_overrides from route resolution
- Omit max_output_tokens for Codex backend (rejects it)
- Persists to config.yaml under agent.service_tier

Salvage cleanup: removed simple_term_menu/input() menu (banned),
bare /fast now shows status like /reasoning. Removed redundant
override resolution in _build_api_kwargs — single source of truth
via request_overrides from route.

Co-authored-by: Hermes Agent <hermes@nousresearch.com>
This commit is contained in:
g-guthrie 2026-04-09 18:10:57 -07:00 committed by Teknium
parent 4caa635803
commit d416a69288
9 changed files with 473 additions and 5 deletions

View file

@@ -500,6 +500,8 @@ class AIAgent:
status_callback: callable = None,
max_tokens: int = None,
reasoning_config: Dict[str, Any] = None,
service_tier: str = None,
request_overrides: Dict[str, Any] = None,
prefill_messages: List[Dict[str, Any]] = None,
platform: str = None,
user_id: str = None,
@@ -662,6 +664,8 @@ class AIAgent:
# Model response configuration
self.max_tokens = max_tokens # None = use model default
self.reasoning_config = reasoning_config # None = use default (medium for OpenRouter)
self.service_tier = service_tier
self.request_overrides = dict(request_overrides or {})
self.prefill_messages = prefill_messages or [] # Prefilled conversation turns
# Anthropic prompt caching: auto-enabled for Claude models via OpenRouter.
@@ -3343,7 +3347,7 @@ class AIAgent:
allowed_keys = {
"model", "instructions", "input", "tools", "store",
"reasoning", "include", "max_output_tokens", "temperature",
"tool_choice", "parallel_tool_calls", "prompt_cache_key",
"tool_choice", "parallel_tool_calls", "prompt_cache_key", "service_tier",
}
normalized: Dict[str, Any] = {
"model": model,
@@ -3361,6 +3365,9 @@ class AIAgent:
include = api_kwargs.get("include")
if isinstance(include, list):
normalized["include"] = include
service_tier = api_kwargs.get("service_tier")
if isinstance(service_tier, str) and service_tier.strip():
normalized["service_tier"] = service_tier.strip()
# Pass through max_output_tokens and temperature
max_output_tokens = api_kwargs.get("max_output_tokens")
@@ -5464,6 +5471,10 @@ class AIAgent:
"models.github.ai" in self.base_url.lower()
or "api.githubcopilot.com" in self.base_url.lower()
)
is_codex_backend = (
self.provider == "openai-codex"
or "chatgpt.com/backend-api/codex" in self.base_url.lower()
)
# Resolve reasoning effort: config > default (medium)
reasoning_effort = "medium"
@@ -5501,7 +5512,10 @@ class AIAgent:
elif not is_github_responses:
kwargs["include"] = []
if self.max_tokens is not None:
if self.request_overrides:
kwargs.update(self.request_overrides)
if self.max_tokens is not None and not is_codex_backend:
kwargs["max_output_tokens"] = self.max_tokens
return kwargs