feat: ensure feature parity once again

This commit is contained in:
Brooklyn Nicholson 2026-04-11 14:02:36 -05:00
parent bf6af95ff5
commit e2ea8934d4
6 changed files with 922 additions and 112 deletions

View file

@ -1,6 +1,9 @@
import json
import sys
import threading
import time
import types
from pathlib import Path
from unittest.mock import patch
from tui_gateway import server
@ -30,7 +33,7 @@ class _BrokenStdout:
def test_write_json_serializes_concurrent_writes(monkeypatch):
out = _ChunkyStdout()
monkeypatch.setattr(server.sys, "stdout", out)
monkeypatch.setattr(server, "_real_stdout", out)
threads = [
threading.Thread(target=server.write_json, args=({"seq": i, "text": "x" * 24},))
@ -50,7 +53,7 @@ def test_write_json_serializes_concurrent_writes(monkeypatch):
def test_write_json_returns_false_on_broken_pipe(monkeypatch):
monkeypatch.setattr(server.sys, "stdout", _BrokenStdout())
monkeypatch.setattr(server, "_real_stdout", _BrokenStdout())
assert server.write_json({"ok": True}) is False
@ -77,3 +80,233 @@ def test_status_callback_accepts_single_message_argument():
"sid",
{"kind": "status", "text": "thinking..."},
)
def _session(agent=None, **extra):
return {
"agent": agent if agent is not None else types.SimpleNamespace(),
"session_key": "session-key",
"history": [],
"history_lock": threading.Lock(),
"history_version": 0,
"running": False,
"attached_images": [],
"image_counter": 0,
"cols": 80,
"slash_worker": None,
"show_reasoning": False,
"tool_progress_mode": "all",
**extra,
}
def test_config_set_yolo_toggles_session_scope():
    """config.set with key=yolo flips per-session approval state on, then off."""
    from tools.approval import clear_session, is_session_yolo_enabled

    server._sessions["sid"] = _session()
    try:
        params = {"session_id": "sid", "key": "yolo"}
        first = server.handle_request({"id": "1", "method": "config.set", "params": dict(params)})
        assert first["result"]["value"] == "1"
        assert is_session_yolo_enabled("session-key") is True
        second = server.handle_request({"id": "2", "method": "config.set", "params": dict(params)})
        assert second["result"]["value"] == "0"
        assert is_session_yolo_enabled("session-key") is False
    finally:
        # Always drop the approval state and the fake session.
        clear_session("session-key")
        server._sessions.clear()
def test_config_set_reasoning_updates_live_session_and_agent(tmp_path, monkeypatch):
    """config.set reasoning updates the live agent config and the session flag."""
    monkeypatch.setattr(server, "_hermes_home", tmp_path)
    live_agent = types.SimpleNamespace(reasoning_config=None)
    server._sessions["sid"] = _session(agent=live_agent)

    effort_resp = server.handle_request(
        {"id": "1", "method": "config.set", "params": {"session_id": "sid", "key": "reasoning", "value": "low"}}
    )
    assert effort_resp["result"]["value"] == "low"
    assert live_agent.reasoning_config == {"enabled": True, "effort": "low"}

    show_resp = server.handle_request(
        {"id": "2", "method": "config.set", "params": {"session_id": "sid", "key": "reasoning", "value": "show"}}
    )
    assert show_resp["result"]["value"] == "show"
    assert server._sessions["sid"]["show_reasoning"] is True
def test_config_set_verbose_updates_session_mode_and_agent(tmp_path, monkeypatch):
    """Cycling verbose advances the session progress mode and flips agent logging."""
    monkeypatch.setattr(server, "_hermes_home", tmp_path)
    live_agent = types.SimpleNamespace(verbose_logging=False)
    server._sessions["sid"] = _session(agent=live_agent)
    resp = server.handle_request(
        {"id": "1", "method": "config.set", "params": {"session_id": "sid", "key": "verbose", "value": "cycle"}}
    )
    # Default mode is "all"; one cycle step lands on "verbose".
    assert resp["result"]["value"] == "verbose"
    assert server._sessions["sid"]["tool_progress_mode"] == "verbose"
    assert live_agent.verbose_logging is True
def test_config_set_model_uses_live_switch_path(monkeypatch):
    """config.set model routes through _apply_model_switch for a live session."""
    server._sessions["sid"] = _session()
    calls = {}

    def _record_switch(sid, session, raw):
        calls["args"] = (sid, session["session_key"], raw)
        return "new/model"

    monkeypatch.setattr(server, "_apply_model_switch", _record_switch)
    resp = server.handle_request(
        {"id": "1", "method": "config.set", "params": {"session_id": "sid", "key": "model", "value": "new/model"}}
    )
    assert resp["result"]["value"] == "new/model"
    assert calls["args"] == ("sid", "session-key", "new/model")
def test_session_compress_uses_compress_helper(monkeypatch):
    """session.compress delegates to the helper and emits refreshed session info."""
    server._sessions["sid"] = _session(agent=types.SimpleNamespace())
    monkeypatch.setattr(server, "_compress_session_history", lambda session: (2, {"total": 42}))
    monkeypatch.setattr(server, "_session_info", lambda _agent: {"model": "x"})
    with patch("tui_gateway.server._emit") as emit:
        resp = server.handle_request(
            {"id": "1", "method": "session.compress", "params": {"session_id": "sid"}}
        )
        assert resp["result"]["removed"] == 2
        assert resp["result"]["usage"]["total"] == 42
        emit.assert_called_once_with("session.info", "sid", {"model": "x"})
def test_prompt_submit_sets_approval_session_key(monkeypatch):
    """prompt.submit must bind the approval session key for the agent run."""
    from tools.approval import get_current_session_key
    captured = {}
    class _Agent:
        # Fake agent that records the approval key visible during the run.
        def run_conversation(self, prompt, conversation_history=None, stream_callback=None):
            captured["session_key"] = get_current_session_key(default="")
            return {"final_response": "ok", "messages": [{"role": "assistant", "content": "ok"}]}
    class _ImmediateThread:
        # Thread stand-in: start() runs the target synchronously.
        def __init__(self, target=None, daemon=None):
            self._target = target
        def start(self):
            self._target()
    server._sessions["sid"] = _session(agent=_Agent())
    monkeypatch.setattr(server.threading, "Thread", _ImmediateThread)
    # Silence emits and rendering so only the run path is exercised.
    monkeypatch.setattr(server, "_emit", lambda *args, **kwargs: None)
    monkeypatch.setattr(server, "make_stream_renderer", lambda cols: None)
    monkeypatch.setattr(server, "render_message", lambda raw, cols: None)
    resp = server.handle_request({"id": "1", "method": "prompt.submit", "params": {"session_id": "sid", "text": "ping"}})
    assert resp["result"]["status"] == "streaming"
    # The key seen inside run_conversation must match the session's key.
    assert captured["session_key"] == "session-key"
def test_prompt_submit_expands_context_refs(monkeypatch):
    """A prompt containing '@' is run through context-reference expansion."""
    captured = {}
    class _Agent:
        # Fake agent; model metadata attributes are read by the expansion path.
        model = "test/model"
        base_url = ""
        api_key = ""
        def run_conversation(self, prompt, conversation_history=None, stream_callback=None):
            captured["prompt"] = prompt
            return {"final_response": "ok", "messages": [{"role": "assistant", "content": "ok"}]}
    class _ImmediateThread:
        # Thread stand-in: start() runs the target synchronously.
        def __init__(self, target=None, daemon=None):
            self._target = target
        def start(self):
            self._target()
    # Fake modules so the expansion import inside the server resolves to stubs.
    fake_ctx = types.ModuleType("agent.context_references")
    fake_ctx.preprocess_context_references = lambda message, **kwargs: types.SimpleNamespace(
        blocked=False, message="expanded prompt", warnings=[], references=[], injected_tokens=0
    )
    fake_meta = types.ModuleType("agent.model_metadata")
    fake_meta.get_model_context_length = lambda *args, **kwargs: 100000
    server._sessions["sid"] = _session(agent=_Agent())
    monkeypatch.setattr(server.threading, "Thread", _ImmediateThread)
    monkeypatch.setattr(server, "_emit", lambda *args, **kwargs: None)
    monkeypatch.setattr(server, "make_stream_renderer", lambda cols: None)
    monkeypatch.setattr(server, "render_message", lambda raw, cols: None)
    monkeypatch.setitem(sys.modules, "agent.context_references", fake_ctx)
    monkeypatch.setitem(sys.modules, "agent.model_metadata", fake_meta)
    server.handle_request({"id": "1", "method": "prompt.submit", "params": {"session_id": "sid", "text": "@diff"}})
    # The agent must receive the expanded text, not the raw "@diff".
    assert captured["prompt"] == "expanded prompt"
def test_image_attach_appends_local_image(monkeypatch):
    """image.attach resolves the path via cli helpers and records the image."""
    fake_cli = types.ModuleType("cli")
    fake_cli._IMAGE_EXTENSIONS = {".png"}
    fake_cli._split_path_input = lambda raw: (raw, "")
    fake_cli._resolve_attachment_path = lambda raw: Path("/tmp/cat.png")
    monkeypatch.setitem(sys.modules, "cli", fake_cli)
    server._sessions["sid"] = _session()
    resp = server.handle_request(
        {"id": "1", "method": "image.attach", "params": {"session_id": "sid", "path": "/tmp/cat.png"}}
    )
    result = resp["result"]
    assert result["attached"] is True
    assert result["name"] == "cat.png"
    assert len(server._sessions["sid"]["attached_images"]) == 1
def test_input_detect_drop_attaches_image(monkeypatch):
    """input.detect_drop attaches a dropped image and returns placeholder text."""
    fake_cli = types.ModuleType("cli")
    fake_cli._detect_file_drop = lambda raw: {
        "path": Path("/tmp/cat.png"),
        "is_image": True,
        "remainder": "",
    }
    monkeypatch.setitem(sys.modules, "cli", fake_cli)
    server._sessions["sid"] = _session()
    resp = server.handle_request(
        {"id": "1", "method": "input.detect_drop", "params": {"session_id": "sid", "text": "/tmp/cat.png"}}
    )
    result = resp["result"]
    assert result["matched"] is True
    assert result["is_image"] is True
    assert result["text"] == "[User attached image: cat.png]"
def test_rollback_restore_resolves_number_and_file_path():
    """rollback.restore maps a 1-based number to a hash and forwards file_path."""
    calls = {}

    class _Mgr:
        enabled = True

        def list_checkpoints(self, cwd):
            return [{"hash": "aaa111"}, {"hash": "bbb222"}]

        def restore(self, cwd, target, file_path=None):
            calls["args"] = (cwd, target, file_path)
            return {"success": True, "message": "done"}

    server._sessions["sid"] = _session(agent=types.SimpleNamespace(_checkpoint_mgr=_Mgr()), history=[])
    request = {
        "id": "1",
        "method": "rollback.restore",
        "params": {"session_id": "sid", "hash": "2", "file_path": "src/app.tsx"},
    }
    resp = server.handle_request(request)
    assert resp["result"]["success"] is True
    _cwd, target_arg, file_arg = calls["args"]
    assert target_arg == "bbb222"
    assert file_arg == "src/app.tsx"

View file

@ -229,6 +229,122 @@ def _resolve_model() -> str:
return "anthropic/claude-sonnet-4"
def _write_config_key(key_path: str, value):
    """Set a dotted config key (e.g. "display.skin") and persist the config.

    Intermediate nodes that are missing or not dicts are replaced with {}.
    """
    cfg = _load_cfg()
    *parents, leaf = key_path.split(".")
    node = cfg
    for part in parents:
        child = node.get(part)
        if not isinstance(child, dict):
            child = {}
            node[part] = child
        node = child
    node[leaf] = value
    _save_cfg(cfg)
def _load_reasoning_config() -> dict | None:
    """Parse agent.reasoning_effort from the saved config (None when unset)."""
    from hermes_constants import parse_reasoning_effort

    raw = _load_cfg().get("agent", {}).get("reasoning_effort", "")
    return parse_reasoning_effort(str(raw or "").strip())
def _load_service_tier() -> str | None:
    """Map the configured agent.service_tier to "priority" or None."""
    value = str(_load_cfg().get("agent", {}).get("service_tier", "") or "").strip().lower()
    if value in {"fast", "priority", "on"}:
        return "priority"
    # empty / normal / default / standard / off / none — and anything
    # unrecognized — all mean "no priority tier"
    return None
def _load_show_reasoning() -> bool:
    """Whether display.show_reasoning is enabled in the saved config."""
    display = _load_cfg().get("display", {})
    return bool(display.get("show_reasoning", False))
def _load_tool_progress_mode() -> str:
    """Normalize display.tool_progress to one of off/new/all/verbose."""
    raw = _load_cfg().get("display", {}).get("tool_progress", "all")
    # legacy boolean values map to the two extremes
    if raw is True:
        return "all"
    if raw is False:
        return "off"
    mode = str(raw or "all").strip().lower()
    if mode in {"off", "new", "all", "verbose"}:
        return mode
    return "all"
def _session_show_reasoning(sid: str) -> bool:
    """True when the given session has reasoning display turned on."""
    session = _sessions.get(sid, {})
    return bool(session.get("show_reasoning", False))
def _session_tool_progress_mode(sid: str) -> str:
    """The session's tool-progress mode, defaulting to "all"."""
    mode = _sessions.get(sid, {}).get("tool_progress_mode", "all")
    return str(mode or "all")
def _tool_progress_enabled(sid: str) -> bool:
    """Whether any tool-progress events should be emitted for this session."""
    return _session_tool_progress_mode(sid) != "off"
def _restart_slash_worker(session: dict):
    """Tear down the session's slash worker and spawn a fresh one.

    On any failure to create the new worker, slash_worker is set to None.
    """
    previous = session.get("slash_worker")
    if previous:
        try:
            previous.close()
        except Exception:
            pass  # best-effort shutdown of the old worker
    try:
        model = getattr(session.get("agent"), "model", _resolve_model())
        session["slash_worker"] = _SlashWorker(session["session_key"], model)
    except Exception:
        session["slash_worker"] = None
def _apply_model_switch(sid: str, session: dict, raw_input: str) -> str:
    """Switch the session's agent to a new model and return the resolved name.

    Without a live agent, only the HERMES_MODEL env preference is recorded.
    Raises ValueError when the switch helper reports failure.
    """
    agent = session.get("agent")
    if not agent:
        # No live agent yet: record the choice for the next agent to pick up.
        os.environ["HERMES_MODEL"] = raw_input
        return raw_input
    from hermes_cli.model_switch import switch_model

    outcome = switch_model(
        raw_input=raw_input,
        current_provider=getattr(agent, "provider", "") or "",
        current_model=getattr(agent, "model", "") or "",
        current_base_url=getattr(agent, "base_url", "") or "",
        current_api_key=getattr(agent, "api_key", "") or "",
    )
    if not outcome.success:
        raise ValueError(outcome.error_message or "model switch failed")
    agent.switch_model(
        new_model=outcome.new_model,
        new_provider=outcome.target_provider,
        api_key=outcome.api_key,
        base_url=outcome.base_url,
        api_mode=outcome.api_mode,
    )
    os.environ["HERMES_MODEL"] = outcome.new_model
    # The slash worker caches the model name, so rebuild it, then refresh the UI.
    _restart_slash_worker(session)
    _emit("session.info", sid, _session_info(agent))
    return outcome.new_model
def _compress_session_history(session: dict) -> tuple[int, dict]:
    """Compress the session's conversation history in place.

    Returns (number of messages removed, usage stats from the agent).
    Histories shorter than 4 messages are left untouched. On success the
    session's history_version is bumped so concurrent writers can detect
    the change.
    """
    from agent.model_metadata import estimate_messages_tokens_rough
    agent = session["agent"]
    # Work on a copy so a failed compression leaves the session unchanged.
    history = list(session.get("history", []))
    if len(history) < 4:
        return 0, _get_usage(agent)
    approx_tokens = estimate_messages_tokens_rough(history)
    compressed, _ = agent._compress_context(
        history,
        getattr(agent, "_cached_system_prompt", "") or "",
        approx_tokens=approx_tokens,
    )
    session["history"] = compressed
    session["history_version"] = int(session.get("history_version", 0)) + 1
    return len(history) - len(compressed), _get_usage(agent)
def _get_usage(agent) -> dict:
g = lambda k, fb=None: getattr(agent, k, 0) or (getattr(agent, fb, 0) if fb else 0)
usage = {
@ -320,14 +436,48 @@ def _tool_ctx(name: str, args: dict) -> str:
return ""
def _on_tool_start(sid: str, tool_call_id: str, name: str, args: dict):
    """Capture an edit snapshot (when applicable) and emit a tool.start event."""
    session = _sessions.get(sid)
    if session is not None:
        try:
            from agent.display import capture_local_edit_snapshot

            snap = capture_local_edit_snapshot(name, args)
            if snap is not None:
                session.setdefault("edit_snapshots", {})[tool_call_id] = snap
        except Exception:
            pass  # snapshots are best-effort; never block the tool call
    if _tool_progress_enabled(sid):
        payload = {"tool_id": tool_call_id, "name": name, "context": _tool_ctx(name, args)}
        _emit("tool.start", sid, payload)
def _on_tool_complete(sid: str, tool_call_id: str, name: str, args: dict, result: str):
    """Attach an inline diff when available and emit a tool.complete event.

    The event is still emitted when progress is disabled if a diff was produced.
    """
    payload = {"tool_id": tool_call_id, "name": name}
    session = _sessions.get(sid)
    snapshot = None
    if session is not None:
        snapshot = session.setdefault("edit_snapshots", {}).pop(tool_call_id, None)
    try:
        from agent.display import render_edit_diff_with_delta

        lines: list[str] = []
        produced = render_edit_diff_with_delta(
            name, result, function_args=args, snapshot=snapshot, print_fn=lines.append
        )
        if produced:
            payload["inline_diff"] = "\n".join(lines)
    except Exception:
        pass  # diff rendering is best-effort only
    if _tool_progress_enabled(sid) or payload.get("inline_diff"):
        _emit("tool.complete", sid, payload)
def _agent_cbs(sid: str) -> dict:
    """Build the agent callback kwargs that bridge agent events to TUI emits.

    Fix: the block contained both the old and new versions of several keyword
    arguments (tool_start_callback, tool_complete_callback,
    tool_progress_callback, tool_gen_callback, reasoning_callback), which is a
    SyntaxError ("keyword argument repeated"); only the gated versions remain.

    Progress/generating/reasoning callbacks are gated on per-session display
    settings; `and` short-circuits so suppressed events are simply dropped.
    """
    return dict(
        tool_start_callback=lambda tc_id, name, args: _on_tool_start(sid, tc_id, name, args),
        tool_complete_callback=lambda tc_id, name, args, result: _on_tool_complete(sid, tc_id, name, args, result),
        tool_progress_callback=lambda name, preview, args: _tool_progress_enabled(sid)
        and _emit("tool.progress", sid, {"name": name, "preview": preview}),
        tool_gen_callback=lambda name: _tool_progress_enabled(sid) and _emit("tool.generating", sid, {"name": name}),
        thinking_callback=lambda text: _emit("thinking.delta", sid, {"text": text}),
        reasoning_callback=lambda text: _session_show_reasoning(sid) and _emit("reasoning.delta", sid, {"text": text}),
        status_callback=lambda kind, text=None: _status_update(sid, str(kind), None if text is None else str(text)),
        clarify_callback=lambda q, c: _block("clarify.request", sid, {"question": q, "choices": c}),
    )
@ -357,7 +507,12 @@ def _make_agent(sid: str, key: str, session_id: str | None = None):
cfg = _load_cfg()
system_prompt = cfg.get("agent", {}).get("system_prompt", "") or ""
return AIAgent(
model=_resolve_model(), quiet_mode=True, platform="tui",
model=_resolve_model(),
quiet_mode=True,
verbose_logging=_load_tool_progress_mode() == "verbose",
reasoning_config=_load_reasoning_config(),
service_tier=_load_service_tier(),
platform="tui",
session_id=session_id or key, session_db=_get_db(),
ephemeral_system_prompt=system_prompt or None,
**_agent_cbs(sid),
@ -369,10 +524,16 @@ def _init_session(sid: str, key: str, agent, history: list, cols: int = 80):
"agent": agent,
"session_key": key,
"history": history,
"history_lock": threading.Lock(),
"history_version": 0,
"running": False,
"attached_images": [],
"image_counter": 0,
"cols": cols,
"slash_worker": None,
"show_reasoning": _load_show_reasoning(),
"tool_progress_mode": _load_tool_progress_mode(),
"edit_snapshots": {},
}
try:
_sessions[sid]["slash_worker"] = _SlashWorker(key, getattr(agent, "model", _resolve_model()))
@ -397,6 +558,17 @@ def _with_checkpoints(session, fn):
return fn(session["agent"]._checkpoint_mgr, os.getenv("TERMINAL_CWD", os.getcwd()))
def _resolve_checkpoint_hash(mgr, cwd: str, ref: str) -> str:
try:
checkpoints = mgr.list_checkpoints(cwd)
idx = int(ref) - 1
except ValueError:
return ref
if 0 <= idx < len(checkpoints):
return checkpoints[idx].get("hash", ref)
raise ValueError(f"Invalid checkpoint number. Use 1-{len(checkpoints)}.")
def _enrich_with_attached_images(user_text: str, image_paths: list[str]) -> str:
"""Pre-analyze attached images via vision and prepend descriptions to user text."""
import asyncio, json as _json
@ -561,11 +733,17 @@ def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
if err:
return err
history, removed = session.get("history", []), 0
while history and history[-1].get("role") in ("assistant", "tool"):
history.pop(); removed += 1
if history and history[-1].get("role") == "user":
history.pop(); removed += 1
removed = 0
with session["history_lock"]:
history = session.get("history", [])
while history and history[-1].get("role") in ("assistant", "tool"):
history.pop()
removed += 1
if history and history[-1].get("role") == "user":
history.pop()
removed += 1
if removed:
session["history_version"] = int(session.get("history_version", 0)) + 1
return _ok(rid, {"removed": removed})
@ -574,11 +752,11 @@ def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
if err:
return err
agent = session["agent"]
try:
if hasattr(agent, "compress_context"):
agent.compress_context()
return _ok(rid, {"status": "compressed", "usage": _get_usage(agent)})
with session["history_lock"]:
removed, usage = _compress_session_history(session)
_emit("session.info", params.get("session_id", ""), _session_info(session["agent"]))
return _ok(rid, {"status": "compressed", "removed": removed, "usage": usage})
except Exception as e:
return _err(rid, 5005, str(e))
@ -606,7 +784,8 @@ def _(rid, params: dict) -> dict:
return err
db = _get_db()
old_key = session["session_key"]
history = session.get("history", [])
with session["history_lock"]:
history = [dict(msg) for msg in session.get("history", [])]
if not history:
return _err(rid, 4008, "nothing to branch — send a message first")
new_key = _new_session_key()
@ -666,15 +845,47 @@ def _(rid, params: dict) -> dict:
session = _sessions.get(sid)
if not session:
return _err(rid, 4001, "session not found")
agent, history = session["agent"], session["history"]
with session["history_lock"]:
if session.get("running"):
return _err(rid, 4009, "session busy")
session["running"] = True
history = list(session["history"])
history_version = int(session.get("history_version", 0))
images = list(session.get("attached_images", []))
session["attached_images"] = []
agent = session["agent"]
_emit("message.start", sid)
def run():
approval_token = None
try:
from tools.approval import reset_current_session_key, set_current_session_key
approval_token = set_current_session_key(session["session_key"])
cols = session.get("cols", 80)
streamer = make_stream_renderer(cols)
images = session.pop("attached_images", [])
prompt = _enrich_with_attached_images(text, images) if images else text
prompt = text
if isinstance(prompt, str) and "@" in prompt:
from agent.context_references import preprocess_context_references
from agent.model_metadata import get_model_context_length
ctx_len = get_model_context_length(
getattr(agent, "model", "") or _resolve_model(),
base_url=getattr(agent, "base_url", "") or "",
api_key=getattr(agent, "api_key", "") or "",
)
ctx = preprocess_context_references(
prompt,
cwd=os.environ.get("TERMINAL_CWD", os.getcwd()),
allowed_root=os.environ.get("TERMINAL_CWD", os.getcwd()),
context_length=ctx_len,
)
if ctx.blocked:
_emit("error", sid, {"message": "\n".join(ctx.warnings) or "Context injection refused."})
return
prompt = ctx.message
prompt = _enrich_with_attached_images(prompt, images) if images else prompt
def _stream(delta):
payload = {"text": delta}
@ -689,7 +900,10 @@ def _(rid, params: dict) -> dict:
if isinstance(result, dict):
if isinstance(result.get("messages"), list):
session["history"] = result["messages"]
with session["history_lock"]:
if int(session.get("history_version", 0)) == history_version:
session["history"] = result["messages"]
session["history_version"] = history_version + 1
raw = result.get("final_response", "")
status = "interrupted" if result.get("interrupted") else "error" if result.get("error") else "complete"
else:
@ -703,6 +917,14 @@ def _(rid, params: dict) -> dict:
_emit("message.complete", sid, payload)
except Exception as e:
_emit("error", sid, {"message": str(e)})
finally:
try:
if approval_token is not None:
reset_current_session_key(approval_token)
except Exception:
pass
with session["history_lock"]:
session["running"] = False
threading.Thread(target=run, daemon=True).start()
return _ok(rid, {"status": "streaming"})
@ -733,6 +955,84 @@ def _(rid, params: dict) -> dict:
return _ok(rid, {"attached": True, "path": str(img_path), "count": len(session["attached_images"])})
@method("image.attach")
def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
if err:
return err
raw = str(params.get("path", "") or "").strip()
if not raw:
return _err(rid, 4015, "path required")
try:
from cli import _IMAGE_EXTENSIONS, _resolve_attachment_path, _split_path_input
path_token, remainder = _split_path_input(raw)
image_path = _resolve_attachment_path(path_token)
if image_path is None:
return _err(rid, 4016, f"image not found: {path_token}")
if image_path.suffix.lower() not in _IMAGE_EXTENSIONS:
return _err(rid, 4016, f"unsupported image: {image_path.name}")
session.setdefault("attached_images", []).append(str(image_path))
return _ok(
rid,
{
"attached": True,
"path": str(image_path),
"name": image_path.name,
"count": len(session["attached_images"]),
"remainder": remainder,
"text": remainder or f"[User attached image: {image_path.name}]",
},
)
except Exception as e:
return _err(rid, 5027, str(e))
@method("input.detect_drop")
def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
if err:
return err
try:
from cli import _detect_file_drop
raw = str(params.get("text", "") or "")
dropped = _detect_file_drop(raw)
if not dropped:
return _ok(rid, {"matched": False})
drop_path = dropped["path"]
remainder = dropped["remainder"]
if dropped["is_image"]:
session.setdefault("attached_images", []).append(str(drop_path))
text = remainder or f"[User attached image: {drop_path.name}]"
return _ok(
rid,
{
"matched": True,
"is_image": True,
"path": str(drop_path),
"name": drop_path.name,
"count": len(session["attached_images"]),
"text": text,
},
)
text = f"[User attached file: {drop_path}]" + (f"\n{remainder}" if remainder else "")
return _ok(
rid,
{
"matched": True,
"is_image": False,
"path": str(drop_path),
"name": drop_path.name,
"text": text,
},
)
except Exception as e:
return _err(rid, 5027, str(e))
@method("prompt.background")
def _(rid, params: dict) -> dict:
text, parent = params.get("text", ""), params.get("session_id", "")
@ -819,39 +1119,94 @@ def _(rid, params: dict) -> dict:
@method("config.set")
def _(rid, params: dict) -> dict:
key, value = params.get("key", ""), params.get("value", "")
session = _sessions.get(params.get("session_id", ""))
if key == "model":
os.environ["HERMES_MODEL"] = value
return _ok(rid, {"key": key, "value": value})
try:
if not value:
return _err(rid, 4002, "model value required")
if session:
value = _apply_model_switch(params.get("session_id", ""), session, value)
else:
os.environ["HERMES_MODEL"] = value
return _ok(rid, {"key": key, "value": value})
except Exception as e:
return _err(rid, 5001, str(e))
if key == "verbose":
cycle = ["off", "new", "all", "verbose"]
cur = session.get("tool_progress_mode", _load_tool_progress_mode()) if session else _load_tool_progress_mode()
if value and value != "cycle":
os.environ["HERMES_VERBOSE"] = value
return _ok(rid, {"key": key, "value": value})
cur = os.environ.get("HERMES_VERBOSE", "all")
try:
idx = cycle.index(cur)
except ValueError:
idx = 2
nv = cycle[(idx + 1) % len(cycle)]
os.environ["HERMES_VERBOSE"] = nv
nv = str(value).strip().lower()
if nv not in cycle:
return _err(rid, 4002, f"unknown verbose mode: {value}")
else:
try:
idx = cycle.index(cur)
except ValueError:
idx = 2
nv = cycle[(idx + 1) % len(cycle)]
_write_config_key("display.tool_progress", nv)
if session:
session["tool_progress_mode"] = nv
agent = session.get("agent")
if agent is not None:
agent.verbose_logging = nv == "verbose"
return _ok(rid, {"key": key, "value": nv})
if key == "yolo":
nv = "0" if os.environ.get("HERMES_YOLO", "0") == "1" else "1"
os.environ["HERMES_YOLO"] = nv
return _ok(rid, {"key": key, "value": nv})
try:
if session:
from tools.approval import (
disable_session_yolo,
enable_session_yolo,
is_session_yolo_enabled,
)
current = is_session_yolo_enabled(session["session_key"])
if current:
disable_session_yolo(session["session_key"])
nv = "0"
else:
enable_session_yolo(session["session_key"])
nv = "1"
else:
current = bool(os.environ.get("HERMES_YOLO_MODE"))
if current:
os.environ.pop("HERMES_YOLO_MODE", None)
nv = "0"
else:
os.environ["HERMES_YOLO_MODE"] = "1"
nv = "1"
return _ok(rid, {"key": key, "value": nv})
except Exception as e:
return _err(rid, 5001, str(e))
if key == "reasoning":
if value in ("show", "on"):
os.environ["HERMES_SHOW_REASONING"] = "1"
return _ok(rid, {"key": key, "value": "show"})
if value in ("hide", "off"):
os.environ.pop("HERMES_SHOW_REASONING", None)
return _ok(rid, {"key": key, "value": "hide"})
os.environ["HERMES_REASONING"] = value
return _ok(rid, {"key": key, "value": value})
try:
from hermes_constants import parse_reasoning_effort
arg = str(value or "").strip().lower()
if arg in ("show", "on"):
_write_config_key("display.show_reasoning", True)
if session:
session["show_reasoning"] = True
return _ok(rid, {"key": key, "value": "show"})
if arg in ("hide", "off"):
_write_config_key("display.show_reasoning", False)
if session:
session["show_reasoning"] = False
return _ok(rid, {"key": key, "value": "hide"})
parsed = parse_reasoning_effort(arg)
if parsed is None:
return _err(rid, 4002, f"unknown reasoning value: {value}")
_write_config_key("agent.reasoning_effort", arg)
if session and session.get("agent") is not None:
session["agent"].reasoning_config = parsed
return _ok(rid, {"key": key, "value": arg})
except Exception as e:
return _err(rid, 5001, str(e))
if key in ("prompt", "personality", "skin"):
try:
@ -900,6 +1255,12 @@ def _(rid, params: dict) -> dict:
return _ok(rid, {"prompt": _load_cfg().get("custom_prompt", "")})
if key == "skin":
return _ok(rid, {"value": _load_cfg().get("display", {}).get("skin", "default")})
if key == "mtime":
cfg_path = _hermes_home / "config.yaml"
try:
return _ok(rid, {"mtime": cfg_path.stat().st_mtime if cfg_path.exists() else 0})
except Exception:
return _ok(rid, {"mtime": 0})
return _err(rid, 4002, f"unknown config key: {key}")
@ -1235,30 +1596,23 @@ def _mirror_slash_side_effects(sid: str, session: dict, command: str):
try:
if name == "model" and arg and agent:
from hermes_cli.model_switch import switch_model
result = switch_model(
raw_input=arg,
current_provider=getattr(agent, "provider", "") or "",
current_model=getattr(agent, "model", "") or "",
current_base_url=getattr(agent, "base_url", "") or "",
current_api_key=getattr(agent, "api_key", "") or "",
)
if result.success:
agent.switch_model(
new_model=result.new_model,
new_provider=result.target_provider,
api_key=result.api_key,
base_url=result.base_url,
api_mode=result.api_mode,
)
_emit("session.info", sid, _session_info(agent))
_apply_model_switch(sid, session, arg)
elif name in ("personality", "prompt") and agent:
cfg = _load_cfg()
new_prompt = cfg.get("agent", {}).get("system_prompt", "") or ""
agent.ephemeral_system_prompt = new_prompt or None
agent._cached_system_prompt = None
elif name == "compress" and agent:
(getattr(agent, "compress_context", None) or getattr(agent, "context_compressor", agent).compress)()
with session["history_lock"]:
_compress_session_history(session)
_emit("session.info", sid, _session_info(agent))
elif name == "fast" and agent:
mode = arg.lower()
if mode in {"fast", "on"}:
agent.service_tier = "priority"
elif mode in {"normal", "off"}:
agent.service_tier = None
_emit("session.info", sid, _session_info(agent))
elif name == "reload-mcp" and agent and hasattr(agent, "reload_mcp_tools"):
agent.reload_mcp_tools()
elif name == "stop":
@ -1384,10 +1738,29 @@ def _(rid, params: dict) -> dict:
if err:
return err
target = params.get("hash", "")
file_path = params.get("file_path", "")
if not target:
return _err(rid, 4014, "hash required")
try:
return _ok(rid, _with_checkpoints(session, lambda mgr, cwd: mgr.restore(cwd, target)))
def go(mgr, cwd):
resolved = _resolve_checkpoint_hash(mgr, cwd, target)
result = mgr.restore(cwd, resolved, file_path=file_path or None)
if result.get("success") and not file_path:
removed = 0
with session["history_lock"]:
history = session.get("history", [])
while history and history[-1].get("role") in ("assistant", "tool"):
history.pop()
removed += 1
if history and history[-1].get("role") == "user":
history.pop()
removed += 1
if removed:
session["history_version"] = int(session.get("history_version", 0)) + 1
result["history_removed"] = removed
return result
return _ok(rid, _with_checkpoints(session, go))
except Exception as e:
return _err(rid, 5021, str(e))
@ -1401,7 +1774,7 @@ def _(rid, params: dict) -> dict:
if not target:
return _err(rid, 4014, "hash required")
try:
r = _with_checkpoints(session, lambda mgr, cwd: mgr.diff(cwd, target))
r = _with_checkpoints(session, lambda mgr, cwd: mgr.diff(cwd, _resolve_checkpoint_hash(mgr, cwd, target)))
raw = r.get("diff", "")[:4000]
payload = {"stat": r.get("stat", ""), "diff": raw}
rendered = render_diff(raw, session.get("cols", 80))

View file

@ -189,6 +189,23 @@ function ctxBar(pct: number | undefined, w = 10) {
return '█'.repeat(filled) + '░'.repeat(w - filled)
}
function fmtDuration(ms: number) {
  // Render a millisecond duration as "Xh Ym", "Xm Ys", or "Xs".
  // Negative inputs clamp to zero seconds.
  const totalSecs = Math.max(0, Math.floor(ms / 1000))
  const secs = totalSecs % 60
  const mins = Math.floor(totalSecs / 60) % 60
  const hours = Math.floor(totalSecs / 3600)
  if (hours > 0) return `${hours}h ${mins}m`
  if (mins > 0) return `${mins}m ${secs}s`
  return `${secs}s`
}
function StatusRule({
cols,
status,
@ -196,6 +213,8 @@ function StatusRule({
model,
usage,
bgCount,
durationLabel,
voiceLabel,
t
}: {
cols: number
@ -204,6 +223,8 @@ function StatusRule({
model: string
usage: Usage
bgCount: number
durationLabel?: string
voiceLabel?: string
t: Theme
}) {
const pct = usage.context_percent
@ -218,9 +239,16 @@ function StatusRule({
const pctLabel = pct != null ? `${pct}%` : ''
const bar = usage.context_max ? ctxBar(pct) : ''
const segs = [status, model, ctxLabel, bar ? `[${bar}]` : '', pctLabel, bgCount > 0 ? `${bgCount} bg` : ''].filter(
Boolean
)
const segs = [
status,
model,
ctxLabel,
bar ? `[${bar}]` : '',
pctLabel,
durationLabel || '',
voiceLabel || '',
bgCount > 0 ? `${bgCount} bg` : ''
].filter(Boolean)
const inner = segs.join(' │ ')
const pad = Math.max(0, cols - inner.length - 5)
@ -237,6 +265,8 @@ function StatusRule({
<Text color={barColor}>[{bar}]</Text> <Text color={barColor}>{pctLabel}</Text>
</Text>
) : null}
{durationLabel ? <Text color={t.color.dim}> {durationLabel}</Text> : null}
{voiceLabel ? <Text color={t.color.dim}> {voiceLabel}</Text> : null}
{bgCount > 0 ? <Text color={t.color.dim}> {bgCount} bg</Text> : null}
{' ' + '─'.repeat(pad)}
</Text>
@ -314,6 +344,12 @@ export function App({ gw }: { gw: GatewayClient }) {
const [bgTasks, setBgTasks] = useState<Set<string>>(new Set())
const [catalog, setCatalog] = useState<SlashCatalog | null>(null)
const [pager, setPager] = useState<{ lines: string[]; offset: number } | null>(null)
const [voiceEnabled, setVoiceEnabled] = useState(false)
const [voiceRecording, setVoiceRecording] = useState(false)
const [voiceProcessing, setVoiceProcessing] = useState(false)
const [sessionStartedAt, setSessionStartedAt] = useState(() => Date.now())
const [bellOnComplete, setBellOnComplete] = useState(false)
const [clockNow, setClockNow] = useState(() => Date.now())
// ── Refs ─────────────────────────────────────────────────────────
@ -333,6 +369,7 @@ export function App({ gw }: { gw: GatewayClient }) {
const statusTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null)
const busyRef = useRef(busy)
const onEventRef = useRef<(ev: GatewayEvent) => void>(() => {})
const configMtimeRef = useRef(0)
colsRef.current = cols
busyRef.current = busy
reasoningRef.current = reasoning
@ -367,6 +404,12 @@ export function App({ gw }: { gw: GatewayClient }) {
}
}, [sid, stdout]) // eslint-disable-line react-hooks/exhaustive-deps
useEffect(() => {
const id = setInterval(() => setClockNow(Date.now()), 1000)
return () => clearInterval(id)
}, [])
// ── Core actions ─────────────────────────────────────────────────
const appendMessage = useCallback((msg: Msg) => {
@ -423,6 +466,44 @@ export function App({ gw }: { gw: GatewayClient }) {
[gw, sys]
)
useEffect(() => {
if (!sid) {
return
}
rpc('voice.toggle', { action: 'status' }).then((r: any) => setVoiceEnabled(!!r?.enabled))
rpc('config.get', { key: 'mtime' }).then((r: any) => {
configMtimeRef.current = Number(r?.mtime ?? 0)
})
rpc('config.get', { key: 'full' }).then((r: any) => {
setBellOnComplete(!!r?.config?.display?.bell_on_complete)
})
}, [rpc, sid])
useEffect(() => {
if (!sid) {
return
}
const id = setInterval(() => {
rpc('config.get', { key: 'mtime' }).then((r: any) => {
const next = Number(r?.mtime ?? 0)
if (configMtimeRef.current && next && next !== configMtimeRef.current) {
configMtimeRef.current = next
rpc('reload.mcp', { session_id: sid }).then(() => pushActivity('MCP reloaded after config change'))
rpc('config.get', { key: 'full' }).then((cfg: any) => {
setBellOnComplete(!!cfg?.config?.display?.bell_on_complete)
})
} else if (!configMtimeRef.current && next) {
configMtimeRef.current = next
}
})
}, 5000)
return () => clearInterval(id)
}, [pushActivity, rpc, sid])
const idle = () => {
setThinking(false)
setTools([])
@ -454,6 +535,8 @@ export function App({ gw }: { gw: GatewayClient }) {
const resetSession = () => {
idle()
setReasoning('')
setVoiceRecording(false)
setVoiceProcessing(false)
setSid(null as any) // will be set by caller
setHistoryItems([])
setMessages([])
@ -477,6 +560,7 @@ export function App({ gw }: { gw: GatewayClient }) {
resetSession()
setSid(r.session_id)
setSessionStartedAt(Date.now())
setStatus('ready')
if (r.info) {
@ -506,6 +590,7 @@ export function App({ gw }: { gw: GatewayClient }) {
.then((r: any) => {
resetSession()
setSid(r.session_id)
setSessionStartedAt(Date.now())
setInfo(r.info ?? null)
const resumed = toTranscriptMessages(r.messages)
@ -667,25 +752,45 @@ export function App({ gw }: { gw: GatewayClient }) {
pushActivity(`redacted ${payload.redactions} secret-like value(s)`, 'warn')
}
if (statusTimerRef.current) {
clearTimeout(statusTimerRef.current)
statusTimerRef.current = null
const startSubmit = (displayText: string, submitText: string) => {
if (statusTimerRef.current) {
clearTimeout(statusTimerRef.current)
statusTimerRef.current = null
}
inflightPasteIdsRef.current = payload.usedIds
setLastUserMsg(text)
appendMessage({ role: 'user', text: displayText })
setBusy(true)
setStatus('running…')
buf.current = ''
interruptedRef.current = false
gw.request('prompt.submit', { session_id: sid, text: submitText }).catch((e: Error) => {
inflightPasteIdsRef.current = []
sys(`error: ${e.message}`)
setStatus('ready')
setBusy(false)
})
}
inflightPasteIdsRef.current = payload.usedIds
setLastUserMsg(text)
appendMessage({ role: 'user', text })
setBusy(true)
setStatus('running…')
buf.current = ''
interruptedRef.current = false
gw.request('input.detect_drop', { session_id: sid, text: payload.text })
.then((r: any) => {
if (r?.matched) {
if (r.is_image) {
pushActivity(`attached image: ${r.name}`)
} else {
pushActivity(`detected file: ${r.name}`)
}
gw.request('prompt.submit', { session_id: sid, text: payload.text }).catch((e: Error) => {
inflightPasteIdsRef.current = []
sys(`error: ${e.message}`)
setStatus('ready')
setBusy(false)
})
startSubmit(r.text || text, r.text || payload.text)
return
}
startSubmit(text, payload.text)
})
.catch(() => startSubmit(text, payload.text))
}
const shellExec = (cmd: string) => {
@ -1027,6 +1132,37 @@ export function App({ gw }: { gw: GatewayClient }) {
return
}
if (ctrl(key, ch, 'b')) {
if (voiceRecording) {
setVoiceRecording(false)
setVoiceProcessing(true)
rpc('voice.record', { action: 'stop' })
.then((r: any) => {
const transcript = String(r?.text || '').trim()
if (transcript) {
setInput(prev => (prev ? `${prev}${/\s$/.test(prev) ? '' : ' '}${transcript}` : transcript))
} else {
sys('voice: no speech detected')
}
})
.catch((e: Error) => sys(`voice error: ${e.message}`))
.finally(() => {
setVoiceProcessing(false)
setStatus('ready')
})
} else {
rpc('voice.record', { action: 'start' })
.then(() => {
setVoiceRecording(true)
setStatus('recording…')
})
.catch((e: Error) => sys(`voice error: ${e.message}`))
}
return
}
if (ctrl(key, ch, 'g')) {
return openEditor()
}
@ -1184,7 +1320,10 @@ export function App({ gw }: { gw: GatewayClient }) {
break
case 'tool.start':
setTools(prev => [...prev, { id: p.tool_id, name: p.name, context: (p.context as string) || '' }])
setTools(prev => [
...prev,
{ id: p.tool_id, name: p.name, context: (p.context as string) || '', startedAt: Date.now() }
])
break
case 'tool.complete': {
@ -1211,6 +1350,10 @@ export function App({ gw }: { gw: GatewayClient }) {
return remaining
})
if (p?.inline_diff) {
sys(p.inline_diff as string)
}
break
}
@ -1262,7 +1405,7 @@ export function App({ gw }: { gw: GatewayClient }) {
case 'message.delta':
if (p?.text && !interruptedRef.current) {
buf.current += p.rendered ?? p.text
buf.current = p.rendered ?? buf.current + p.text
setStreaming(buf.current.trimStart())
}
@ -1289,6 +1432,10 @@ export function App({ gw }: { gw: GatewayClient }) {
thinking: savedReasoning || undefined,
tools: savedTools.length ? savedTools : undefined
})
if (bellOnComplete && stdout?.isTTY) {
stdout.write('\x07')
}
}
turnToolsRef.current = []
@ -1624,14 +1771,31 @@ export function App({ gw }: { gw: GatewayClient }) {
if (!arg) {
rpc('config.get', { key: 'provider' }).then((r: any) => sys(`${r.model} (${r.provider})`))
} else {
rpc('config.set', { key: 'model', value: arg.replace('--global', '').trim() }).then((r: any) => {
sys(`model → ${r.value}`)
setInfo(prev => (prev ? { ...prev, model: r.value } : prev))
})
rpc('config.set', { session_id: sid, key: 'model', value: arg.replace('--global', '').trim() }).then(
(r: any) => {
sys(`model → ${r.value}`)
setInfo(prev => (prev ? { ...prev, model: r.value } : prev))
}
)
}
return true
case 'image':
rpc('image.attach', { session_id: sid, path: arg }).then((r: any) => {
if (!r) {
return
}
sys(`attached image: ${r.name}`)
if (r?.remainder) {
setInput(r.remainder)
}
})
return true
case 'provider':
gw.request('slash.exec', { command: 'provider', session_id: sid })
.then((r: any) => page(r?.output || '(no output)'))
@ -1649,17 +1813,23 @@ export function App({ gw }: { gw: GatewayClient }) {
return true
case 'yolo':
rpc('config.set', { key: 'yolo' }).then((r: any) => sys(`yolo ${r.value === '1' ? 'on' : 'off'}`))
rpc('config.set', { session_id: sid, key: 'yolo' }).then((r: any) =>
sys(`yolo ${r.value === '1' ? 'on' : 'off'}`)
)
return true
case 'reasoning':
rpc('config.set', { key: 'reasoning', value: arg || 'medium' }).then((r: any) => sys(`reasoning: ${r.value}`))
rpc('config.set', { session_id: sid, key: 'reasoning', value: arg || 'medium' }).then((r: any) =>
sys(`reasoning: ${r.value}`)
)
return true
case 'verbose':
rpc('config.set', { key: 'verbose', value: arg || 'cycle' }).then((r: any) => sys(`verbose: ${r.value}`))
rpc('config.set', { session_id: sid, key: 'verbose', value: arg || 'cycle' }).then((r: any) =>
sys(`verbose: ${r.value}`)
)
return true
@ -1694,6 +1864,7 @@ export function App({ gw }: { gw: GatewayClient }) {
rpc('session.branch', { session_id: sid, name: arg }).then((r: any) => {
if (r?.session_id) {
setSid(r.session_id)
setSessionStartedAt(Date.now())
setHistoryItems([])
setMessages([])
sys(`branched → ${r.title}`)
@ -1773,9 +1944,14 @@ export function App({ gw }: { gw: GatewayClient }) {
return true
case 'voice':
rpc('voice.toggle', { action: arg === 'on' || arg === 'off' ? arg : 'status' }).then((r: any) =>
rpc('voice.toggle', { action: arg === 'on' || arg === 'off' ? arg : 'status' }).then((r: any) => {
if (!r) {
return
}
setVoiceEnabled(!!r?.enabled)
sys(`voice${arg === 'on' || arg === 'off' ? '' : ':'} ${r.enabled ? 'on' : 'off'}`)
)
})
return true
@ -1794,13 +1970,19 @@ export function App({ gw }: { gw: GatewayClient }) {
return sys('no checkpoints')
}
sys(r.checkpoints.map((c: any, i: number) => ` ${i} ${c.hash?.slice(0, 8)} ${c.message}`).join('\n'))
sys(r.checkpoints.map((c: any, i: number) => ` ${i + 1} ${c.hash?.slice(0, 8)} ${c.message}`).join('\n'))
})
} else {
const hash = sub === 'restore' || sub === 'diff' ? rArgs[0] : sub
rpc(sub === 'diff' ? 'rollback.diff' : 'rollback.restore', { session_id: sid, hash }).then((r: any) =>
sys(r.rendered || r.diff || r.message || 'done')
)
const filePath =
sub === 'restore' || sub === 'diff' ? rArgs.slice(1).join(' ').trim() : rArgs.join(' ').trim()
rpc(sub === 'diff' ? 'rollback.diff' : 'rollback.restore', {
session_id: sid,
hash,
...(sub === 'diff' || !filePath ? {} : { file_path: filePath })
}).then((r: any) => sys(r.rendered || r.diff || r.message || 'done'))
}
return true
@ -2003,6 +2185,9 @@ export function App({ gw }: { gw: GatewayClient }) {
? theme.color.warn
: theme.color.dim
const durationLabel = sid ? fmtDuration(clockNow - sessionStartedAt) : ''
const voiceLabel = voiceRecording ? 'REC' : voiceProcessing ? 'STT' : `voice ${voiceEnabled ? 'on' : 'off'}`
// ── Render ───────────────────────────────────────────────────────
return (
@ -2024,7 +2209,6 @@ export function App({ gw }: { gw: GatewayClient }) {
<ToolTrail
activity={busy ? activity : []}
animateCot={busy && !streaming}
padAfter={!!streaming}
t={theme}
tools={tools}
trail={turnTrail}
@ -2126,11 +2310,13 @@ export function App({ gw }: { gw: GatewayClient }) {
<StatusRule
bgCount={bgTasks.size}
cols={cols}
durationLabel={durationLabel}
model={info?.model?.split('/').pop() ?? ''}
status={status}
statusColor={statusColor}
t={theme}
usage={usage}
voiceLabel={voiceLabel}
/>
)}

View file

@ -39,8 +39,12 @@ export const MessageLine = memo(function MessageLine({
return <Text color={t.color.dim}>{msg.text}</Text>
}
if (msg.role !== 'user' && hasAnsi(msg.text)) {
return <Text wrap="wrap">{msg.text}</Text>
}
if (msg.role === 'assistant') {
return hasAnsi(msg.text) ? <Text wrap="wrap">{msg.text}</Text> : <Md compact={compact} t={t} text={msg.text} />
return <Md compact={compact} t={t} text={msg.text} />
}
if (msg.role === 'user' && msg.text.length > LONG_MSG && isPasteBackedText(msg.text)) {
@ -63,7 +67,11 @@ export const MessageLine = memo(function MessageLine({
})()
return (
<Box flexDirection="column" marginTop={msg.role === 'user' || msg.kind === 'slash' ? 1 : 0}>
<Box
flexDirection="column"
marginBottom={msg.role === 'user' ? 1 : 0}
marginTop={msg.role === 'user' || msg.kind === 'slash' ? 1 : 0}
>
{msg.thinking && (
<Text color={t.color.dim} dimColor wrap="truncate-end">
💭 {msg.thinking.replace(/\n/g, ' ').slice(0, 200)}

View file

@ -25,6 +25,12 @@ const activityGlyph = (item: ActivityItem) => (item.tone === 'error' ? '✗' : i
const TreeFork = ({ last }: { last: boolean }) => {
  // Tree-drawing connector prefix: corner glyph on the final row, tee otherwise.
  const glyph = last ? '└─ ' : '├─ '
  return <Text dimColor>{glyph}</Text>
}
const fmtElapsed = (ms: number) => {
  // Elapsed-time label: one decimal place under 10 seconds ("3.2s"),
  // whole seconds thereafter ("12s"). Negative inputs clamp to zero.
  const seconds = Math.max(0, ms) / 1000
  if (seconds < 10) {
    return `${seconds.toFixed(1)}s`
  }
  return `${Math.round(seconds)}s`
}
export function Spinner({ color, variant = 'think' }: { color: string; variant?: 'think' | 'tool' }) {
const [spin] = useState(() => {
const raw = spinners[pick(variant === 'tool' ? TOOL : THINK)]
@ -48,16 +54,26 @@ export const ToolTrail = memo(function ToolTrail({
tools = [],
trail = [],
activity = [],
animateCot = false,
padAfter = false
animateCot = false
}: {
t: Theme
tools?: ActiveTool[]
trail?: string[]
activity?: ActivityItem[]
animateCot?: boolean
padAfter?: boolean
}) {
const [now, setNow] = useState(() => Date.now())
useEffect(() => {
if (!tools.length) {
return
}
const id = setInterval(() => setNow(Date.now()), 200)
return () => clearInterval(id)
}, [tools.length])
if (!trail.length && !tools.length && !activity.length) {
return null
}
@ -70,7 +86,6 @@ export const ToolTrail = memo(function ToolTrail({
<>
{trail.map((line, i) => {
const lastInBlock = i === rowCount - 1
const suffix = padAfter && lastInBlock ? '\n' : ''
if (isToolTrailResultLine(line)) {
return (
@ -81,7 +96,6 @@ export const ToolTrail = memo(function ToolTrail({
>
<TreeFork last={lastInBlock} />
{line}
{suffix}
</Text>
)
}
@ -91,7 +105,6 @@ export const ToolTrail = memo(function ToolTrail({
<Text color={t.color.dim} key={`c-${i}`}>
<TreeFork last={lastInBlock} />
<Spinner color={t.color.amber} variant="think" /> {line}
{suffix}
</Text>
)
}
@ -100,34 +113,30 @@ export const ToolTrail = memo(function ToolTrail({
<Text color={t.color.dim} dimColor key={`c-${i}`}>
<TreeFork last={lastInBlock} />
{line}
{suffix}
</Text>
)
})}
{tools.map((tool, j) => {
const lastInBlock = trail.length + j === rowCount - 1
const suffix = padAfter && lastInBlock ? '\n' : ''
return (
<Text color={t.color.dim} key={tool.id}>
<TreeFork last={lastInBlock} />
<Spinner color={t.color.amber} variant="tool" /> {TOOL_VERBS[tool.name] ?? tool.name}
{tool.context ? `: ${tool.context}` : ''}
{suffix}
{tool.startedAt ? ` (${fmtElapsed(now - tool.startedAt)})` : ''}
</Text>
)
})}
{act.map((item, k) => {
const lastInBlock = trail.length + tools.length + k === rowCount - 1
const suffix = padAfter && lastInBlock ? '\n' : ''
return (
<Text color={tone(item, t)} dimColor={item.tone === 'info'} key={`a-${item.id}`}>
<TreeFork last={lastInBlock} />
{activityGlyph(item)} {item.text}
{suffix}
</Text>
)
})}

View file

@ -2,6 +2,7 @@ export interface ActiveTool {
id: string
name: string
context?: string
startedAt?: number
}
export interface ActivityItem {