mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-14 04:02:26 +00:00
fix(tui): preserve session when switching personality
Previously, /personality in the TUI called _reset_session_agent() which destroyed the agent, cleared conversation history, and effectively started a new session. This made personality switching disruptive — users lost their entire conversation context. Now /personality updates the agent's ephemeral_system_prompt in-place and injects a pivot marker into the conversation history. The marker tells the model to adopt the new persona from that point forward, which is necessary because LLMs tend to pattern-match their prior responses and continue the established tone without an explicit signal. Changes: - tui_gateway/server.py: Rewrite _apply_personality_to_session to update the agent in-place instead of resetting. Inject a user-role pivot marker so the model actually switches style mid-conversation. - ui-tui/src/app/slash/commands/session.ts: Update help text (no longer mentions history reset). - tests/test_tui_gateway_server.py: Update test to verify history is preserved, pivot marker is injected, and ephemeral prompt is set.
This commit is contained in:
parent
c5b4c48165
commit
65c762b2e8
3 changed files with 55 additions and 24 deletions
|
|
@ -1559,13 +1559,15 @@ def test_config_set_personality_rejects_unknown_name(monkeypatch):
|
||||||
assert "Unknown personality" in resp["error"]["message"]
|
assert "Unknown personality" in resp["error"]["message"]
|
||||||
|
|
||||||
|
|
||||||
def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
|
def test_config_set_personality_preserves_history_and_returns_info(monkeypatch):
|
||||||
|
agent = types.SimpleNamespace(
|
||||||
|
ephemeral_system_prompt=None, _cached_system_prompt="old"
|
||||||
|
)
|
||||||
session = _session(
|
session = _session(
|
||||||
agent=types.SimpleNamespace(),
|
agent=agent,
|
||||||
history=[{"role": "user", "text": "hi"}],
|
history=[{"role": "user", "text": "hi"}],
|
||||||
history_version=4,
|
history_version=4,
|
||||||
)
|
)
|
||||||
new_agent = types.SimpleNamespace(model="x")
|
|
||||||
emits = []
|
emits = []
|
||||||
|
|
||||||
server._sessions["sid"] = session
|
server._sessions["sid"] = session
|
||||||
|
|
@ -1574,13 +1576,9 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
|
||||||
"_available_personalities",
|
"_available_personalities",
|
||||||
lambda cfg=None: {"helpful": "You are helpful."},
|
lambda cfg=None: {"helpful": "You are helpful."},
|
||||||
)
|
)
|
||||||
monkeypatch.setattr(
|
|
||||||
server, "_make_agent", lambda sid, key, session_id=None: new_agent
|
|
||||||
)
|
|
||||||
monkeypatch.setattr(
|
monkeypatch.setattr(
|
||||||
server, "_session_info", lambda agent: {"model": getattr(agent, "model", "?")}
|
server, "_session_info", lambda agent: {"model": getattr(agent, "model", "?")}
|
||||||
)
|
)
|
||||||
monkeypatch.setattr(server, "_restart_slash_worker", lambda session: None)
|
|
||||||
monkeypatch.setattr(server, "_emit", lambda *args: emits.append(args))
|
monkeypatch.setattr(server, "_emit", lambda *args: emits.append(args))
|
||||||
monkeypatch.setattr(server, "_write_config_key", lambda path, value: None)
|
monkeypatch.setattr(server, "_write_config_key", lambda path, value: None)
|
||||||
|
|
||||||
|
|
@ -1592,11 +1590,19 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
assert resp["result"]["history_reset"] is True
|
assert resp["result"]["history_reset"] is False
|
||||||
assert resp["result"]["info"] == {"model": "x"}
|
assert resp["result"]["info"] == {"model": "?"}
|
||||||
assert session["history"] == []
|
# History is preserved with a pivot marker appended
|
||||||
|
assert len(session["history"]) == 2
|
||||||
|
assert session["history"][0] == {"role": "user", "text": "hi"}
|
||||||
|
assert session["history"][1]["role"] == "user"
|
||||||
|
assert "personality" in session["history"][1]["content"].lower()
|
||||||
|
assert "You are helpful." in session["history"][1]["content"]
|
||||||
assert session["history_version"] == 5
|
assert session["history_version"] == 5
|
||||||
assert ("session.info", "sid", {"model": "x"}) in emits
|
# Agent's system prompt was updated in-place; cached prompt untouched
|
||||||
|
assert agent.ephemeral_system_prompt == "You are helpful."
|
||||||
|
assert agent._cached_system_prompt == "old"
|
||||||
|
assert ("session.info", "sid", {"model": "?"}) in emits
|
||||||
|
|
||||||
|
|
||||||
def test_session_compress_uses_compress_helper(monkeypatch):
|
def test_session_compress_uses_compress_helper(monkeypatch):
|
||||||
|
|
|
||||||
|
|
@ -1680,21 +1680,46 @@ def _validate_personality(value: str, cfg: dict | None = None) -> tuple[str, str
|
||||||
def _apply_personality_to_session(
|
def _apply_personality_to_session(
|
||||||
sid: str, session: dict, new_prompt: str
|
sid: str, session: dict, new_prompt: str
|
||||||
) -> tuple[bool, dict | None]:
|
) -> tuple[bool, dict | None]:
|
||||||
|
"""Apply a personality change to an existing session without resetting history.
|
||||||
|
|
||||||
|
Updates the agent's ephemeral system prompt in-place so the new personality
|
||||||
|
takes effect on the next turn. The cached base system prompt is left intact
|
||||||
|
(ephemeral_system_prompt is appended at API-call time, not baked into the
|
||||||
|
cache), which preserves prompt-cache hits.
|
||||||
|
|
||||||
|
Also injects a user-role marker into the conversation history so the model
|
||||||
|
knows to pivot its style from this point forward (without this, LLMs tend to
|
||||||
|
continue the tone established by earlier messages in the transcript).
|
||||||
|
|
||||||
|
Returns (history_reset, info) — history_reset is always False since we
|
||||||
|
preserve the conversation.
|
||||||
|
"""
|
||||||
if not session:
|
if not session:
|
||||||
return False, None
|
return False, None
|
||||||
|
|
||||||
try:
|
agent = session.get("agent")
|
||||||
info = _reset_session_agent(sid, session)
|
if agent:
|
||||||
return True, info
|
agent.ephemeral_system_prompt = new_prompt or None
|
||||||
except Exception:
|
# Inject a pivot marker into history so the model sees the change point.
|
||||||
if session.get("agent"):
|
# This prevents it from pattern-matching its prior style.
|
||||||
agent = session["agent"]
|
if new_prompt:
|
||||||
agent.ephemeral_system_prompt = new_prompt or None
|
marker = (
|
||||||
agent._cached_system_prompt = None
|
"[System: The user has changed the assistant's personality. "
|
||||||
info = _session_info(agent)
|
"From this point forward, adopt the following persona and respond "
|
||||||
_emit("session.info", sid, info)
|
f"accordingly: {new_prompt}]"
|
||||||
return False, info
|
)
|
||||||
return False, None
|
else:
|
||||||
|
marker = (
|
||||||
|
"[System: The user has cleared the personality overlay. "
|
||||||
|
"From this point forward, respond in your normal default style.]"
|
||||||
|
)
|
||||||
|
with session["history_lock"]:
|
||||||
|
session["history"].append({"role": "user", "content": marker})
|
||||||
|
session["history_version"] = int(session.get("history_version", 0)) + 1
|
||||||
|
info = _session_info(agent)
|
||||||
|
_emit("session.info", sid, info)
|
||||||
|
return False, info
|
||||||
|
return False, None
|
||||||
|
|
||||||
|
|
||||||
def _cfg_max_turns(cfg: dict, default: int) -> int:
|
def _cfg_max_turns(cfg: dict, default: int) -> int:
|
||||||
|
|
|
||||||
|
|
@ -109,7 +109,7 @@ export const sessionCommands: SlashCommand[] = [
|
||||||
},
|
},
|
||||||
|
|
||||||
{
|
{
|
||||||
help: 'switch or reset personality (history reset on set)',
|
help: 'switch personality for this session',
|
||||||
name: 'personality',
|
name: 'personality',
|
||||||
run: (arg, ctx) => {
|
run: (arg, ctx) => {
|
||||||
if (!arg) {
|
if (!arg) {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue