fix(tui): preserve session when switching personality

Previously, /personality in the TUI called _reset_session_agent() which
destroyed the agent, cleared conversation history, and effectively started
a new session. This made personality switching disruptive — users lost
their entire conversation context.

Now /personality updates the agent's ephemeral_system_prompt in-place and
injects a pivot marker into the conversation history. The marker tells
the model to adopt the new persona from that point forward, which is
necessary because LLMs tend to pattern-match their prior responses and
continue the established tone without an explicit signal.

Changes:
- tui_gateway/server.py: Rewrite _apply_personality_to_session to update
  the agent in-place instead of resetting. Inject a user-role pivot
  marker so the model actually switches style mid-conversation.
- ui-tui/src/app/slash/commands/session.ts: Update help text (no longer
  mentions history reset).
- tests/test_tui_gateway_server.py: Update test to verify history is
  preserved, pivot marker is injected, and ephemeral prompt is set.
This commit is contained in:
Austin Pickett 2026-05-06 19:30:46 -04:00
parent c5b4c48165
commit 65c762b2e8
3 changed files with 55 additions and 24 deletions

View file

@@ -1559,13 +1559,15 @@ def test_config_set_personality_rejects_unknown_name(monkeypatch):
assert "Unknown personality" in resp["error"]["message"]
def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
def test_config_set_personality_preserves_history_and_returns_info(monkeypatch):
agent = types.SimpleNamespace(
ephemeral_system_prompt=None, _cached_system_prompt="old"
)
session = _session(
agent=types.SimpleNamespace(),
agent=agent,
history=[{"role": "user", "text": "hi"}],
history_version=4,
)
new_agent = types.SimpleNamespace(model="x")
emits = []
server._sessions["sid"] = session
@@ -1574,13 +1576,9 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
"_available_personalities",
lambda cfg=None: {"helpful": "You are helpful."},
)
monkeypatch.setattr(
server, "_make_agent", lambda sid, key, session_id=None: new_agent
)
monkeypatch.setattr(
server, "_session_info", lambda agent: {"model": getattr(agent, "model", "?")}
)
monkeypatch.setattr(server, "_restart_slash_worker", lambda session: None)
monkeypatch.setattr(server, "_emit", lambda *args: emits.append(args))
monkeypatch.setattr(server, "_write_config_key", lambda path, value: None)
@@ -1592,11 +1590,19 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch):
}
)
assert resp["result"]["history_reset"] is True
assert resp["result"]["info"] == {"model": "x"}
assert session["history"] == []
assert resp["result"]["history_reset"] is False
assert resp["result"]["info"] == {"model": "?"}
# History is preserved with a pivot marker appended
assert len(session["history"]) == 2
assert session["history"][0] == {"role": "user", "text": "hi"}
assert session["history"][1]["role"] == "user"
assert "personality" in session["history"][1]["content"].lower()
assert "You are helpful." in session["history"][1]["content"]
assert session["history_version"] == 5
assert ("session.info", "sid", {"model": "x"}) in emits
# Agent's system prompt was updated in-place; cached prompt untouched
assert agent.ephemeral_system_prompt == "You are helpful."
assert agent._cached_system_prompt == "old"
assert ("session.info", "sid", {"model": "?"}) in emits
def test_session_compress_uses_compress_helper(monkeypatch):

View file

@@ -1680,21 +1680,46 @@ def _validate_personality(value: str, cfg: dict | None = None) -> tuple[str, str
def _apply_personality_to_session(
sid: str, session: dict, new_prompt: str
) -> tuple[bool, dict | None]:
"""Apply a personality change to an existing session without resetting history.
Updates the agent's ephemeral system prompt in-place so the new personality
takes effect on the next turn. The cached base system prompt is left intact
(ephemeral_system_prompt is appended at API-call time, not baked into the
cache), which preserves prompt-cache hits.
Also injects a user-role marker into the conversation history so the model
knows to pivot its style from this point forward (without this, LLMs tend to
continue the tone established by earlier messages in the transcript).
Returns (history_reset, info). history_reset is always False since we
preserve the conversation.
"""
if not session:
return False, None
try:
info = _reset_session_agent(sid, session)
return True, info
except Exception:
if session.get("agent"):
agent = session["agent"]
agent.ephemeral_system_prompt = new_prompt or None
agent._cached_system_prompt = None
info = _session_info(agent)
_emit("session.info", sid, info)
return False, info
return False, None
agent = session.get("agent")
if agent:
agent.ephemeral_system_prompt = new_prompt or None
# Inject a pivot marker into history so the model sees the change point.
# This prevents it from pattern-matching its prior style.
if new_prompt:
marker = (
"[System: The user has changed the assistant's personality. "
"From this point forward, adopt the following persona and respond "
f"accordingly: {new_prompt}]"
)
else:
marker = (
"[System: The user has cleared the personality overlay. "
"From this point forward, respond in your normal default style.]"
)
with session["history_lock"]:
session["history"].append({"role": "user", "content": marker})
session["history_version"] = int(session.get("history_version", 0)) + 1
info = _session_info(agent)
_emit("session.info", sid, info)
return False, info
return False, None
def _cfg_max_turns(cfg: dict, default: int) -> int:

View file

@@ -109,7 +109,7 @@ export const sessionCommands: SlashCommand[] = [
},
{
help: 'switch or reset personality (history reset on set)',
help: 'switch personality for this session',
name: 'personality',
run: (arg, ctx) => {
if (!arg) {