diff --git a/tests/test_tui_gateway_server.py b/tests/test_tui_gateway_server.py index 9e5bbc516f..64a154bb9a 100644 --- a/tests/test_tui_gateway_server.py +++ b/tests/test_tui_gateway_server.py @@ -1863,13 +1863,15 @@ def test_config_set_personality_rejects_unknown_name(monkeypatch): assert "Unknown personality" in resp["error"]["message"] -def test_config_set_personality_resets_history_and_returns_info(monkeypatch): +def test_config_set_personality_preserves_history_and_returns_info(monkeypatch): + agent = types.SimpleNamespace( + ephemeral_system_prompt=None, _cached_system_prompt="old" + ) session = _session( - agent=types.SimpleNamespace(), + agent=agent, history=[{"role": "user", "text": "hi"}], history_version=4, ) - new_agent = types.SimpleNamespace(model="x") emits = [] server._sessions["sid"] = session @@ -1878,13 +1880,9 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch): "_available_personalities", lambda cfg=None: {"helpful": "You are helpful."}, ) - monkeypatch.setattr( - server, "_make_agent", lambda sid, key, session_id=None: new_agent - ) monkeypatch.setattr( server, "_session_info", lambda agent: {"model": getattr(agent, "model", "?")} ) - monkeypatch.setattr(server, "_restart_slash_worker", lambda session: None) monkeypatch.setattr(server, "_emit", lambda *args: emits.append(args)) monkeypatch.setattr(server, "_write_config_key", lambda path, value: None) @@ -1896,11 +1894,19 @@ def test_config_set_personality_resets_history_and_returns_info(monkeypatch): } ) - assert resp["result"]["history_reset"] is True - assert resp["result"]["info"] == {"model": "x"} - assert session["history"] == [] + assert resp["result"]["history_reset"] is False + assert resp["result"]["info"] == {"model": "?"} + # History is preserved with a pivot marker appended + assert len(session["history"]) == 2 + assert session["history"][0] == {"role": "user", "text": "hi"} + assert session["history"][1]["role"] == "user" + assert "personality" in 
session["history"][1]["content"].lower() + assert "You are helpful." in session["history"][1]["content"] assert session["history_version"] == 5 - assert ("session.info", "sid", {"model": "x"}) in emits + # Agent's system prompt was updated in-place; cached prompt untouched + assert agent.ephemeral_system_prompt == "You are helpful." + assert agent._cached_system_prompt == "old" + assert ("session.info", "sid", {"model": "?"}) in emits def test_session_compress_uses_compress_helper(monkeypatch): diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 7219b811e4..fd656118ee 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -1726,21 +1726,46 @@ def _validate_personality(value: str, cfg: dict | None = None) -> tuple[str, str def _apply_personality_to_session( sid: str, session: dict, new_prompt: str ) -> tuple[bool, dict | None]: + """Apply a personality change to an existing session without resetting history. + + Updates the agent's ephemeral system prompt in-place so the new personality + takes effect on the next turn. The cached base system prompt is left intact + (ephemeral_system_prompt is appended at API-call time, not baked into the + cache), which preserves prompt-cache hits. + + Also injects a pivot marker (sent as a user-role message) into the conversation history so the model + knows to pivot its style from this point forward (without this, LLMs tend to + continue the tone established by earlier messages in the transcript). + + Returns (history_reset, info) — history_reset is always False since we + preserve the conversation. 
+ """ if not session: return False, None - try: - info = _reset_session_agent(sid, session) - return True, info - except Exception: - if session.get("agent"): - agent = session["agent"] - agent.ephemeral_system_prompt = new_prompt or None - agent._cached_system_prompt = None - info = _session_info(agent) - _emit("session.info", sid, info) - return False, info - return False, None + agent = session.get("agent") + if agent: + agent.ephemeral_system_prompt = new_prompt or None + # Inject a pivot marker into history so the model sees the change point. + # This prevents it from pattern-matching its prior style. + if new_prompt: + marker = ( + "[System: The user has changed the assistant's personality. " + "From this point forward, adopt the following persona and respond " + f"accordingly: {new_prompt}]" + ) + else: + marker = ( + "[System: The user has cleared the personality overlay. " + "From this point forward, respond in your normal default style.]" + ) + with session["history_lock"]: + session["history"].append({"role": "user", "content": marker}) + session["history_version"] = int(session.get("history_version", 0)) + 1 + info = _session_info(agent) + _emit("session.info", sid, info) + return False, info + return False, None def _cfg_max_turns(cfg: dict, default: int) -> int: diff --git a/ui-tui/src/app/slash/commands/session.ts b/ui-tui/src/app/slash/commands/session.ts index ce9315ddb4..466505d8ce 100644 --- a/ui-tui/src/app/slash/commands/session.ts +++ b/ui-tui/src/app/slash/commands/session.ts @@ -122,7 +122,7 @@ export const sessionCommands: SlashCommand[] = [ }, { - help: 'switch or reset personality (history reset on set)', + help: 'switch personality for this session', name: 'personality', run: (arg, ctx) => { if (!arg) {