From 6e46f99e7e8e4d5c843cd33afcb6547c2f54b54b Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Thu, 7 May 2026 05:53:19 -0700 Subject: [PATCH] fix(tui): surface backend error as visible text when final_response is empty (#21245) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the provider rejects a request (e.g. invalid model slug like '--provider nous --model kimi-k2.6' where the valid slug is 'moonshotai/kimi-k2.6'), run_conversation() returns {failed: True, error: <message>, final_response: None}. The TUI gateway and one-shot CLI mode both dropped the error on the floor and emitted an empty turn, so the user saw a blank response with no indication that anything went wrong. Mirror the interactive CLI's existing pattern (cli.py:9832): when final_response is empty AND (failed|partial) is set AND error is populated, surface 'Error: <error>' as the visible text. Leaves the None-with-no-error path and the '(empty)' sentinel path untouched — an empty successful turn still renders empty, and existing sentinel handlers keep owning their lane. Reported by @counterposition in PR #20873; taking a minimal fix rather than the broader structured-failure refactor proposed there. --- cli.py | 13 ++++- tests/test_tui_gateway_server.py | 94 ++++++++++++++++++++++++++++++++ tui_gateway/server.py | 12 ++++ 3 files changed, 118 insertions(+), 1 deletion(-) diff --git a/cli.py b/cli.py index 1f11594dcd..16b3bea072 100644 --- a/cli.py +++ b/cli.py @@ -12526,7 +12526,18 @@ def main( ): cli.session_id = cli.agent.session_id response = result.get("final_response", "") if isinstance(result, dict) else str(result) - if response: + # Surface backend errors that produced no visible output + # (e.g. invalid model slug → provider 4xx). Mirrors the + # interactive CLI path. Write to stderr so piped stdout + # stays clean for automation wrappers. 
+ if ( + not response + and isinstance(result, dict) + and result.get("error") + and (result.get("failed") or result.get("partial")) + ): + print(f"Error: {result['error']}", file=sys.stderr) + elif response: print(response) # Session ID goes to stderr so piped stdout is clean. print(f"\nsession_id: {cli.session_id}", file=sys.stderr) diff --git a/tests/test_tui_gateway_server.py b/tests/test_tui_gateway_server.py index 184f5606a8..f7d70f92a9 100644 --- a/tests/test_tui_gateway_server.py +++ b/tests/test_tui_gateway_server.py @@ -3603,6 +3603,100 @@ def test_prompt_submit_skips_auto_title_when_response_empty(monkeypatch): mock_title.assert_not_called() +def test_prompt_submit_surfaces_backend_error_as_visible_text(monkeypatch): + """When the backend fails with no visible response (e.g. invalid model slug + → provider 4xx), the TUI must surface result['error'] as visible text + instead of emitting a blank message.complete turn.""" + + class _Agent: + def run_conversation( + self, prompt, conversation_history=None, stream_callback=None + ): + return { + "final_response": None, + "messages": [], + "api_calls": 0, + "completed": False, + "failed": True, + "error": "HTTP 400: invalid model id 'kimi-k2.6'", + } + + server._sessions["sid"] = _session(agent=_Agent()) + monkeypatch.setattr(server.threading, "Thread", _ImmediateThread) + + emitted: list[tuple[str, str, dict]] = [] + monkeypatch.setattr( + server, + "_emit", + lambda event, sid, payload=None: emitted.append((event, sid, payload or {})), + ) + monkeypatch.setattr(server, "make_stream_renderer", lambda cols: None) + monkeypatch.setattr(server, "render_message", lambda raw, cols: None) + monkeypatch.setattr(server, "_get_db", lambda: None) + + server.handle_request( + { + "id": "1", + "method": "prompt.submit", + "params": {"session_id": "sid", "text": "hello"}, + } + ) + + complete_events = [e for e in emitted if e[0] == "message.complete"] + assert complete_events, "expected message.complete to be emitted" + 
payload = complete_events[-1][2] + assert payload.get("status") == "error" + assert payload.get("text", "").startswith("Error:") + assert "kimi-k2.6" in payload.get("text", "") + + +def test_prompt_submit_preserves_empty_response_without_error(monkeypatch): + """An empty final_response with NO backend error must stay empty — do not + synthesize an error string. Preserves the existing None/empty-sentinel + semantics owned by downstream handlers.""" + + class _Agent: + def run_conversation( + self, prompt, conversation_history=None, stream_callback=None + ): + return { + "final_response": None, + "messages": [], + "api_calls": 1, + "completed": True, + } + + server._sessions["sid"] = _session(agent=_Agent()) + monkeypatch.setattr(server.threading, "Thread", _ImmediateThread) + + emitted: list[tuple[str, str, dict]] = [] + monkeypatch.setattr( + server, + "_emit", + lambda event, sid, payload=None: emitted.append((event, sid, payload or {})), + ) + monkeypatch.setattr(server, "make_stream_renderer", lambda cols: None) + monkeypatch.setattr(server, "render_message", lambda raw, cols: None) + monkeypatch.setattr(server, "_get_db", lambda: None) + + server.handle_request( + { + "id": "1", + "method": "prompt.submit", + "params": {"session_id": "sid", "text": "hello"}, + } + ) + + complete_events = [e for e in emitted if e[0] == "message.complete"] + assert complete_events, "expected message.complete to be emitted" + payload = complete_events[-1][2] + # Status stays "complete" because no error flag was set + assert payload.get("status") == "complete" + # Text stays empty — we did NOT fabricate an "Error:" string + text = payload.get("text", "") + assert text in ("", None), f"expected empty text, got {text!r}" + + # ── session.most_recent ────────────────────────────────────────────── diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 4c36a561b1..ca378bb728 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -3137,6 +3137,18 @@ def 
_run_prompt_submit(rid, sid: str, session: dict, text: Any) -> None: if result.get("interrupted") else "error" if result.get("error") else "complete" ) + # When the backend produced no visible response AND reported a + # real error (e.g. invalid model slug → provider 4xx), surface + # that error as the visible text instead of shipping an empty + # turn to Ink. Mirrors classic CLI behavior at cli.py where + # (failed|partial) + no final_response → "Error: <error>". + # Leaves the None-with-no-error path untouched: an empty + # successful turn still renders as empty, and the existing + # "(empty)" sentinel handling stays in its own lane. + if (not raw) and result.get("error") and ( + result.get("failed") or result.get("partial") + ): + raw = f"Error: {result.get('error')}" lr = result.get("last_reasoning") if isinstance(lr, str) and lr.strip(): last_reasoning = lr.strip()