fix(tui): strip <think>…</think> tags from assistant content and route to reasoning panel

Models that emit reasoning inline as <think>/<reasoning>/<thinking>/<thought>/
<REASONING_SCRATCHPAD> tags in the content field (rather than a separate API
reasoning channel) had the raw tags + inner content shown twice: once as body
text with literal <think> markers, and again in the thinking panel when the
reasoning field was populated.

Port v1's tag set to lib/reasoning.ts with a splitReasoning(text) helper that
returns { reasoning, text }. It is applied in three spots:

  - scheduleStreaming: strips tags from the live streaming view so the user
    never sees <think> mid-turn.
  - flushStreamingSegment: when a tool interrupts assistant output mid-turn,
    the saved segment is the stripped text; extracted reasoning promotes to
    reasoningText if the API channel hasn't already populated it.
  - recordMessageComplete: the final message text is split; the extracted
    reasoning merges with any existing reasoning (the API channel wins on
    conflicts so we don't double-count when both are present).
This commit is contained in:
Brooklyn Nicholson 2026-04-18 14:46:38 -05:00
parent 37cba82bfc
commit 4caf6c23dd
3 changed files with 127 additions and 8 deletions

View file

@ -1,5 +1,6 @@
import { REASONING_PULSE_MS, STREAM_BATCH_MS } from '../config/timing.js'
import type { SessionInterruptResponse, SubagentEventPayload } from '../gatewayTypes.js'
import { hasReasoningTag, splitReasoning } from '../lib/reasoning.js'
import {
buildToolTrailLine,
estimateTokensRough,
@ -121,18 +122,31 @@ class TurnController {
}
flushStreamingSegment() {
const text = this.bufRef.trimStart()
const raw = this.bufRef.trimStart()
if (!text) {
if (!raw) {
return
}
const tools = this.pendingSegmentTools
const split = hasReasoningTag(raw) ? splitReasoning(raw) : { reasoning: '', text: raw }
if (split.reasoning && !this.reasoningText.trim()) {
this.reasoningText = split.reasoning
patchTurnState({ reasoning: this.reasoningText, reasoningTokens: estimateTokensRough(this.reasoningText) })
}
const text = split.text
this.streamTimer = clear(this.streamTimer)
this.segmentMessages = [...this.segmentMessages, { role: 'assistant', text, ...(tools.length && { tools }) }]
if (text) {
const tools = this.pendingSegmentTools
this.segmentMessages = [...this.segmentMessages, { role: 'assistant', text, ...(tools.length && { tools }) }]
this.pendingSegmentTools = []
}
this.bufRef = ''
this.pendingSegmentTools = []
patchTurnState({ streamPendingTools: [], streamSegments: this.segmentMessages, streaming: '' })
}
@ -187,8 +201,11 @@ class TurnController {
}
recordMessageComplete(payload: { rendered?: string; reasoning?: string; text?: string }) {
const finalText = (payload.rendered ?? payload.text ?? this.bufRef).trimStart()
const savedReasoning = this.reasoningText.trim() || String(payload.reasoning ?? '').trim()
const rawText = (payload.rendered ?? payload.text ?? this.bufRef).trimStart()
const split = splitReasoning(rawText)
const finalText = split.text
const existingReasoning = this.reasoningText.trim() || String(payload.reasoning ?? '').trim()
const savedReasoning = [existingReasoning, existingReasoning ? '' : split.reasoning].filter(Boolean).join('\n\n')
const savedReasoningTokens = savedReasoning ? estimateTokensRough(savedReasoning) : 0
const savedToolTokens = this.toolTokenAcc
const tools = this.pendingSegmentTools
@ -355,7 +372,9 @@ class TurnController {
this.streamTimer = setTimeout(() => {
this.streamTimer = null
patchTurnState({ streaming: this.bufRef.trimStart() })
const raw = this.bufRef.trimStart()
const visible = hasReasoningTag(raw) ? splitReasoning(raw).text : raw
patchTurnState({ streaming: visible })
}, STREAM_BATCH_MS)
}