fix: guard aux LLM calls against None content + reasoning fallback + retry (salvage #3389) (#3449)

Salvage of #3389 by @binhnt92 with reasoning fallback and retry logic added on top.

All 7 auxiliary LLM call sites now use extract_content_or_reasoning() which mirrors the main agent loop's behavior: extract content, strip think blocks, fall back to structured reasoning fields, retry on empty.

Closes #3389.
This commit is contained in:
Teknium 2026-03-27 15:28:19 -07:00 committed by GitHub
parent ab09f6b568
commit 658692799d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 414 additions and 14 deletions

View file

@@ -948,9 +948,9 @@ def llm_audit_skill(skill_path: Path, static_result: ScanResult,
# Call the LLM via the centralized provider router
try:
-from agent.auxiliary_client import call_llm
+from agent.auxiliary_client import call_llm, extract_content_or_reasoning
-response = call_llm(
+call_kwargs = dict(
provider="openrouter",
model=model,
messages=[{
@@ -960,7 +960,13 @@
temperature=0,
max_tokens=1000,
)
-llm_text = response.choices[0].message.content.strip()
+response = call_llm(**call_kwargs)
+llm_text = extract_content_or_reasoning(response)
+# Retry once on empty content (reasoning-only response)
+if not llm_text:
+    response = call_llm(**call_kwargs)
+    llm_text = extract_content_or_reasoning(response)
except Exception:
# LLM audit is best-effort — don't block install if the call fails
return static_result