diff --git a/tools/browser_camofox.py b/tools/browser_camofox.py
index b8f332dd6..c2278f83e 100644
--- a/tools/browser_camofox.py
+++ b/tools/browser_camofox.py
@@ -522,7 +522,11 @@ def camofox_vision(question: str, annotate: bool = False,
             task="vision",
             timeout=_vision_timeout,
         )
-        analysis = response.choices[0].message.content if response.choices else ""
+        analysis = (response.choices[0].message.content or "").strip() if response.choices else ""
+
+        # Redact secrets the vision LLM may have read from the screenshot.
+        from agent.redact import redact_sensitive_text
+        analysis = redact_sensitive_text(analysis)
 
         return json.dumps({
             "success": True,
diff --git a/tools/browser_tool.py b/tools/browser_tool.py
index 7523d5db5..04e869b0f 100644
--- a/tools/browser_tool.py
+++ b/tools/browser_tool.py
@@ -1048,7 +1048,9 @@ def _extract_relevant_content(
         if model:
             call_kwargs["model"] = model
         response = call_llm(**call_kwargs)
-        return (response.choices[0].message.content or "").strip() or _truncate_snapshot(snapshot_text)
+        extracted = (response.choices[0].message.content or "").strip() or _truncate_snapshot(snapshot_text)
+        # Redact any secrets the auxiliary LLM may have echoed back.
+        return redact_sensitive_text(extracted)
     except Exception:
         return _truncate_snapshot(snapshot_text)
 
@@ -1740,6 +1742,9 @@ def browser_vision(question: str, annotate: bool = False, task_id: Optional[str]
         response = call_llm(**call_kwargs)
         analysis = (response.choices[0].message.content or "").strip()
+        # Redact secrets the vision LLM may have read from the screenshot.
+        from agent.redact import redact_sensitive_text
+        analysis = redact_sensitive_text(analysis)
 
         response_data = {
             "success": True,
             "analysis": analysis or "Vision analysis returned no content.",