From c78a188ddd13878a68ff9a88df05b4474d1f6450 Mon Sep 17 00:00:00 2001
From: Teknium
Date: Wed, 22 Apr 2026 18:33:49 -0700
Subject: [PATCH] refactor: invalidate transport cache when api_mode
 auto-upgrades to codex_responses
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Follow-up for #13862 — the post-init api_mode upgrade at __init__
(direct OpenAI / gpt-5-requires-responses path) runs AFTER the eager
transport warm. Clear the cache so the stale chat_completions entry is
evicted.

Cosmetic: correctness was already fine since _get_transport() keys by
current api_mode, but this avoids leaving unused cache state behind.
---
 run_agent.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/run_agent.py b/run_agent.py
index a73b41228..eaafac5b4 100644
--- a/run_agent.py
+++ b/run_agent.py
@@ -913,6 +913,10 @@ class AIAgent:
                 )
             ):
                 self.api_mode = "codex_responses"
+                # Invalidate the eager-warmed transport cache — api_mode changed
+                # from chat_completions to codex_responses after the warm at __init__.
+                if hasattr(self, "_transport_cache"):
+                    self._transport_cache.clear()
 
         # Pre-warm OpenRouter model metadata cache in a background thread.
         # fetch_model_metadata() is cached for 1 hour; this avoids a blocking