fix(analytics): prevent silent token loss and add Claude 4.5–4.7 pricing (#21455)

- Add pricing entries for Claude Opus 4.5/4.6/4.7, Sonnet 4.5/4.6, and
  Haiku 4.5 with updated source URLs (platform.claude.com)
- Add _normalize_anthropic_model_name() to handle dot-notation variants
  (e.g. claude-opus-4.7 → claude-opus-4-7) for pricing lookups
- Fix silent token loss: ensure session row exists before UPDATE in both
  run_agent.py and hermes_state.py (INSERT OR IGNORE is idempotent)
- Log token persistence failures at DEBUG level instead of swallowing
  them silently — this makes undercounted analytics diagnosable
- Surface reasoning tokens in CLI /usage and TUI usage panel
- Add 'reasoning' and 'cost_status' fields to TUI Usage type
This commit is contained in:
Austin Pickett 2026-05-07 16:24:31 -04:00 committed by GitHub
parent cff821e2dc
commit d87c7b99e2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 186 additions and 16 deletions

View file

@@ -12131,6 +12131,14 @@ class AIAgent:
# deltas instead of double-counting them.
if self._session_db and self.session_id:
try:
# Ensure the session row exists before attempting UPDATE.
# Under concurrent load (cron/kanban), the initial
# _ensure_db_session() may have failed due to SQLite
# locking. Retry here so per-call token deltas are
# not silently lost (UPDATE on a non-existent row
# affects 0 rows without error).
if not self._session_db_created:
self._ensure_db_session()
self._session_db.update_token_counts(
self.session_id,
input_tokens=canonical_usage.input_tokens,
@@ -12149,8 +12157,14 @@ class AIAgent:
model=self.model,
api_call_count=1,
)
except Exception:
pass # never block the agent loop
except Exception as e:
# Log token persistence failures so they're
# visible in agent.log — silent loss here is
# the root cause of undercounted analytics.
logger.debug(
"Token persistence failed (session=%s, tokens=%d): %s",
self.session_id, total_tokens, e,
)
if self.verbose_logging:
logging.debug(f"Token usage: prompt={usage_dict['prompt_tokens']:,}, completion={usage_dict['completion_tokens']:,}, total={usage_dict['total_tokens']:,}")