feat(models): add deepseek/deepseek-v4-pro to OpenRouter + Nous Portal curated lists (#20495)

Endpoint re-tested over 6 conversational turns (9 API calls, 3 tool calls)
and an 8-request burst — no rate limits, no errors, ~2-3s latency. The
historical rate-limit issues that caused its removal are gone.

- hermes_cli/models.py: add to OPENROUTER_MODELS and _PROVIDER_MODELS['nous']
- website/static/api/model-catalog.json: regenerated via build_model_catalog.py
This commit is contained in:
Teknium 2026-05-05 19:11:58 -07:00 committed by GitHub
parent e598e18529
commit 477e4a2fe6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 15 additions and 2 deletions

View file

@@ -67,6 +67,7 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
("arcee-ai/trinity-large-thinking", ""),
("openai/gpt-5.5-pro", ""),
("openai/gpt-5.4-nano", ""),
("deepseek/deepseek-v4-pro", ""),
]
_openrouter_catalog_cache: list[tuple[str, str]] | None = None
@@ -185,6 +186,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"arcee-ai/trinity-large-thinking",
"openai/gpt-5.5-pro",
"openai/gpt-5.4-nano",
"deepseek/deepseek-v4-pro",
],
# Native OpenAI Chat Completions (api.openai.com). Used by /model counts and
# provider_model_ids fallback when /v1/models is unavailable.