feat: add Xiaomi MiMo v2.5-pro and v2.5 model support (#14635)

## Merged

Adds MiMo v2.5-pro and v2.5 support to Xiaomi native provider, OpenCode Go, and setup wizard.

### Changes
- Context lengths: added v2.5-pro (1M) and v2.5 (1M), corrected existing MiMo entries to exact values (262144)
- Provider lists: xiaomi, opencode-go, setup wizard
- Vision: upgraded from mimo-v2-omni to mimo-v2.5 (omnimodal)
- Config description updated for XIAOMI_API_KEY
- Tests updated for new vision model preference

### Verification
- 4322 tests passed, 0 new regressions
- Live API tested on Xiaomi portal: basic, reasoning, tool calling, multi-tool, file ops, system prompt, vision — all pass
- Self-review found and fixed 2 issues (redundant vision check, stale HuggingFace context length)
This commit is contained in:
kshitij 2026-04-23 10:06:25 -07:00 committed by GitHub
parent ce089169d5
commit 82a0ed1afb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 20 additions and 16 deletions

View file

@@ -151,7 +151,7 @@ _API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
 # differs from their main chat model, map it here. The vision auto-detect
 # "exotic provider" branch checks this before falling back to the main model.
 _PROVIDER_VISION_MODELS: Dict[str, str] = {
-    "xiaomi": "mimo-v2-omni",
+    "xiaomi": "mimo-v2.5",
     "zai": "glm-5v-turbo",
 }

View file

@@ -183,12 +183,12 @@ DEFAULT_CONTEXT_LENGTHS = {
     "moonshotai/Kimi-K2.6": 262144,
     "moonshotai/Kimi-K2-Thinking": 262144,
     "MiniMaxAI/MiniMax-M2.5": 204800,
-    "XiaomiMiMo/MiMo-V2-Flash": 256000,
-    "mimo-v2-pro": 1000000,
-    "mimo-v2-omni": 256000,
-    "mimo-v2-flash": 256000,
-    "mimo-v2.5-pro": 1000000,
-    "mimo-v2.5": 1000000,
+    "XiaomiMiMo/MiMo-V2-Flash": 262144,
+    "mimo-v2-pro": 1048576,
+    "mimo-v2.5-pro": 1048576,
+    "mimo-v2.5": 1048576,
+    "mimo-v2-omni": 262144,
+    "mimo-v2-flash": 262144,
     "zai-org/GLM-5": 202752,
 }

View file

@@ -1291,7 +1291,7 @@ OPTIONAL_ENV_VARS = {
         "advanced": True,
     },
     "XIAOMI_API_KEY": {
-        "description": "Xiaomi MiMo API key for MiMo models (mimo-v2-pro, mimo-v2-omni, mimo-v2-flash)",
+        "description": "Xiaomi MiMo API key for MiMo models (mimo-v2.5-pro, mimo-v2.5, mimo-v2-pro, mimo-v2-omni, mimo-v2-flash)",
         "prompt": "Xiaomi MiMo API Key",
         "url": "https://platform.xiaomimimo.com",
         "password": True,

View file

@@ -250,6 +250,8 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
         "deepseek-reasoner",
     ],
     "xiaomi": [
+        "mimo-v2.5-pro",
+        "mimo-v2.5",
         "mimo-v2-pro",
         "mimo-v2-omni",
         "mimo-v2-flash",
@@ -301,6 +303,8 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
         "kimi-k2.5",
         "glm-5.1",
         "glm-5",
+        "mimo-v2.5-pro",
+        "mimo-v2.5",
         "mimo-v2-pro",
         "mimo-v2-omni",
         "minimax-m2.7",
@@ -692,7 +696,7 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
     ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, $5 free credit, no markup)"),
     ProviderEntry("anthropic", "Anthropic", "Anthropic (Claude models — API key or Claude Code)"),
     ProviderEntry("openai-codex", "OpenAI Codex", "OpenAI Codex"),
-    ProviderEntry("xiaomi", "Xiaomi MiMo", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
+    ProviderEntry("xiaomi", "Xiaomi MiMo", "Xiaomi MiMo (MiMo-V2.5 and V2 models — pro, omni, flash)"),
     ProviderEntry("nvidia", "NVIDIA NIM", "NVIDIA NIM (Nemotron models — build.nvidia.com or local NIM)"),
     ProviderEntry("qwen-oauth", "Qwen OAuth (Portal)", "Qwen OAuth (reuses local Qwen CLI login)"),
     ProviderEntry("copilot", "GitHub Copilot", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),

View file

@@ -103,7 +103,7 @@ _DEFAULT_PROVIDER_MODELS = {
     "ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"],
     "kilocode": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5.4", "google/gemini-3-pro-preview", "google/gemini-3-flash-preview"],
     "opencode-zen": ["gpt-5.4", "gpt-5.3-codex", "claude-sonnet-4-6", "gemini-3-flash", "glm-5", "kimi-k2.5", "minimax-m2.7"],
-    "opencode-go": ["kimi-k2.6", "kimi-k2.5", "glm-5.1", "glm-5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.5", "minimax-m2.7", "qwen3.6-plus", "qwen3.5-plus"],
+    "opencode-go": ["kimi-k2.6", "kimi-k2.5", "glm-5.1", "glm-5", "mimo-v2.5-pro", "mimo-v2.5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.7", "minimax-m2.5", "qwen3.6-plus", "qwen3.5-plus"],
     "huggingface": [
         "Qwen/Qwen3.5-397B-A17B", "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "Qwen/Qwen3-Coder-480B-A35B-Instruct", "deepseek-ai/DeepSeek-R1-0528",

View file

@@ -245,7 +245,7 @@ class TestResolveVisionMainFirst:
         assert model == "xiaomi/mimo-v2-omni"

     def test_exotic_provider_with_vision_override_preserved(self):
-        """xiaomi → mimo-v2-omni override still wins over main_model."""
+        """xiaomi → mimo-v2.5 override still wins over main_model."""
         with patch(
             "agent.auxiliary_client._read_main_provider", return_value="xiaomi",
         ), patch(
@@ -257,15 +257,15 @@ class TestResolveVisionMainFirst:
             "agent.auxiliary_client._resolve_task_provider_model",
             return_value=("auto", None, None, None, None),
         ):
-            mock_resolve.return_value = (MagicMock(), "mimo-v2-omni")
+            mock_resolve.return_value = (MagicMock(), "mimo-v2.5")
             from agent.auxiliary_client import resolve_vision_provider_client
             provider, client, model = resolve_vision_provider_client()
             assert provider == "xiaomi"
-            # Should use mimo-v2-omni (vision override), not mimo-v2-pro (text main)
-            assert mock_resolve.call_args.args[1] == "mimo-v2-omni"
+            # Should use mimo-v2.5 (vision override), not mimo-v2-pro (text main)
+            assert mock_resolve.call_args.args[1] == "mimo-v2.5"

     def test_main_unavailable_vision_falls_through_to_aggregators(self):
         """Main provider fails → fall back to OpenRouter/Nous strict backends."""

View file

@@ -287,10 +287,10 @@ class TestXiaomiAuxiliary:
         assert "xiaomi" not in _API_KEY_PROVIDER_AUX_MODELS

     def test_vision_model_override(self):
-        """Xiaomi vision tasks should use mimo-v2-omni (multimodal), not the main model."""
+        """Xiaomi vision tasks should use mimo-v2.5 (multimodal), not the main model."""
         from agent.auxiliary_client import _PROVIDER_VISION_MODELS
         assert "xiaomi" in _PROVIDER_VISION_MODELS
-        assert _PROVIDER_VISION_MODELS["xiaomi"] == "mimo-v2-omni"
+        assert _PROVIDER_VISION_MODELS["xiaomi"] == "mimo-v2.5"

 # =============================================================================