mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-12 03:42:08 +00:00
Introduces providers/ package — single source of truth for every inference provider. Adding a simple api-key provider now requires one providers/<name>.py file with zero edits anywhere else. What this PR ships: - providers/ package (ProviderProfile ABC + 33 profiles across 4 api_modes) - ProviderProfile declarative fields: name, api_mode, aliases, display_name, env_vars, base_url, models_url, auth_type, fallback_models, hostname, default_headers, fixed_temperature, default_max_tokens, default_aux_model - 4 overridable hooks: prepare_messages, build_extra_body, build_api_kwargs_extras, fetch_models - chat_completions.build_kwargs: profile path via _build_kwargs_from_profile, legacy flag path retained for lmstudio/tencent-tokenhub (which have session-aware reasoning probing that doesn't map cleanly to hooks yet) - run_agent.py: profile path for all registered providers; legacy path variable scoping fixed (all flags defined before branching) - Auto-wires: auth.PROVIDER_REGISTRY, models.CANONICAL_PROVIDERS, doctor health checks, config.OPTIONAL_ENV_VARS, model_metadata._URL_TO_PROVIDER - GeminiProfile: thinking_config translation (native + openai-compat nested) - New tests/providers/ (79 tests covering profile declarations, transport parity, hook overrides, e2e kwargs assembly) Deltas vs original PR (salvaged onto current main): - Added profiles: alibaba-coding-plan, azure-foundry, minimax-oauth (were added to main since original PR) - Skipped profiles: lmstudio, tencent-tokenhub stay on legacy path (their reasoning_effort probing has no clean hook equivalent yet) - Removed lmstudio alias from custom profile (it's a separate provider now) - Skipped openrouter/custom from PROVIDER_REGISTRY auto-extension (resolve_provider special-cases them; adding breaks runtime resolution) - runtime_provider: profile.api_mode only as fallback when URL detection finds nothing (was breaking minimax /v1 override) - Preserved main's legacy-path improvements: deepseek reasoning_content 
preserve, gemini Gemma skip, OpenRouter response caching, Anthropic 1M beta recovery, etc. - Kept agent/copilot_acp_client.py in place (rejected PR's relocation — main has 7 fixes landed since; relocation would revert them) - _API_KEY_PROVIDER_AUX_MODELS alias kept for backward compat with existing test imports Co-authored-by: kshitijk4poor <82637225+kshitijk4poor@users.noreply.github.com> Closes #14418
82 lines
2.7 KiB
Python
82 lines
2.7 KiB
Python
"""Qwen Portal provider profile."""
|
|
|
|
import copy
|
|
from typing import Any
|
|
|
|
from providers import register_provider
|
|
from providers.base import ProviderProfile
|
|
|
|
|
|
class QwenProfile(ProviderProfile):
    """Qwen Portal — message normalization, vl_high_resolution, metadata top-level."""

    def prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Normalize message content to list-of-dicts format.

        Also injects ``cache_control`` on the system message, matching the
        behavior of ``run_agent.py:_qwen_prepare_chat_messages()``.
        """
        out = copy.deepcopy(messages)
        if not out:
            return out

        # Pass 1: coerce each message's content into the list-of-parts shape.
        for entry in out:
            if not isinstance(entry, dict):
                continue
            body = entry.get("content")
            if isinstance(body, str):
                entry["content"] = [{"type": "text", "text": body}]
            elif isinstance(body, list):
                # Wrap bare strings as text parts; keep dict parts; drop anything else.
                parts = [
                    {"type": "text", "text": piece} if isinstance(piece, str) else piece
                    for piece in body
                    if isinstance(piece, (str, dict))
                ]
                # Only replace when something survived normalization —
                # an all-unknown-parts list is left untouched.
                if parts:
                    entry["content"] = parts

        # Pass 2: tag the last part of the first eligible system message for
        # prompt caching. NOTE: a system message whose content is not a
        # non-empty list ending in a dict does not stop the scan — later
        # system messages are still considered.
        for entry in out:
            if not (isinstance(entry, dict) and entry.get("role") == "system"):
                continue
            body = entry.get("content")
            if isinstance(body, list) and body and isinstance(body[-1], dict):
                body[-1]["cache_control"] = {"type": "ephemeral"}
                break

        return out

    def build_extra_body(
        self, *, session_id: str | None = None, **context
    ) -> dict[str, Any]:
        """Always request high-resolution image handling for Qwen VL models."""
        return dict(vl_high_resolution_images=True)

    def build_api_kwargs_extras(
        self,
        *,
        reasoning_config: dict | None = None,
        qwen_session_metadata: dict | None = None,
        **context,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Qwen metadata goes to top-level api_kwargs, not extra_body."""
        top_level: dict[str, Any] = (
            {"metadata": qwen_session_metadata} if qwen_session_metadata else {}
        )
        return {}, top_level
|
|
|
|
|
|
# Declarative settings for the Qwen Portal provider (OAuth-authenticated,
# OpenAI-compatible endpoint).
_QWEN_SETTINGS: dict[str, Any] = {
    "name": "qwen-oauth",
    "aliases": ("qwen", "qwen-portal", "qwen-cli"),
    "env_vars": ("QWEN_API_KEY",),
    "base_url": "https://portal.qwen.ai/v1",
    "auth_type": "oauth_external",
    "default_max_tokens": 65536,
}

qwen = QwenProfile(**_QWEN_SETTINGS)

register_provider(qwen)
|