fix: vision tool respects auxiliary.vision.temperature from config (#4661)

The vision tool hardcoded temperature=0.1, ignoring the user's
config.yaml setting. This broke providers like Kimi/Moonshot that
require temperature=1 for vision models. Now reads temperature
from auxiliary.vision.temperature, falling back to 0.1.
This commit is contained in:
Saurabh 2026-04-04 11:14:53 +05:30 committed by Teknium
parent e485bc60cd
commit 088bf9057f

View file

@@ -553,18 +553,23 @@ async def vision_analyze_tool(
# Read timeout from config.yaml (auxiliary.vision.timeout), default 120s.
# Local vision models (llama.cpp, ollama) can take well over 30s.
vision_timeout = 120.0
vision_temperature = 0.1
try:
from hermes_cli.config import load_config
_cfg = load_config()
_vt = _cfg.get("auxiliary", {}).get("vision", {}).get("timeout")
_vision_cfg = _cfg.get("auxiliary", {}).get("vision", {})
_vt = _vision_cfg.get("timeout")
if _vt is not None:
vision_timeout = float(_vt)
_vtemp = _vision_cfg.get("temperature")
if _vtemp is not None:
vision_temperature = float(_vtemp)
except Exception:
pass
call_kwargs = {
"task": "vision",
"messages": messages,
"temperature": 0.1,
"temperature": vision_temperature,
"max_tokens": 2000,
"timeout": vision_timeout,
}