merge: resolve conflicts with main (URL update to hermes-agent.nousresearch.com)

teknium1 2026-03-12 17:49:26 -07:00
commit e976879cf2
29 changed files with 3110 additions and 115 deletions


@@ -11,6 +11,7 @@ Modular wizard with independently-runnable sections:
Config files are stored in ~/.hermes/ for easy access.
"""
import importlib.util
import logging
import os
import sys
@@ -51,6 +52,68 @@ def _set_default_model(config: Dict[str, Any], model_name: str) -> None:
    config["model"] = model_cfg

# Default model lists per provider — used as fallback when the live
# /models endpoint can't be reached.
_DEFAULT_PROVIDER_MODELS = {
    "zai": ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
    "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
    "minimax": ["MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"],
    "minimax-cn": ["MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"],
}

def _setup_provider_model_selection(config, provider_id, current_model, prompt_choice, prompt_fn):
    """Model selection for API-key providers with live /models detection.

    Tries the provider's /models endpoint first. Falls back to a
    hardcoded default list with a warning if the endpoint is unreachable.
    Always offers a 'Custom model' escape hatch.
    """
    from hermes_cli.auth import PROVIDER_REGISTRY
    from hermes_cli.config import get_env_value
    from hermes_cli.models import fetch_api_models

    pconfig = PROVIDER_REGISTRY[provider_id]

    # Resolve API key and base URL for the probe
    api_key = ""
    for ev in pconfig.api_key_env_vars:
        api_key = get_env_value(ev) or os.getenv(ev, "")
        if api_key:
            break
    base_url_env = pconfig.base_url_env_var or ""
    base_url = (get_env_value(base_url_env) if base_url_env else "") or pconfig.inference_base_url

    # Try live /models endpoint
    live_models = fetch_api_models(api_key, base_url)
    if live_models:
        provider_models = live_models
        print_info(f"Found {len(live_models)} model(s) from {pconfig.name} API")
    else:
        provider_models = _DEFAULT_PROVIDER_MODELS.get(provider_id, [])
        if provider_models:
            print_warning(
                f"Could not auto-detect models from {pconfig.name} API — showing defaults.\n"
                f" Use \"Custom model\" if the model you expect isn't listed."
            )

    model_choices = list(provider_models)
    model_choices.append("Custom model")
    model_choices.append(f"Keep current ({current_model})")
    keep_idx = len(model_choices) - 1

    model_idx = prompt_choice("Select default model:", model_choices, keep_idx)
    if model_idx < len(provider_models):
        _set_default_model(config, provider_models[model_idx])
    elif model_idx == len(provider_models):
        custom = prompt_fn("Enter model name")
        if custom:
            _set_default_model(config, custom)
    # else: keep current
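
The fetch_api_models helper used for the live probe is only imported here; its implementation is not part of this diff. As a rough sketch of the behavior the wizard relies on (query an OpenAI-style /models endpoint, return an empty list on any failure so the hardcoded defaults kick in), it could look roughly like the following. The function body, endpoint shape, and error handling are assumptions for illustration, not the actual hermes_cli.models code.

# Sketch only: assumed shape of hermes_cli.models.fetch_api_models.
# The real implementation is not shown in this commit.
import requests

def fetch_api_models(api_key: str, base_url: str, timeout: float = 5.0) -> list:
    """Return model ids from an OpenAI-style /models endpoint, or [] on any failure."""
    if not api_key or not base_url:
        return []
    try:
        resp = requests.get(
            f"{base_url.rstrip('/')}/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=timeout,
        )
        resp.raise_for_status()
        data = resp.json().get("data", [])
        return [m["id"] for m in data if isinstance(m, dict) and "id" in m]
    except Exception:
        # Any network or parsing error means the caller falls back to defaults.
        return []
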
def _sync_model_from_disk(config: Dict[str, Any]) -> None:
    disk_model = load_config().get("model")
    if isinstance(disk_model, dict):
@@ -889,7 +952,8 @@ def setup_model_provider(config: dict):
             print_info(f" URL: {detected['base_url']}")
             if detected["id"].startswith("coding"):
                 print_info(
-                    f" Note: Coding Plan detected — GLM-5 is not available, using {detected['model']}"
+                    f" Note: Coding Plan endpoint detected (default model: {detected['model']}). "
+                    f"GLM-5 may still be available depending on your plan tier."
                 )
             save_env_value("GLM_BASE_URL", zai_base_url)
         else:
@@ -1174,10 +1238,10 @@ def setup_model_provider(config: dict):
             _set_default_model(config, custom)
         _update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL)
         _set_model_provider(config, "openai-codex", DEFAULT_CODEX_BASE_URL)
-    elif selected_provider == "zai":
-        # Coding Plan endpoints don't have GLM-5
-        is_coding_plan = get_env_value("GLM_BASE_URL") and "coding" in (
-            get_env_value("GLM_BASE_URL") or ""
+    elif selected_provider in ("zai", "kimi-coding", "minimax", "minimax-cn"):
+        _setup_provider_model_selection(
+            config, selected_provider, current_model,
+            prompt_choice, prompt,
         )
-        if is_coding_plan:
-            zai_models = ["glm-4.7", "glm-4.5", "glm-4.5-flash"]
@@ -2111,6 +2175,114 @@ def setup_tools(config: dict, first_install: bool = False):
    tools_command(first_install=first_install, config=config)

# =============================================================================
# OpenClaw Migration
# =============================================================================

_OPENCLAW_SCRIPT = (
    PROJECT_ROOT
    / "optional-skills"
    / "migration"
    / "openclaw-migration"
    / "scripts"
    / "openclaw_to_hermes.py"
)

def _offer_openclaw_migration(hermes_home: Path) -> bool:
    """Detect ~/.openclaw and offer to migrate during first-time setup.

    Returns True if migration ran successfully, False otherwise.
    """
    openclaw_dir = Path.home() / ".openclaw"
    if not openclaw_dir.is_dir():
        return False
    if not _OPENCLAW_SCRIPT.exists():
        return False

    print()
    print_header("OpenClaw Installation Detected")
    print_info(f"Found OpenClaw data at {openclaw_dir}")
    print_info("Hermes can import your settings, memories, skills, and API keys.")
    print()

    if not prompt_yes_no("Would you like to import from OpenClaw?", default=True):
        print_info(
            "Skipping migration. You can run it later via the openclaw-migration skill."
        )
        return False

    # Ensure config.yaml exists before migration tries to read it
    config_path = get_config_path()
    if not config_path.exists():
        save_config(load_config())

    # Dynamically load the migration script
    try:
        spec = importlib.util.spec_from_file_location(
            "openclaw_to_hermes", _OPENCLAW_SCRIPT
        )
        if spec is None or spec.loader is None:
            print_warning("Could not load migration script.")
            return False
        mod = importlib.util.module_from_spec(spec)
        # Register in sys.modules so @dataclass can resolve the module
        # (Python 3.11+ requires this for dynamically loaded modules)
        import sys as _sys
        _sys.modules[spec.name] = mod
        try:
            spec.loader.exec_module(mod)
        except Exception:
            _sys.modules.pop(spec.name, None)
            raise

        # Run migration with the "full" preset, execute mode, no overwrite
        selected = mod.resolve_selected_options(None, None, preset="full")
        migrator = mod.Migrator(
            source_root=openclaw_dir.resolve(),
            target_root=hermes_home.resolve(),
            execute=True,
            workspace_target=None,
            overwrite=False,
            migrate_secrets=True,
            output_dir=None,
            selected_options=selected,
            preset_name="full",
        )
        report = migrator.migrate()
    except Exception as e:
        print_warning(f"Migration failed: {e}")
        logger.debug("OpenClaw migration error", exc_info=True)
        return False

    # Print summary
    summary = report.get("summary", {})
    migrated = summary.get("migrated", 0)
    skipped = summary.get("skipped", 0)
    conflicts = summary.get("conflict", 0)
    errors = summary.get("error", 0)

    print()
    if migrated:
        print_success(f"Imported {migrated} item(s) from OpenClaw.")
    if conflicts:
        print_info(f"Skipped {conflicts} item(s) that already exist in Hermes.")
    if skipped:
        print_info(f"Skipped {skipped} item(s) (not found or unchanged).")
    if errors:
        print_warning(f"{errors} item(s) had errors — check the migration report.")

    output_dir = report.get("output_dir")
    if output_dir:
        print_info(f"Full report saved to: {output_dir}")

    print_success("Migration complete! Continuing with setup...")
    return True
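
The sys.modules registration above is the easy-to-miss step: modules created with importlib.util.module_from_spec are not registered automatically, and on newer Python versions a @dataclass defined in the loaded script cannot resolve its owning module if the entry is missing. A stripped-down sketch of the same load pattern, using placeholder names rather than the real migration script, might look like this:

# Illustration only: generic dynamic-load pattern with placeholder names,
# mirroring the importlib usage in _offer_openclaw_migration above.
import importlib.util
import sys

def load_script(module_name: str, script_path: str):
    """Load a standalone Python script as a module."""
    spec = importlib.util.spec_from_file_location(module_name, script_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"could not create a module spec for {script_path}")
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module  # register before exec so @dataclass resolves it
    try:
        spec.loader.exec_module(module)
    except Exception:
        sys.modules.pop(spec.name, None)  # avoid leaving a half-initialized module
        raise
    return module
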
# =============================================================================
# Main Wizard Orchestrator
# =============================================================================
@@ -2277,6 +2449,11 @@ def run_setup_wizard(args):
        print()
        return

    # Offer OpenClaw migration before configuration begins
    if _offer_openclaw_migration(hermes_home):
        # Reload config in case migration wrote to it
        config = load_config()

    # ── Full Setup — run all sections ──
    print_header("Configuration Location")
    print_info(f"Config file: {get_config_path()}")