refactor: make config.yaml the single source of truth for endpoint URLs (#4165)

OPENAI_BASE_URL was written to .env AND config.yaml, creating a dual-source
confusion. Users (especially Docker) would see the URL in .env and assume
that's where all config lives, then wonder why LLM_MODEL in .env didn't work.

Changes:
- Remove all 27 save_env_value("OPENAI_BASE_URL", ...) calls across main.py,
  setup.py, and tools_config.py
- Remove OPENAI_BASE_URL env var reading from runtime_provider.py, cli.py,
  models.py, and gateway/run.py
- Remove LLM_MODEL/HERMES_MODEL env var reading from gateway/run.py and
  auxiliary_client.py — config.yaml model.default is authoritative
- Vision base URL now saved to config.yaml auxiliary.vision.base_url
  (both setup wizard and tools_config paths)
- Tests updated to set config values instead of env vars

Convention enforced: .env is for SECRETS only (API keys). All other
configuration (model names, base URLs, provider selection) lives
exclusively in config.yaml.
This commit is contained in:
Teknium 2026-03-30 22:02:53 -07:00 committed by GitHub
parent 4d7e3c7157
commit f890a94c12
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 77 additions and 152 deletions

View file

@ -7,7 +7,7 @@ the best available backend without duplicating fallback logic.
Resolution order for text tasks (auto mode):
1. OpenRouter (OPENROUTER_API_KEY)
2. Nous Portal (~/.hermes/auth.json active provider)
3. Custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY)
3. Custom endpoint (config.yaml model.base_url + OPENAI_API_KEY)
4. Codex OAuth (Responses API via chatgpt.com with gpt-5.3-codex,
wrapped to look like a chat.completions client)
5. Native Anthropic
@ -584,15 +584,11 @@ def _try_nous() -> Tuple[Optional[OpenAI], Optional[str]]:
def _read_main_model() -> str:
"""Read the user's configured main model from config/env.
"""Read the user's configured main model from config.yaml.
Falls back through HERMES_MODEL → LLM_MODEL → config.yaml model.default
so the auxiliary client can use the same model as the main agent when no
dedicated auxiliary model is available.
config.yaml model.default is the single source of truth for the active
model. Environment variables are no longer consulted.
"""
from_env = os.getenv("OPENAI_MODEL") or os.getenv("HERMES_MODEL") or os.getenv("LLM_MODEL")
if from_env:
return from_env.strip()
try:
from hermes_cli.config import load_config
cfg = load_config()

8
cli.py
View file

@ -1124,9 +1124,9 @@ class HermesCLI:
self.acp_args: list[str] = []
self.base_url = (
base_url
or os.getenv("OPENAI_BASE_URL")
or os.getenv("OPENROUTER_BASE_URL", CLI_CONFIG["model"]["base_url"])
)
or CLI_CONFIG["model"].get("base_url", "")
or os.getenv("OPENROUTER_BASE_URL", "")
) or None
# Match key to resolved base_url: OpenRouter URL → prefer OPENROUTER_API_KEY,
# custom endpoint → prefer OPENAI_API_KEY (issue #560).
# Note: _ensure_runtime_credentials() re-resolves this before first use.
@ -3239,7 +3239,7 @@ class HermesCLI:
print(f" {mid}{current_marker}")
elif p["id"] == "custom":
from hermes_cli.models import _get_custom_base_url
custom_url = _get_custom_base_url() or os.getenv("OPENAI_BASE_URL", "")
custom_url = _get_custom_base_url()
if custom_url:
print(f" endpoint: {custom_url}")
if is_active:

View file

@ -364,20 +364,19 @@ def _load_gateway_config() -> dict:
def _resolve_gateway_model(config: dict | None = None) -> str:
"""Read model from env/config — mirrors the resolution in _run_agent_sync.
"""Read model from config.yaml — single source of truth.
Without this, temporary AIAgent instances (memory flush, /compress) fall
back to the hardcoded default, which fails when the active provider is
openai-codex.
"""
model = os.getenv("HERMES_MODEL") or os.getenv("LLM_MODEL") or ""
cfg = config if config is not None else _load_gateway_config()
model_cfg = cfg.get("model", {})
if isinstance(model_cfg, str):
model = model_cfg
return model_cfg
elif isinstance(model_cfg, dict):
model = model_cfg.get("default") or model_cfg.get("model") or model
return model
return model_cfg.get("default") or model_cfg.get("model") or ""
return ""
def _resolve_hermes_bin() -> Optional[list[str]]:
@ -2762,7 +2761,7 @@ class GatewayRunner:
{
"role": "session_meta",
"tools": tool_defs or [],
"model": os.getenv("HERMES_MODEL", ""),
"model": _resolve_gateway_model(),
"platform": source.platform.value if source.platform else "",
"timestamp": ts,
}
@ -3227,9 +3226,11 @@ class GatewayRunner:
except Exception:
current_provider = "openrouter"
# Detect custom endpoint
if current_provider == "openrouter" and os.getenv("OPENAI_BASE_URL", "").strip():
current_provider = "custom"
# Detect custom endpoint from config base_url
if current_provider == "openrouter":
_cfg_base = model_cfg.get("base_url", "") if isinstance(model_cfg, dict) else ""
if _cfg_base and "openrouter.ai" not in _cfg_base:
current_provider = "custom"
current_label = _PROVIDER_LABELS.get(current_provider, current_provider)

View file

@ -1050,10 +1050,6 @@ def _model_flow_openrouter(config, current_model=""):
selected = _prompt_model_selection(openrouter_models, current_model=current_model)
if selected:
# Clear any custom endpoint and set provider to openrouter
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_save_model_choice(selected)
# Update config provider and deactivate any OAuth provider
@ -1143,10 +1139,6 @@ def _model_flow_nous(config, current_model=""):
# Reactivate Nous as the provider and update config
inference_url = creds.get("base_url", "")
_update_config_for_provider("nous", inference_url)
# Clear any custom endpoint that might conflict
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
print(f"Default model set to: {selected} (via Nous Portal)")
else:
print("No change.")
@ -1191,10 +1183,6 @@ def _model_flow_openai_codex(config, current_model=""):
if selected:
_save_model_choice(selected)
_update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL)
# Clear custom endpoint env vars that would otherwise override Codex.
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
print(f"Default model set to: {selected} (via OpenAI Codex)")
else:
print("No change.")
@ -1275,11 +1263,6 @@ def _model_flow_custom(config):
if probe.get("suggested_base_url"):
print(f" If this server expects /v1, try base URL: {probe['suggested_base_url']}")
if base_url:
save_env_value("OPENAI_BASE_URL", effective_url)
if api_key:
save_env_value("OPENAI_API_KEY", api_key)
if model_name:
_save_model_choice(model_name)
@ -1439,9 +1422,6 @@ def _model_flow_named_custom(config, provider_info):
# If a model is saved, just activate immediately — no probing needed
if saved_model:
save_env_value("OPENAI_BASE_URL", base_url)
if api_key:
save_env_value("OPENAI_API_KEY", api_key)
_save_model_choice(saved_model)
cfg = load_config()
@ -1513,9 +1493,6 @@ def _model_flow_named_custom(config, provider_info):
return
# Activate and save the model to the custom_providers entry
save_env_value("OPENAI_BASE_URL", base_url)
if api_key:
save_env_value("OPENAI_API_KEY", api_key)
_save_model_choice(model_name)
cfg = load_config()
@ -1829,11 +1806,6 @@ def _model_flow_copilot(config, current_model=""):
catalog=catalog,
api_key=api_key,
) or selected
# Clear stale custom-endpoint overrides so the Copilot provider wins cleanly.
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
initial_cfg = load_config()
current_effort = _current_reasoning_effort(initial_cfg)
reasoning_efforts = github_model_reasoning_efforts(
@ -2058,11 +2030,6 @@ def _model_flow_kimi(config, current_model=""):
selected = None
if selected:
# Clear custom endpoint if set (avoid confusion)
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_save_model_choice(selected)
# Update config with provider and base URL
@ -2165,11 +2132,6 @@ def _model_flow_api_key_provider(config, provider_id, current_model=""):
selected = None
if selected:
# Clear custom endpoint if set (avoid confusion)
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_save_model_choice(selected)
# Update config with provider and base URL
@ -2381,11 +2343,6 @@ def _model_flow_anthropic(config, current_model=""):
selected = None
if selected:
# Clear custom endpoint if set
if get_env_value("OPENAI_BASE_URL"):
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_save_model_choice(selected)
# Update config with provider — clear base_url since

View file

@ -349,7 +349,7 @@ def list_available_providers() -> list[dict[str, str]]:
try:
from hermes_cli.auth import get_auth_status, has_usable_secret
if pid == "custom":
custom_base_url = _get_custom_base_url() or os.getenv("OPENAI_BASE_URL", "")
custom_base_url = _get_custom_base_url() or ""
has_creds = bool(custom_base_url.strip())
elif pid == "openrouter":
has_creds = has_usable_secret(os.getenv("OPENROUTER_API_KEY", ""))

View file

@ -229,28 +229,22 @@ def _resolve_openrouter_runtime(
requested_norm = (requested_provider or "").strip().lower()
cfg_provider = cfg_provider.strip().lower()
env_openai_base_url = os.getenv("OPENAI_BASE_URL", "").strip()
env_openrouter_base_url = os.getenv("OPENROUTER_BASE_URL", "").strip()
# Use config base_url when available and the provider context matches.
# OPENAI_BASE_URL env var is no longer consulted — config.yaml is
# the single source of truth for endpoint URLs.
use_config_base_url = False
if cfg_base_url.strip() and not explicit_base_url:
if requested_norm == "auto":
if (not cfg_provider or cfg_provider == "auto") and not env_openai_base_url:
if not cfg_provider or cfg_provider == "auto":
use_config_base_url = True
elif requested_norm == "custom" and cfg_provider == "custom":
# provider: custom — use base_url from config (Fixes #1760).
use_config_base_url = True
# When the user explicitly requested the openrouter provider, skip
# OPENAI_BASE_URL — it typically points to a custom / non-OpenRouter
# endpoint and would prevent switching back to OpenRouter (#874).
skip_openai_base = requested_norm == "openrouter"
# For custom, prefer config base_url over env so config.yaml is honored (#1760).
base_url = (
(explicit_base_url or "").strip()
or (cfg_base_url.strip() if use_config_base_url else "")
or ("" if skip_openai_base else env_openai_base_url)
or env_openrouter_base_url
or OPENROUTER_BASE_URL
).rstrip("/")

View file

@ -941,10 +941,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear any custom endpoint if switching to OpenRouter
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
# Update config.yaml and deactivate any OAuth provider so the
# resolver doesn't keep returning the old provider (e.g. Codex).
@ -1032,10 +1028,6 @@ def setup_model_provider(config: dict):
mock_args = argparse.Namespace()
_login_openai_codex(mock_args, PROVIDER_REGISTRY["openai-codex"])
# Clear custom endpoint vars that would override provider routing.
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_update_config_for_provider("openai-codex", DEFAULT_CODEX_BASE_URL)
_set_model_provider(config, "openai-codex", DEFAULT_CODEX_BASE_URL)
except SystemExit:
@ -1118,10 +1110,6 @@ def setup_model_provider(config: dict):
" If you get billing errors, check your plan at https://open.bigmodel.cn/"
)
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "zai", zai_base_url)
selected_base_url = zai_base_url
@ -1151,10 +1139,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "kimi-coding", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1184,10 +1168,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "minimax", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1217,10 +1197,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "minimax-cn", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1250,10 +1226,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "kilocode", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1352,10 +1324,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped — agent won't work without credentials")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
# Don't save base_url for Anthropic — resolve_runtime_provider()
# always hardcodes it. Stale base_urls contaminate other providers.
_set_model_provider(config, "anthropic")
@ -1386,10 +1354,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_update_config_for_provider("ai-gateway", pconfig.inference_base_url, default_model="anthropic/claude-opus-4.6")
_set_model_provider(config, "ai-gateway", pconfig.inference_base_url)
@ -1418,10 +1382,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_update_config_for_provider("alibaba", pconfig.inference_base_url, default_model="qwen3.5-plus")
_set_model_provider(config, "alibaba", pconfig.inference_base_url)
@ -1451,10 +1411,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "opencode-zen", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1484,10 +1440,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without an API key")
# Clear custom endpoint vars if switching
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "opencode-go", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1518,9 +1470,6 @@ def setup_model_provider(config: dict):
else:
print_warning("Skipped - agent won't work without a GitHub token or gh auth login")
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "copilot", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1534,9 +1483,6 @@ def setup_model_provider(config: dict):
print_info(f"Base marker: {pconfig.inference_base_url}")
print()
if existing_custom:
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "copilot-acp", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1553,9 +1499,6 @@ def setup_model_provider(config: dict):
api_key = prompt(" HF Token", password=True)
if api_key:
save_env_value("HF_TOKEN", api_key)
# Clear OpenRouter env vars to prevent routing confusion
save_env_value("OPENAI_BASE_URL", "")
save_env_value("OPENAI_API_KEY", "")
_set_model_provider(config, "huggingface", pconfig.inference_base_url)
selected_base_url = pconfig.inference_base_url
@ -1632,7 +1575,9 @@ def setup_model_provider(config: dict):
_oai_key = prompt(_api_key_label, password=True).strip()
if _oai_key:
save_env_value("OPENAI_API_KEY", _oai_key)
save_env_value("OPENAI_BASE_URL", _base_url)
# Save vision base URL to config (not .env — only secrets go there)
_vaux = config.setdefault("auxiliary", {}).setdefault("vision", {})
_vaux["base_url"] = _base_url
if "api.openai.com" in _base_url.lower():
_oai_vision_models = ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"]
_vm_choices = _oai_vision_models + ["Use default (gpt-4o-mini)"]

View file

@ -983,8 +983,13 @@ def _configure_simple_requirements(ts_key: str):
key_label = " OPENAI_API_KEY" if "api.openai.com" in base_url.lower() else " API key"
api_key = _prompt(key_label, password=True)
if api_key and api_key.strip():
save_env_value("OPENAI_BASE_URL", base_url)
save_env_value("OPENAI_API_KEY", api_key.strip())
# Save vision base URL to config (not .env — only secrets go there)
from hermes_cli.config import load_config, save_config
_cfg = load_config()
_aux = _cfg.setdefault("auxiliary", {}).setdefault("vision", {})
_aux["base_url"] = base_url
save_config(_cfg)
if "api.openai.com" in base_url.lower():
save_env_value("AUXILIARY_VISION_MODEL", "gpt-4o-mini")
_print_success(" Saved")

View file

@ -465,9 +465,16 @@ class TestGetTextAuxiliaryClient:
assert model == "google/gemini-3-flash-preview"
def test_custom_endpoint_over_codex(self, monkeypatch, codex_auth_dir):
monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1")
config = {
"model": {
"provider": "custom",
"base_url": "http://localhost:1234/v1",
"default": "my-local-model",
}
}
monkeypatch.setenv("OPENAI_API_KEY", "lm-studio-key")
monkeypatch.setenv("OPENAI_MODEL", "my-local-model")
monkeypatch.setattr("hermes_cli.config.load_config", lambda: config)
monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config)
# Override the autouse monkeypatch for codex
monkeypatch.setattr(
"agent.auxiliary_client._read_codex_access_token",
@ -726,10 +733,17 @@ class TestVisionClientFallback:
def test_vision_forced_main_uses_custom_endpoint(self, monkeypatch):
"""When explicitly forced to 'main', vision CAN use custom endpoint."""
config = {
"model": {
"provider": "custom",
"base_url": "http://localhost:1234/v1",
"default": "my-local-model",
}
}
monkeypatch.setenv("AUXILIARY_VISION_PROVIDER", "main")
monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1")
monkeypatch.setenv("OPENAI_API_KEY", "local-key")
monkeypatch.setenv("OPENAI_MODEL", "my-local-model")
monkeypatch.setattr("hermes_cli.config.load_config", lambda: config)
monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config)
with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \
patch("agent.auxiliary_client.OpenAI") as mock_openai:
client, model = get_vision_auxiliary_client()
@ -827,9 +841,16 @@ class TestResolveForcedProvider:
assert model is None
def test_forced_main_uses_custom(self, monkeypatch):
monkeypatch.setenv("OPENAI_BASE_URL", "http://local:8080/v1")
config = {
"model": {
"provider": "custom",
"base_url": "http://local:8080/v1",
"default": "my-local-model",
}
}
monkeypatch.setenv("OPENAI_API_KEY", "local-key")
monkeypatch.setenv("OPENAI_MODEL", "my-local-model")
monkeypatch.setattr("hermes_cli.config.load_config", lambda: config)
monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config)
with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \
patch("agent.auxiliary_client.OpenAI") as mock_openai:
client, model = _resolve_forced_provider("main")
@ -858,10 +879,17 @@ class TestResolveForcedProvider:
def test_forced_main_skips_openrouter_nous(self, monkeypatch):
"""Even if OpenRouter key is set, 'main' skips it."""
config = {
"model": {
"provider": "custom",
"base_url": "http://local:8080/v1",
"default": "my-local-model",
}
}
monkeypatch.setenv("OPENROUTER_API_KEY", "or-key")
monkeypatch.setenv("OPENAI_BASE_URL", "http://local:8080/v1")
monkeypatch.setenv("OPENAI_API_KEY", "local-key")
monkeypatch.setenv("OPENAI_MODEL", "my-local-model")
monkeypatch.setattr("hermes_cli.config.load_config", lambda: config)
monkeypatch.setattr("hermes_cli.runtime_provider.load_config", lambda: config)
with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \
patch("agent.auxiliary_client.OpenAI") as mock_openai:
client, model = _resolve_forced_provider("main")

View file

@ -129,16 +129,13 @@ def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch):
env = _read_env(tmp_path)
# _model_flow_custom saves env vars and config to disk
assert env.get("OPENAI_BASE_URL") == "http://localhost:8000/v1"
assert env.get("OPENAI_API_KEY") == "local-key"
# The model config is saved as a dict by _model_flow_custom
# _model_flow_custom saves config to disk (base_url in config, not .env)
reloaded = load_config()
model_cfg = reloaded.get("model", {})
if isinstance(model_cfg, dict):
assert model_cfg.get("provider") == "custom"
assert model_cfg.get("default") == "llm"
assert model_cfg.get("base_url") == "http://localhost:8000/v1"
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch):
@ -232,8 +229,11 @@ def test_setup_keep_current_anthropic_can_configure_openai_vision_default(tmp_pa
env = _read_env(tmp_path)
assert env.get("OPENAI_API_KEY") == "sk-openai"
assert env.get("OPENAI_BASE_URL") == "https://api.openai.com/v1"
assert env.get("AUXILIARY_VISION_MODEL") == "gpt-4o-mini"
# Vision base URL saved to config.yaml, not .env
reloaded = load_config()
vision_cfg = reloaded.get("auxiliary", {}).get("vision", {})
assert vision_cfg.get("base_url") == "https://api.openai.com/v1"
def test_setup_copilot_uses_gh_auth_and_saves_provider(tmp_path, monkeypatch):
@ -433,8 +433,7 @@ def test_setup_switch_custom_to_codex_clears_custom_endpoint_and_updates_config(
env = _read_env(tmp_path)
reloaded = load_config()
assert env.get("OPENAI_BASE_URL") == ""
assert env.get("OPENAI_API_KEY") == ""
# OPENAI_BASE_URL is no longer written/cleared in .env — config is authoritative
assert reloaded["model"]["provider"] == "openai-codex"
assert reloaded["model"]["default"] == "openai/gpt-5.3-codex"
assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"

View file

@ -467,6 +467,6 @@ def test_model_flow_custom_saves_verified_v1_base_url(monkeypatch, capsys):
output = capsys.readouterr().out
assert "Saving the working base URL instead" in output
assert saved_env["OPENAI_BASE_URL"] == "http://localhost:8000/v1"
assert saved_env["OPENAI_API_KEY"] == "local-key"
# OPENAI_BASE_URL is no longer saved to .env — config.yaml is authoritative
assert "OPENAI_BASE_URL" not in saved_env
assert saved_env["MODEL"] == "llm"