chore(salvage): strip duplicated/merge-corrupted blocks from PR #17664

Removes drive-by duplication that accumulated during the contributor
branch's multiple rebases. All runtime-benign (dict last-wins,
redefinition last-wins) but left dead source that would confuse
reviewers and maintainers.

Surgical in-place de-duplication (kept PR's intentional additions,
removed only the doubled copy):

* hermes_cli/auth.py: duplicate "gmi" + "azure-foundry" ProviderConfig
* hermes_cli/models.py: duplicate "gmi" entry in _PROVIDER_MODELS
* hermes_cli/config.py: duplicate NOTION/LINEAR/AIRTABLE/TENOR skill env
  block + duplicate get_custom_provider_context_length definition
* hermes_cli/gateway.py: duplicate _setup_yuanbao
* gateway/platforms/base.py: duplicate is_host_excluded_by_no_proxy
* gateway/platforms/telegram.py: duplicate delete_message
* gateway/stream_consumer.py: duplicate _should_send_fresh_final and
  _try_fresh_final
* gateway/run.py: duplicate _parse_reasoning_command_args /
  _resolve_session_reasoning_config / _set_session_reasoning_override,
  duplicate "Drain silently when interrupted" interrupt check
* run_agent.py: duplicate HERMES_AGENT_HELP_GUIDANCE append, duplicate
  codex_message_items capture, duplicate custom_providers resolution
* tools/approval.py: duplicate HARDLINE_PATTERNS section and duplicate
  hardline call in check_dangerous_command
* tools/mcp_tool.py: duplicate _orphan_stdio_pids module-level decl
* cron/scheduler.py: duplicate "not configured/enabled" check — kept
  the new early-rejection, removed the stale late-path copy

Full-file resets to origin/main (all PR additions were duplicates of
content already on main):

* ui-tui/packages/hermes-ink/index.d.ts
* ui-tui/packages/hermes-ink/src/entry-exports.ts
* ui-tui/packages/hermes-ink/src/ink/selection.ts
* ui-tui/src/app/interfaces.ts
* ui-tui/src/app/slash/commands/core.ts
* ui-tui/src/components/thinking.tsx
* ui-tui/src/lib/memoryMonitor.ts
* ui-tui/src/types.ts
* ui-tui/src/types/hermes-ink.d.ts
* tests/hermes_cli/test_doctor.py
* tests/hermes_cli/test_api_key_providers.py
* tests/hermes_cli/test_model_validation.py
* tests/plugins/memory/test_hindsight_provider.py
* tests/run_agent/test_run_agent.py
* tests/gateway/test_email.py
* tests/tools/test_dockerfile_pid1_reaping.py
* hermes_cli/commands.py (slack_native_slashes block — full duplicate)
This commit is contained in:
Teknium 2026-04-29 20:33:29 -07:00
parent 868bc1c242
commit 71c8ca17dc
29 changed files with 12 additions and 928 deletions

View file

@@ -479,69 +479,6 @@ class TestAzureFoundryModelApiMode:
assert azure_foundry_model_api_mode("Codex-Mini") == "codex_responses"
class TestAzureFoundryModelApiMode:
    """Azure Foundry deploys GPT-5.x / codex / o-series as Responses-API-only.

    Azure returns ``400 "The requested operation is unsupported."`` when
    /chat/completions is called against these deployments. Verified in the
    wild by a user debug bundle on 2026-04-26: gpt-5.3-codex failed with
    that exact payload while gpt-4o-pure worked on the same endpoint.
    """

    def test_gpt5_family_uses_responses(self):
        # gpt-5-mini exceptions are Copilot-specific; Azure deploys the whole
        # gpt-5 family on Responses API uniformly.
        responses_only = [
            "gpt-5",
            "gpt-5.3",
            "gpt-5.4",
            "gpt-5-codex",
            "gpt-5.3-codex",
            "gpt-5-mini",
        ]
        for name in responses_only:
            assert azure_foundry_model_api_mode(name) == "codex_responses"

    def test_codex_family_uses_responses(self):
        for name in ("codex", "codex-mini"):
            assert azure_foundry_model_api_mode(name) == "codex_responses"

    def test_o_series_reasoning_uses_responses(self):
        o_series = ["o1", "o1-preview", "o1-mini", "o3", "o3-mini", "o4-mini"]
        for name in o_series:
            assert azure_foundry_model_api_mode(name) == "codex_responses"

    def test_gpt4_family_returns_none(self):
        """GPT-4, GPT-4o, etc. speak chat completions on Azure."""
        chat_models = [
            "gpt-4",
            "gpt-4o",
            "gpt-4o-pure",
            "gpt-4o-mini",
            "gpt-4-turbo",
            "gpt-4.1",
            "gpt-3.5-turbo",
        ]
        for name in chat_models:
            assert azure_foundry_model_api_mode(name) is None

    def test_non_openai_deployments_return_none(self):
        """Llama, Mistral, Grok, etc. keep the default chat completions."""
        third_party = ["llama-3.1-70b", "mistral-large", "grok-4", "phi-3-medium"]
        for name in third_party:
            assert azure_foundry_model_api_mode(name) is None

    def test_vendor_prefix_stripped(self):
        """Users who copy-paste ``openai/gpt-5.3-codex`` should still match."""
        assert azure_foundry_model_api_mode("openai/gpt-5.3-codex") == "codex_responses"
        assert azure_foundry_model_api_mode("openai/gpt-4o") is None

    def test_empty_and_none_return_none(self):
        # None, empty, and whitespace-only inputs all fall through to None.
        for blank in (None, "", " "):
            assert azure_foundry_model_api_mode(blank) is None

    def test_case_insensitive(self):
        assert azure_foundry_model_api_mode("GPT-5.3-Codex") == "codex_responses"
        assert azure_foundry_model_api_mode("Codex-Mini") == "codex_responses"
# -- validate — format checks -----------------------------------------------
class TestValidateFormatChecks: