mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
refactor: remove dead code — 1,784 lines across 77 files (#9180)
Deep scan with vulture, pyflakes, and manual cross-referencing identified: - 41 dead functions/methods (zero callers in production) - 7 production-dead functions (only test callers, tests deleted) - 5 dead constants/variables - ~35 unused imports across agent/, hermes_cli/, tools/, gateway/ Categories of dead code removed: - Refactoring leftovers: _set_default_model, _setup_copilot_reasoning_selection, rebuild_lookups, clear_session_context, get_logs_dir, clear_session - Unused API surface: search_models_dev, get_pricing, skills_categories, get_read_files_summary, clear_read_tracker, menu_labels, get_spinner_list - Dead compatibility wrappers: schedule_cronjob, list_cronjobs, remove_cronjob - Stale debug helpers: get_debug_session_info copies in 4 tool files (centralized version in debug_helpers.py already exists) - Dead gateway methods: send_emote, send_notice (matrix), send_reaction (bluebubbles), _normalize_inbound_text (feishu), fetch_room_history (matrix), _start_typing_indicator (signal), parse_feishu_post_content - Dead constants: NOUS_API_BASE_URL, SKILLS_TOOL_DESCRIPTION, FILE_TOOLS, VALID_ASPECT_RATIOS, MEMORY_DIR - Unused UI code: _interactive_provider_selection, _interactive_model_selection (superseded by prompt_toolkit picker) Test suite verified: 609 tests covering affected files all pass. Tests for removed functions deleted. Tests using removed utilities (clear_read_tracker, MEMORY_DIR) updated to use internal APIs directly.
This commit is contained in:
parent
a66fc1365d
commit
8d023e43ed
77 changed files with 44 additions and 1784 deletions
|
|
@ -817,74 +817,6 @@ class TestTranscribeAudioDispatch:
|
|||
assert mock_openai.call_args[0][1] == "gpt-4o-transcribe"
|
||||
|
||||
|
||||
# ============================================================================
# get_stt_model_from_config
# ============================================================================
class TestGetSttModelFromConfig:
    """get_stt_model_from_config is provider-aware: it reads the model from the
    correct provider-specific section (stt.local.model, stt.openai.model, etc.)
    and only honours the legacy flat stt.model key for cloud providers.

    All tests point HERMES_HOME at a pytest tmp_path so the function loads the
    config file written by the test (or falls back to DEFAULT_CONFIG when none
    exists). The import is done inside each test so the env var is set before
    the module reads its configuration.
    """

    def test_returns_local_model_from_nested_config(self, tmp_path, monkeypatch):
        """Nested stt.local.model is returned when provider is local."""
        cfg = tmp_path / "config.yaml"
        # Indentation matters: `model` must be a *child* of `local`, so it is
        # indented deeper than `local` (the scraped original had the levels
        # collapsed, which would have parsed `model` as a sibling of `local`).
        cfg.write_text("stt:\n  provider: local\n  local:\n    model: large-v3\n")
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        assert get_stt_model_from_config() == "large-v3"

    def test_returns_openai_model_from_nested_config(self, tmp_path, monkeypatch):
        """Nested stt.openai.model is returned when provider is openai."""
        cfg = tmp_path / "config.yaml"
        # Same nesting fix as above: model lives under the openai section.
        cfg.write_text(
            "stt:\n  provider: openai\n  openai:\n    model: gpt-4o-transcribe\n"
        )
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        assert get_stt_model_from_config() == "gpt-4o-transcribe"

    def test_legacy_flat_key_ignored_for_local_provider(self, tmp_path, monkeypatch):
        """Legacy stt.model should NOT be used when provider is local, to prevent
        OpenAI model names (whisper-1) from being fed to faster-whisper."""
        cfg = tmp_path / "config.yaml"
        # Flat layout: provider and model are siblings directly under stt.
        cfg.write_text("stt:\n provider: local\n model: whisper-1\n")
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        result = get_stt_model_from_config()
        assert result != "whisper-1", "Legacy stt.model should be ignored for local provider"

    def test_legacy_flat_key_honoured_for_cloud_provider(self, tmp_path, monkeypatch):
        """Legacy stt.model should still work for cloud providers that don't
        have a section in DEFAULT_CONFIG (e.g. groq)."""
        cfg = tmp_path / "config.yaml"
        cfg.write_text("stt:\n provider: groq\n model: whisper-large-v3\n")
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        assert get_stt_model_from_config() == "whisper-large-v3"

    def test_defaults_to_local_model_when_no_config_file(self, tmp_path, monkeypatch):
        """With no config file, load_config() returns DEFAULT_CONFIG which has
        stt.provider=local and stt.local.model=base."""
        # No config.yaml is written — only the env var is pointed at tmp_path.
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        assert get_stt_model_from_config() == "base"

    def test_returns_none_on_invalid_yaml(self, tmp_path, monkeypatch):
        """Unparseable YAML must not raise; any non-OpenAI fallback is fine."""
        cfg = tmp_path / "config.yaml"
        cfg.write_text(": : :\n bad yaml [[[")
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.transcription_tools import get_stt_model_from_config
        # _load_stt_config catches exceptions and returns {}, so the function
        # falls through to return None (no provider section in empty dict)
        result = get_stt_model_from_config()
        # With empty config, load_config may still merge defaults; either
        # None or a default is acceptable — just not an OpenAI model name
        assert result is None or result in ("base", "small", "medium", "large-v3")
||||
# ============================================================================
# _transcribe_mistral
# ============================================================================
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue