refactor: remove dead code — 1,784 lines across 77 files (#9180)

A deep scan with vulture, pyflakes, and manual cross-referencing identified (the cross-referencing pass is sketched after this list):
- 41 dead functions/methods (zero callers in production)
- 7 production-dead functions (their only callers were tests, now also deleted)
- 5 dead constants/variables
- ~35 unused imports across agent/, hermes_cli/, tools/, gateway/
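
Vulture and pyflakes only flag candidates, so every hit was confirmed by hand. A minimal, stdlib-only sketch of that cross-referencing pass (the package list matches this commit; the script itself is illustrative, not the exact tooling used):

```python
# Illustrative cross-referencing pass: collect every function/method defined
# under the scanned packages, then count textual references to each name.
import ast
from pathlib import Path

ROOTS = ["agent", "hermes_cli", "tools", "gateway"]  # packages named in this commit

def defined_functions(root: str) -> set[str]:
    names: set[str] = set()
    for path in Path(root).rglob("*.py"):
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                names.add(node.name)
    return names

def reference_counts(names: set[str]) -> dict[str, int]:
    counts = {name: 0 for name in names}
    for root in ROOTS:
        for path in Path(root).rglob("*.py"):
            text = path.read_text(encoding="utf-8")
            for name in names:
                # Crude substring count: includes the def line itself, so each
                # low-count hit still needs a manual look before removal.
                counts[name] += text.count(name)
    return counts

if __name__ == "__main__":
    all_names = set().union(*(defined_functions(r) for r in ROOTS))
    for name, count in sorted(reference_counts(all_names).items()):
        if count <= 1:  # only its own definition mentions it
            print(f"possibly dead: {name}")
```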

Categories of dead code removed:
- Refactoring leftovers: _set_default_model, _setup_copilot_reasoning_selection,
  rebuild_lookups, clear_session_context, get_logs_dir, clear_session
- Unused API surface: search_models_dev, get_pricing, skills_categories,
  get_read_files_summary, clear_read_tracker, menu_labels, get_spinner_list
- Dead compatibility wrappers: schedule_cronjob, list_cronjobs, remove_cronjob
- Stale debug helpers: duplicate get_debug_session_info copies in 4 tool files
  (a centralized version already exists in debug_helpers.py)
- Dead gateway methods: send_emote, send_notice (matrix), send_reaction
  (bluebubbles), _normalize_inbound_text (feishu), fetch_room_history
  (matrix), _start_typing_indicator (signal), parse_feishu_post_content
- Dead constants: NOUS_API_BASE_URL, SKILLS_TOOL_DESCRIPTION,
  FILE_TOOLS, VALID_ASPECT_RATIOS, MEMORY_DIR
- Unused UI code: _interactive_provider_selection,
  _interactive_model_selection (superseded by the prompt_toolkit picker)

Test suite verified: all 609 tests covering the affected files pass.
Tests for removed functions were deleted. Tests that used removed utilities
(clear_read_tracker, MEMORY_DIR) were updated to use the internal APIs directly.
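
For the read-tracker tests the change is mechanical and is exactly what the diff below shows: fixtures that called the removed clear_read_tracker() helper now reset the module-level tracker directly, e.g.:

```python
import unittest

from tools.file_tools import _read_tracker  # internal tracker, now used directly


class TestReadLoopDetection(unittest.TestCase):
    # Previously setUp/tearDown called the removed clear_read_tracker() helper;
    # they now clear the internal tracker themselves (see the diff below).
    def setUp(self):
        _read_tracker.clear()

    def tearDown(self):
        _read_tracker.clear()
```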
Author: Teknium
Date: 2026-04-13 16:32:04 -07:00 (committed via GitHub)
Parent: a66fc1365d
Commit: 8d023e43ed
77 changed files with 44 additions and 1784 deletions


@@ -22,8 +22,6 @@ from unittest.mock import patch, MagicMock
 from tools.file_tools import (
     read_file_tool,
     search_tool,
-    get_read_files_summary,
-    clear_read_tracker,
     notify_other_tool_call,
     _read_tracker,
 )
@@ -63,10 +61,10 @@ class TestReadLoopDetection(unittest.TestCase):
     """Verify that read_file_tool detects and warns on consecutive re-reads."""

     def setUp(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     def tearDown(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
     def test_first_read_has_no_warning(self, _mock_ops):
@@ -158,10 +156,10 @@ class TestNotifyOtherToolCall(unittest.TestCase):
     """Verify that notify_other_tool_call resets the consecutive counter."""

     def setUp(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     def tearDown(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
     def test_other_tool_resets_consecutive(self, _mock_ops):
@@ -192,120 +190,18 @@ class TestNotifyOtherToolCall(unittest.TestCase):
         """notify_other_tool_call on a task that hasn't read anything is a no-op."""
         notify_other_tool_call("nonexistent_task")  # Should not raise

-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_history_survives_notify(self, _mock_ops):
-        """notify_other_tool_call resets consecutive but preserves read_history."""
-        read_file_tool("/tmp/test.py", offset=1, limit=100, task_id="t1")
-        notify_other_tool_call("t1")
-        summary = get_read_files_summary("t1")
-        self.assertEqual(len(summary), 1)
-        self.assertEqual(summary[0]["path"], "/tmp/test.py")
-
-
-class TestReadFilesSummary(unittest.TestCase):
-    """Verify get_read_files_summary returns accurate file-read history."""
-
-    def setUp(self):
-        clear_read_tracker()
-
-    def tearDown(self):
-        clear_read_tracker()
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_empty_when_no_reads(self, _mock_ops):
-        summary = get_read_files_summary("t1")
-        self.assertEqual(summary, [])
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_single_file_single_region(self, _mock_ops):
-        read_file_tool("/tmp/test.py", offset=1, limit=500, task_id="t1")
-        summary = get_read_files_summary("t1")
-        self.assertEqual(len(summary), 1)
-        self.assertEqual(summary[0]["path"], "/tmp/test.py")
-        self.assertIn("lines 1-500", summary[0]["regions"])
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_single_file_multiple_regions(self, _mock_ops):
-        read_file_tool("/tmp/test.py", offset=1, limit=500, task_id="t1")
-        read_file_tool("/tmp/test.py", offset=501, limit=500, task_id="t1")
-        summary = get_read_files_summary("t1")
-        self.assertEqual(len(summary), 1)
-        self.assertEqual(len(summary[0]["regions"]), 2)
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_multiple_files(self, _mock_ops):
-        read_file_tool("/tmp/a.py", task_id="t1")
-        read_file_tool("/tmp/b.py", task_id="t1")
-        summary = get_read_files_summary("t1")
-        self.assertEqual(len(summary), 2)
-        paths = [s["path"] for s in summary]
-        self.assertIn("/tmp/a.py", paths)
-        self.assertIn("/tmp/b.py", paths)
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_different_task_has_separate_summary(self, _mock_ops):
-        read_file_tool("/tmp/a.py", task_id="task_a")
-        read_file_tool("/tmp/b.py", task_id="task_b")
-        summary_a = get_read_files_summary("task_a")
-        summary_b = get_read_files_summary("task_b")
-        self.assertEqual(len(summary_a), 1)
-        self.assertEqual(summary_a[0]["path"], "/tmp/a.py")
-        self.assertEqual(len(summary_b), 1)
-        self.assertEqual(summary_b[0]["path"], "/tmp/b.py")
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_summary_unaffected_by_searches(self, _mock_ops):
-        """Searches should NOT appear in the file-read summary."""
-        read_file_tool("/tmp/test.py", task_id="t1")
-        search_tool("def main", task_id="t1")
-        summary = get_read_files_summary("t1")
-        self.assertEqual(len(summary), 1)
-        self.assertEqual(summary[0]["path"], "/tmp/test.py")
-
-
-class TestClearReadTracker(unittest.TestCase):
-    """Verify clear_read_tracker resets state properly."""
-
-    def setUp(self):
-        clear_read_tracker()
-
-    def tearDown(self):
-        clear_read_tracker()
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_clear_specific_task(self, _mock_ops):
-        read_file_tool("/tmp/test.py", task_id="t1")
-        read_file_tool("/tmp/test.py", task_id="t2")
-        clear_read_tracker("t1")
-        self.assertEqual(get_read_files_summary("t1"), [])
-        self.assertEqual(len(get_read_files_summary("t2")), 1)
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_clear_all(self, _mock_ops):
-        read_file_tool("/tmp/test.py", task_id="t1")
-        read_file_tool("/tmp/test.py", task_id="t2")
-        clear_read_tracker()
-        self.assertEqual(get_read_files_summary("t1"), [])
-        self.assertEqual(get_read_files_summary("t2"), [])
-
-    @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
-    def test_clear_then_reread_no_warning(self, _mock_ops):
-        for _ in range(3):
-            read_file_tool("/tmp/test.py", task_id="t1")
-        clear_read_tracker("t1")
-        result = json.loads(read_file_tool("/tmp/test.py", task_id="t1"))
-        self.assertNotIn("_warning", result)
-        self.assertNotIn("error", result)


 class TestSearchLoopDetection(unittest.TestCase):
     """Verify that search_tool detects and blocks consecutive repeated searches."""

     def setUp(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     def tearDown(self):
-        clear_read_tracker()
+        _read_tracker.clear()

     @patch("tools.file_tools._get_file_ops", return_value=_make_fake_file_ops())
     def test_first_search_no_warning(self, _mock_ops):