fix(batch_runner): mark discarded no-reasoning prompts as completed (#9950)

Cherry-picked from PR #10005 by @houziershi.

Discarded prompts (has_any_reasoning=False) were skipped by `continue`
before being added to completed_in_batch. On --resume they were retried
forever. Now they are added to completed_in_batch before the continue.

- Added AUTHOR_MAP entry for @houziershi

Closes #9950
This commit is contained in:
houguokun 2026-04-20 04:55:21 -07:00 committed by Teknium
parent 7242afaa5f
commit 6cdab70320
3 changed files with 32 additions and 1 deletion

View file

@@ -444,6 +444,7 @@ def _process_batch_worker(args: Tuple) -> Dict[str, Any]:
if not reasoning.get("has_any_reasoning", True):
print(f" 🚫 Prompt {prompt_index} discarded (no reasoning in any turn)")
discarded_no_reasoning += 1
completed_in_batch.append(prompt_index)
continue
# Get and normalize tool stats for consistent schema across all entries

View file

@@ -174,6 +174,7 @@ AUTHOR_MAP = {
"1115117931@qq.com": "aaronagent",
"1506751656@qq.com": "hqhq1025",
"364939526@qq.com": "luyao618",
"hgk324@gmail.com": "houziershi",
"906014227@qq.com": "bingo906",
"aaronwong1999@icloud.com": "AaronWong1999",
"agents@kylefrench.dev": "DeployFaith",

View file

@@ -12,7 +12,7 @@ import pytest
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
from batch_runner import BatchRunner
from batch_runner import BatchRunner, _process_batch_worker
@pytest.fixture
@@ -157,3 +157,32 @@ class TestResumePreservesProgress:
assert checkpoint_data["completed_prompts"] == []
assert checkpoint_data["run_name"] == "test_run"
class TestBatchWorkerResumeBehavior:
    """Regression tests for how _process_batch_worker reports progress.

    Covers the #9950 fix: prompts discarded for lacking reasoning must still
    be recorded as completed so --resume does not retry them forever.
    """

    def test_discarded_no_reasoning_prompts_are_marked_completed(self, tmp_path, monkeypatch):
        """A discarded (has_any_reasoning=False) prompt still counts as completed."""
        # File the worker would write batch output to; should stay absent/empty
        # because the only prompt is discarded.
        batch_file = tmp_path / "batch_1.jsonl"
        # Canned per-prompt result: a successful call whose reasoning_stats mark
        # it as having no reasoning in any turn, so the worker discards it.
        prompt_result = {
            "success": True,
            "trajectory": [{"role": "assistant", "content": "x"}],
            "reasoning_stats": {"has_any_reasoning": False},
            "tool_stats": {},
            "metadata": {},
            "completed": True,
            "api_calls": 1,
            "toolsets_used": [],
        }
        # Bypass the real model call: every prompt yields the canned result.
        monkeypatch.setattr("batch_runner._process_single_prompt", lambda *args, **kwargs: prompt_result)
        # Worker args tuple: presumably (batch_id, [(prompt_index, prompt_dict), ...],
        # output_dir, already-completed indices, config) — confirm against batch_runner.
        result = _process_batch_worker((
            1,
            [(0, {"prompt": "hi"})],
            tmp_path,
            set(),
            {"verbose": False},
        ))
        assert result["discarded_no_reasoning"] == 1
        # The fix under test: the discarded prompt's index is reported as completed.
        assert result["completed_prompts"] == [0]
        # Discarded prompts should produce no batch output.
        assert not batch_file.exists() or batch_file.read_text() == ""