mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
feat: centralized logging, instrumentation, hermes logs CLI, gateway noise fix (#5430)
Adds comprehensive logging infrastructure to Hermes Agent across 4 phases: **Phase 1 — Centralized logging** - New hermes_logging.py with idempotent setup_logging() used by CLI, gateway, and cron - agent.log (INFO+) and errors.log (WARNING+) with RotatingFileHandler + RedactingFormatter - config.yaml logging: section (level, max_size_mb, backup_count) - All entry points wired (cli.py, main.py, gateway/run.py, run_agent.py) - Fixed debug_helpers.py writing to ./logs/ instead of ~/.hermes/logs/ **Phase 2 — Event instrumentation** - API calls: model, provider, tokens, latency, cache hit % - Tool execution: name, duration, result size (both sequential + concurrent) - Session lifecycle: turn start (session/model/provider/platform), compression (before/after) - Credential pool: rotation events, exhaustion tracking **Phase 3 — hermes logs CLI command** - hermes logs / hermes logs -f / hermes logs errors / hermes logs gateway - --level, --session, --since filters - hermes logs list (file sizes + ages) **Phase 4 — Gateway bug fix + noise reduction** - fix: _async_flush_memories() called with wrong arg count — sessions never flushed - Batched session expiry logs: 6 lines/cycle → 2 summary lines - Added inbound message + response time logging 75 new tests, zero regressions on the full suite.
This commit is contained in:
parent
89db3aeb2c
commit
9c96f669a1
11 changed files with 1399 additions and 100 deletions
|
|
@ -660,6 +660,7 @@ class CredentialPool:
|
|||
available = self._available_entries(clear_expired=True, refresh=True)
|
||||
if not available:
|
||||
self._current_id = None
|
||||
logger.info("credential pool: no available entries (all exhausted or empty)")
|
||||
return None
|
||||
|
||||
if self._strategy == STRATEGY_RANDOM:
|
||||
|
|
@ -702,9 +703,18 @@ class CredentialPool:
|
|||
entry = self.current() or self._select_unlocked()
|
||||
if entry is None:
|
||||
return None
|
||||
_label = entry.label or entry.id[:8]
|
||||
logger.info(
|
||||
"credential pool: marking %s exhausted (status=%s), rotating",
|
||||
_label, status_code,
|
||||
)
|
||||
self._mark_exhausted(entry, status_code, error_context)
|
||||
self._current_id = None
|
||||
return self._select_unlocked()
|
||||
next_entry = self._select_unlocked()
|
||||
if next_entry:
|
||||
_next_label = next_entry.label or next_entry.id[:8]
|
||||
logger.info("credential pool: rotated to %s", _next_label)
|
||||
return next_entry
|
||||
|
||||
def try_refresh_current(self) -> Optional[PooledCredential]:
|
||||
with self._lock:
|
||||
|
|
|
|||
8
cli.py
8
cli.py
|
|
@ -453,6 +453,14 @@ def load_cli_config() -> Dict[str, Any]:
|
|||
# Load configuration at module startup
|
||||
CLI_CONFIG = load_cli_config()
|
||||
|
||||
# Initialize centralized logging early — agent.log + errors.log in ~/.hermes/logs/.
|
||||
# This ensures CLI sessions produce a log trail even before AIAgent is instantiated.
|
||||
try:
|
||||
from hermes_logging import setup_logging
|
||||
setup_logging(mode="cli")
|
||||
except Exception:
|
||||
pass # Logging setup is best-effort — don't crash the CLI
|
||||
|
||||
# Validate config structure early — print warnings before user hits cryptic errors
|
||||
try:
|
||||
from hermes_cli.config import print_config_warnings
|
||||
|
|
|
|||
107
gateway/run.py
107
gateway/run.py
|
|
@ -25,7 +25,6 @@ import tempfile
|
|||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, Optional, Any, List
|
||||
|
|
@ -1283,18 +1282,34 @@ class GatewayRunner:
|
|||
while self._running:
|
||||
try:
|
||||
self.session_store._ensure_loaded()
|
||||
# Collect expired sessions first, then log a single summary.
|
||||
_expired_entries = []
|
||||
for key, entry in list(self.session_store._entries.items()):
|
||||
if entry.memory_flushed:
|
||||
continue # already flushed this session (persisted to disk)
|
||||
continue
|
||||
if not self.session_store._is_session_expired(entry):
|
||||
continue # session still active
|
||||
# Session has expired — flush memories in the background
|
||||
logger.info(
|
||||
"Session %s expired (key=%s), flushing memories proactively",
|
||||
entry.session_id, key,
|
||||
continue
|
||||
_expired_entries.append((key, entry))
|
||||
|
||||
if _expired_entries:
|
||||
# Extract platform names from session keys for a compact summary.
|
||||
# Keys look like "agent:main:telegram:dm:12345" — platform is field [2].
|
||||
_platforms: dict[str, int] = {}
|
||||
for _k, _e in _expired_entries:
|
||||
_parts = _k.split(":")
|
||||
_plat = _parts[2] if len(_parts) > 2 else "unknown"
|
||||
_platforms[_plat] = _platforms.get(_plat, 0) + 1
|
||||
_plat_summary = ", ".join(
|
||||
f"{p}:{c}" for p, c in sorted(_platforms.items())
|
||||
)
|
||||
logger.info(
|
||||
"Session expiry: %d sessions to flush (%s)",
|
||||
len(_expired_entries), _plat_summary,
|
||||
)
|
||||
|
||||
for key, entry in _expired_entries:
|
||||
try:
|
||||
await self._async_flush_memories(entry.session_id, key)
|
||||
await self._async_flush_memories(entry.session_id)
|
||||
# Shut down memory provider on the cached agent
|
||||
cached_agent = self._running_agents.get(key)
|
||||
if cached_agent and cached_agent is not _AGENT_PENDING_SENTINEL:
|
||||
|
|
@ -1308,8 +1323,8 @@ class GatewayRunner:
|
|||
with self.session_store._lock:
|
||||
entry.memory_flushed = True
|
||||
self.session_store._save()
|
||||
logger.info(
|
||||
"Pre-reset memory flush completed for session %s",
|
||||
logger.debug(
|
||||
"Memory flush completed for session %s",
|
||||
entry.session_id,
|
||||
)
|
||||
_flush_failures.pop(entry.session_id, None)
|
||||
|
|
@ -1318,7 +1333,7 @@ class GatewayRunner:
|
|||
_flush_failures[entry.session_id] = failures
|
||||
if failures >= _MAX_FLUSH_RETRIES:
|
||||
logger.warning(
|
||||
"Proactive memory flush gave up after %d attempts for %s: %s. "
|
||||
"Memory flush gave up after %d attempts for %s: %s. "
|
||||
"Marking as flushed to prevent infinite retry loop.",
|
||||
failures, entry.session_id, e,
|
||||
)
|
||||
|
|
@ -1328,9 +1343,24 @@ class GatewayRunner:
|
|||
_flush_failures.pop(entry.session_id, None)
|
||||
else:
|
||||
logger.debug(
|
||||
"Proactive memory flush failed (%d/%d) for %s: %s",
|
||||
"Memory flush failed (%d/%d) for %s: %s",
|
||||
failures, _MAX_FLUSH_RETRIES, entry.session_id, e,
|
||||
)
|
||||
|
||||
if _expired_entries:
|
||||
_flushed = sum(
|
||||
1 for _, e in _expired_entries if e.memory_flushed
|
||||
)
|
||||
_failed = len(_expired_entries) - _flushed
|
||||
if _failed:
|
||||
logger.info(
|
||||
"Session expiry done: %d flushed, %d pending retry",
|
||||
_flushed, _failed,
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"Session expiry done: %d flushed", _flushed,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Session expiry watcher error: %s", e)
|
||||
# Sleep in small increments so we can stop quickly
|
||||
|
|
@ -2260,6 +2290,14 @@ class GatewayRunner:
|
|||
|
||||
async def _handle_message_with_agent(self, event, source, _quick_key: str):
|
||||
"""Inner handler that runs under the _running_agents sentinel guard."""
|
||||
_msg_start_time = time.time()
|
||||
_platform_name = source.platform.value if hasattr(source.platform, "value") else str(source.platform)
|
||||
_msg_preview = (event.text or "")[:80].replace("\n", " ")
|
||||
logger.info(
|
||||
"inbound message: platform=%s user=%s chat=%s msg=%r",
|
||||
_platform_name, source.user_name or source.user_id or "unknown",
|
||||
source.chat_id or "unknown", _msg_preview,
|
||||
)
|
||||
|
||||
# Get or create session
|
||||
session_entry = self.session_store.get_or_create_session(source)
|
||||
|
|
@ -2872,6 +2910,14 @@ class GatewayRunner:
|
|||
|
||||
response = agent_result.get("final_response") or ""
|
||||
agent_messages = agent_result.get("messages", [])
|
||||
_response_time = time.time() - _msg_start_time
|
||||
_api_calls = agent_result.get("api_calls", 0)
|
||||
_resp_len = len(response)
|
||||
logger.info(
|
||||
"response ready: platform=%s chat=%s time=%.1fs api_calls=%d response=%d chars",
|
||||
_platform_name, source.chat_id or "unknown",
|
||||
_response_time, _api_calls, _resp_len,
|
||||
)
|
||||
|
||||
# Surface error details when the agent failed silently (final_response=None)
|
||||
if not response and agent_result.get("failed"):
|
||||
|
|
@ -7194,18 +7240,23 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
|
|||
except Exception:
|
||||
pass
|
||||
|
||||
# Configure rotating file log so gateway output is persisted for debugging
|
||||
log_dir = _hermes_home / 'logs'
|
||||
log_dir.mkdir(parents=True, exist_ok=True)
|
||||
file_handler = RotatingFileHandler(
|
||||
log_dir / 'gateway.log',
|
||||
maxBytes=5 * 1024 * 1024,
|
||||
backupCount=3,
|
||||
)
|
||||
# Centralized logging — agent.log (INFO+) and errors.log (WARNING+).
|
||||
# Idempotent, so repeated calls from AIAgent.__init__ won't duplicate.
|
||||
from hermes_logging import setup_logging
|
||||
log_dir = setup_logging(hermes_home=_hermes_home, mode="gateway")
|
||||
|
||||
# Gateway-specific rotating log — captures all gateway-level messages
|
||||
# (session management, platform adapters, slash commands, etc.).
|
||||
from agent.redact import RedactingFormatter
|
||||
file_handler.setFormatter(RedactingFormatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
|
||||
logging.getLogger().addHandler(file_handler)
|
||||
logging.getLogger().setLevel(logging.INFO)
|
||||
from hermes_logging import _add_rotating_handler
|
||||
_add_rotating_handler(
|
||||
logging.getLogger(),
|
||||
log_dir / 'gateway.log',
|
||||
level=logging.INFO,
|
||||
max_bytes=5 * 1024 * 1024,
|
||||
backup_count=3,
|
||||
formatter=RedactingFormatter('%(asctime)s %(levelname)s %(name)s: %(message)s'),
|
||||
)
|
||||
|
||||
# Optional stderr handler — level driven by -v/-q flags on the CLI.
|
||||
# verbosity=None (-q/--quiet): no stderr output
|
||||
|
|
@ -7222,16 +7273,6 @@ async def start_gateway(config: Optional[GatewayConfig] = None, replace: bool =
|
|||
if _stderr_level < logging.getLogger().level:
|
||||
logging.getLogger().setLevel(_stderr_level)
|
||||
|
||||
# Separate errors-only log for easy debugging
|
||||
error_handler = RotatingFileHandler(
|
||||
log_dir / 'errors.log',
|
||||
maxBytes=2 * 1024 * 1024,
|
||||
backupCount=2,
|
||||
)
|
||||
error_handler.setLevel(logging.WARNING)
|
||||
error_handler.setFormatter(RedactingFormatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
|
||||
logging.getLogger().addHandler(error_handler)
|
||||
|
||||
runner = GatewayRunner(config)
|
||||
|
||||
# Set up signal handlers
|
||||
|
|
|
|||
|
|
@ -537,6 +537,14 @@ DEFAULT_CONFIG = {
|
|||
"wrap_response": True,
|
||||
},
|
||||
|
||||
# Logging — controls file logging to ~/.hermes/logs/.
|
||||
# agent.log captures INFO+ (all agent activity); errors.log captures WARNING+.
|
||||
"logging": {
|
||||
"level": "INFO", # Minimum level for agent.log: DEBUG, INFO, WARNING
|
||||
"max_size_mb": 5, # Max size per log file before rotation
|
||||
"backup_count": 3, # Number of rotated backup files to keep
|
||||
},
|
||||
|
||||
# Config schema version - bump this when adding new required fields
|
||||
"_config_version": 12,
|
||||
}
|
||||
|
|
|
|||
336
hermes_cli/logs.py
Normal file
336
hermes_cli/logs.py
Normal file
|
|
@ -0,0 +1,336 @@
|
|||
"""``hermes logs`` — view and filter Hermes log files.
|
||||
|
||||
Supports tailing, following, session filtering, level filtering, and
|
||||
relative time ranges. All log files live under ``~/.hermes/logs/``.
|
||||
|
||||
Usage examples::
|
||||
|
||||
hermes logs # last 50 lines of agent.log
|
||||
hermes logs -f # follow agent.log in real time
|
||||
hermes logs errors # last 50 lines of errors.log
|
||||
hermes logs gateway -n 100 # last 100 lines of gateway.log
|
||||
hermes logs --level WARNING # only WARNING+ lines
|
||||
hermes logs --session abc123 # filter by session ID substring
|
||||
hermes logs --since 1h # lines from the last hour
|
||||
hermes logs --since 30m -f # follow, starting 30 min ago
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from hermes_constants import get_hermes_home, display_hermes_home
|
||||
|
||||
# Known log files (name → filename)
|
||||
LOG_FILES = {
|
||||
"agent": "agent.log",
|
||||
"errors": "errors.log",
|
||||
"gateway": "gateway.log",
|
||||
}
|
||||
|
||||
# Log line timestamp regex — matches "2026-04-05 22:35:00,123" or
|
||||
# "2026-04-05 22:35:00" at the start of a line.
|
||||
_TS_RE = re.compile(r"^(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})")
|
||||
|
||||
# Level extraction — matches " INFO ", " WARNING ", " ERROR ", " DEBUG ", " CRITICAL "
|
||||
_LEVEL_RE = re.compile(r"\s(DEBUG|INFO|WARNING|ERROR|CRITICAL)\s")
|
||||
|
||||
# Level ordering for >= filtering
|
||||
_LEVEL_ORDER = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3, "CRITICAL": 4}
|
||||
|
||||
|
||||
def _parse_since(since_str: str) -> Optional[datetime]:
|
||||
"""Parse a relative time string like '1h', '30m', '2d' into a datetime cutoff.
|
||||
|
||||
Returns None if the string can't be parsed.
|
||||
"""
|
||||
since_str = since_str.strip().lower()
|
||||
match = re.match(r"^(\d+)\s*([smhd])$", since_str)
|
||||
if not match:
|
||||
return None
|
||||
value = int(match.group(1))
|
||||
unit = match.group(2)
|
||||
delta = {
|
||||
"s": timedelta(seconds=value),
|
||||
"m": timedelta(minutes=value),
|
||||
"h": timedelta(hours=value),
|
||||
"d": timedelta(days=value),
|
||||
}[unit]
|
||||
return datetime.now() - delta
|
||||
|
||||
|
||||
def _parse_line_timestamp(line: str) -> Optional[datetime]:
    """Pull the leading ``YYYY-MM-DD HH:MM:SS`` timestamp off a log line.

    Returns ``None`` for lines that don't start with a timestamp or whose
    digits don't form a valid date/time.
    """
    # Same pattern as the module-level _TS_RE; re.match() caches compiled
    # patterns, so matching the literal here costs nothing extra.
    m = re.match(r"^(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})", line)
    if m is None:
        return None
    try:
        return datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S")
    except ValueError:
        # Digits matched the shape but not a real date (e.g. month 13).
        return None
|
||||
|
||||
|
||||
def _extract_level(line: str) -> Optional[str]:
    """Return the first whitespace-delimited log level in *line*, or None."""
    # Same pattern as the module-level _LEVEL_RE; the surrounding \s on both
    # sides avoids matching level words embedded inside identifiers.
    m = re.search(r"\s(DEBUG|INFO|WARNING|ERROR|CRITICAL)\s", line)
    return None if m is None else m.group(1)
|
||||
|
||||
|
||||
def _matches_filters(
    line: str,
    *,
    min_level: Optional[str] = None,
    session_filter: Optional[str] = None,
    since: Optional[datetime] = None,
) -> bool:
    """Return True when *line* survives every active filter.

    Lines whose timestamp or level cannot be parsed are NOT rejected by the
    time/level filters — only a parsed value that fails the check drops a
    line. The session filter is a plain substring match.
    """
    if since is not None:
        stamp = _parse_line_timestamp(line)
        if stamp is not None and stamp < since:
            return False

    if min_level is not None:
        lvl = _extract_level(line)
        if lvl is not None and _LEVEL_ORDER.get(lvl, 0) < _LEVEL_ORDER.get(min_level, 0):
            return False

    return session_filter is None or session_filter in line
|
||||
|
||||
|
||||
def tail_log(
    log_name: str = "agent",
    *,
    num_lines: int = 50,
    follow: bool = False,
    level: Optional[str] = None,
    session: Optional[str] = None,
    since: Optional[str] = None,
) -> None:
    """Read and display log lines, optionally following in real time.

    Parameters
    ----------
    log_name
        Which log to read: ``"agent"``, ``"errors"``, ``"gateway"``.
    num_lines
        Number of recent lines to show (before follow starts).
    follow
        If True, keep watching for new lines (Ctrl+C to stop).
    level
        Minimum log level to show (e.g. ``"WARNING"``).
    session
        Session ID substring to filter on.
    since
        Relative time string (e.g. ``"1h"``, ``"30m"``).
    """
    filename = LOG_FILES.get(log_name)
    if filename is None:
        print(f"Unknown log: {log_name!r}. Available: {', '.join(sorted(LOG_FILES))}")
        sys.exit(1)

    log_path = get_hermes_home() / "logs" / filename
    if not log_path.exists():
        print(f"Log file not found: {log_path}")
        print("(Logs are created when Hermes runs — try 'hermes chat' first)")
        sys.exit(1)

    # Parse --since into a datetime cutoff
    since_dt = None
    if since:
        since_dt = _parse_since(since)
        if since_dt is None:
            print(f"Invalid --since value: {since!r}. Use format like '1h', '30m', '2d'.")
            sys.exit(1)

    min_level = level.upper() if level else None
    if min_level and min_level not in _LEVEL_ORDER:
        print(f"Invalid --level: {level!r}. Use DEBUG, INFO, WARNING, ERROR, or CRITICAL.")
        sys.exit(1)

    has_filters = min_level is not None or session is not None or since_dt is not None

    # Read and display the tail
    try:
        lines = _read_tail(log_path, num_lines, has_filters=has_filters,
                           min_level=min_level, session_filter=session,
                           since=since_dt)
    except PermissionError:
        print(f"Permission denied: {log_path}")
        sys.exit(1)

    # Print header — describe any active filters so the output is self-explaining.
    filter_parts = []
    if min_level:
        filter_parts.append(f"level>={min_level}")
    if session:
        filter_parts.append(f"session={session}")
    if since:
        filter_parts.append(f"since={since}")
    filter_desc = f" [{', '.join(filter_parts)}]" if filter_parts else ""

    # FIX: the header previously printed the literal "(unknown)" instead of
    # the resolved log file name — show the actual file being tailed.
    if follow:
        print(f"--- {display_hermes_home()}/logs/{filename}{filter_desc} (Ctrl+C to stop) ---")
    else:
        print(f"--- {display_hermes_home()}/logs/{filename}{filter_desc} (last {num_lines}) ---")

    for line in lines:
        print(line, end="")

    if not follow:
        return

    # Follow mode — poll for new content
    try:
        _follow_log(log_path, min_level=min_level, session_filter=session,
                    since=since_dt)
    except KeyboardInterrupt:
        print("\n--- stopped ---")
|
||||
|
||||
|
||||
def _read_tail(
    path: Path,
    num_lines: int,
    *,
    has_filters: bool = False,
    min_level: Optional[str] = None,
    session_filter: Optional[str] = None,
    since: Optional[datetime] = None,
) -> list:
    """Return the last *num_lines* lines of *path* that pass the filters.

    With no filters this is a plain tail. With filters active we over-read
    (20x the requested count, at least 2000 raw lines) so that enough
    matching lines survive the filtering pass.
    """
    if not has_filters:
        return _read_last_n_lines(path, num_lines)

    candidates = _read_last_n_lines(path, max(num_lines * 20, 2000))
    kept = [
        line
        for line in candidates
        if _matches_filters(line, min_level=min_level,
                            session_filter=session_filter, since=since)
    ]
    return kept[-num_lines:]
|
||||
|
||||
|
||||
def _read_last_n_lines(path: Path, n: int) -> list:
|
||||
"""Efficiently read the last N lines from a file.
|
||||
|
||||
For files under 1MB, reads the whole file (fast, simple).
|
||||
For larger files, reads chunks from the end.
|
||||
"""
|
||||
try:
|
||||
size = path.stat().st_size
|
||||
if size == 0:
|
||||
return []
|
||||
|
||||
# For files up to 1MB, just read the whole thing — simple and correct.
|
||||
if size <= 1_048_576:
|
||||
with open(path, "r", encoding="utf-8", errors="replace") as f:
|
||||
all_lines = f.readlines()
|
||||
return all_lines[-n:]
|
||||
|
||||
# For large files, read chunks from the end.
|
||||
with open(path, "rb") as f:
|
||||
chunk_size = 8192
|
||||
lines = []
|
||||
pos = size
|
||||
|
||||
while pos > 0 and len(lines) <= n + 1:
|
||||
read_size = min(chunk_size, pos)
|
||||
pos -= read_size
|
||||
f.seek(pos)
|
||||
chunk = f.read(read_size)
|
||||
chunk_lines = chunk.split(b"\n")
|
||||
if lines:
|
||||
# Merge the last partial line of the new chunk with the
|
||||
# first partial line of what we already have.
|
||||
lines[0] = chunk_lines[-1] + lines[0]
|
||||
lines = chunk_lines[:-1] + lines
|
||||
else:
|
||||
lines = chunk_lines
|
||||
chunk_size = min(chunk_size * 2, 65536)
|
||||
|
||||
# Decode and return last N non-empty lines.
|
||||
decoded = []
|
||||
for raw in lines:
|
||||
if not raw.strip():
|
||||
continue
|
||||
try:
|
||||
decoded.append(raw.decode("utf-8", errors="replace") + "\n")
|
||||
except Exception:
|
||||
decoded.append(raw.decode("latin-1") + "\n")
|
||||
return decoded[-n:]
|
||||
|
||||
except Exception:
|
||||
# Fallback: read entire file
|
||||
with open(path, "r", encoding="utf-8", errors="replace") as f:
|
||||
all_lines = f.readlines()
|
||||
return all_lines[-n:]
|
||||
|
||||
|
||||
def _follow_log(
    path: Path,
    *,
    min_level: Optional[str] = None,
    session_filter: Optional[str] = None,
    since: Optional[datetime] = None,
) -> None:
    """Poll a log file for new content and print matching lines.

    Handles rotation and truncation: these logs are written through
    ``RotatingFileHandler``, which renames the file at the size limit, so a
    naive follower holding the old file descriptor would go silent forever.
    When no new data arrives we re-stat the path — a changed inode means the
    file was rotated (reopen it), a shrunken size means it was truncated
    (rewind to the start).

    Runs until interrupted (the caller handles KeyboardInterrupt).
    """
    f = open(path, "r", encoding="utf-8", errors="replace")
    try:
        # Start at the end — the caller already printed the tail.
        f.seek(0, 2)
        while True:
            line = f.readline()
            if line:
                if _matches_filters(line, min_level=min_level,
                                    session_filter=session_filter, since=since):
                    print(line, end="")
                    sys.stdout.flush()
                continue

            # No new data — check for rotation/truncation before sleeping.
            try:
                st = os.stat(path)
                if st.st_ino != os.fstat(f.fileno()).st_ino:
                    # File was rotated out from under us — follow the new one.
                    f.close()
                    f = open(path, "r", encoding="utf-8", errors="replace")
                elif st.st_size < f.tell():
                    # File was truncated — restart from the beginning.
                    f.seek(0)
            except OSError:
                pass  # file briefly missing mid-rotation — retry next poll
            time.sleep(0.3)
    finally:
        f.close()
|
||||
|
||||
|
||||
def list_logs() -> None:
    """Print every ``*.log`` file under the logs directory with size and age."""
    log_dir = get_hermes_home() / "logs"
    if not log_dir.exists():
        print(f"No logs directory at {display_hermes_home()}/logs/")
        return

    print(f"Log files in {display_hermes_home()}/logs/:\n")
    shown = 0
    for path in sorted(log_dir.iterdir()):
        if not (path.is_file() and path.suffix == ".log"):
            continue
        stat = path.stat()
        nbytes = stat.st_size
        modified = datetime.fromtimestamp(stat.st_mtime)

        # Human-readable size: B / KB / MB.
        if nbytes < 1024:
            size_str = f"{nbytes}B"
        elif nbytes < 1024 * 1024:
            size_str = f"{nbytes / 1024:.1f}KB"
        else:
            size_str = f"{nbytes / (1024 * 1024):.1f}MB"

        # Human-readable age: relative under a day, else the date.
        elapsed = (datetime.now() - modified).total_seconds()
        if elapsed < 60:
            age_str = "just now"
        elif elapsed < 3600:
            age_str = f"{int(elapsed / 60)}m ago"
        elif elapsed < 86400:
            age_str = f"{int(elapsed / 3600)}h ago"
        else:
            age_str = modified.strftime("%Y-%m-%d")

        print(f" {path.name:<25} {size_str:>8} {age_str}")
        shown += 1

    if not shown:
        print(" (no log files yet — run 'hermes chat' to generate logs)")
|
||||
|
|
@ -142,6 +142,13 @@ from hermes_cli.config import get_hermes_home
|
|||
from hermes_cli.env_loader import load_hermes_dotenv
|
||||
load_hermes_dotenv(project_env=PROJECT_ROOT / '.env')
|
||||
|
||||
# Initialize centralized file logging early — all `hermes` subcommands
|
||||
# (chat, setup, gateway, config, etc.) write to agent.log + errors.log.
|
||||
try:
|
||||
from hermes_logging import setup_logging as _setup_logging
|
||||
_setup_logging(mode="cli")
|
||||
except Exception:
|
||||
pass # best-effort — don't crash the CLI if logging setup fails
|
||||
|
||||
import logging
|
||||
import time as _time
|
||||
|
|
@ -4003,6 +4010,26 @@ def cmd_completion(args):
|
|||
print(generate_bash_completion())
|
||||
|
||||
|
||||
def cmd_logs(args):
    """Entry point for ``hermes logs`` — tail or list the Hermes log files."""
    from hermes_cli.logs import tail_log, list_logs

    target = getattr(args, "log_name", "agent") or "agent"

    # "hermes logs list" is a pseudo log name that prints the directory index.
    if target == "list":
        list_logs()
        return

    tail_log(
        target,
        num_lines=getattr(args, "lines", 50),
        follow=getattr(args, "follow", False),
        level=getattr(args, "level", None),
        session=getattr(args, "session", None),
        since=getattr(args, "since", None),
    )
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point for hermes CLI."""
|
||||
parser = argparse.ArgumentParser(
|
||||
|
|
@ -4033,6 +4060,10 @@ Examples:
|
|||
hermes sessions list List past sessions
|
||||
hermes sessions browse Interactive session picker
|
||||
hermes sessions rename ID T Rename/title a session
|
||||
hermes logs View agent.log (last 50 lines)
|
||||
hermes logs -f Follow agent.log in real time
|
||||
hermes logs errors View errors.log
|
||||
hermes logs --since 1h Lines from the last hour
|
||||
hermes update Update to latest version
|
||||
|
||||
For more help on a command:
|
||||
|
|
@ -5356,6 +5387,53 @@ For more help on a command:
|
|||
)
|
||||
completion_parser.set_defaults(func=cmd_completion)
|
||||
|
||||
# =========================================================================
|
||||
# logs command
|
||||
# =========================================================================
|
||||
logs_parser = subparsers.add_parser(
|
||||
"logs",
|
||||
help="View and filter Hermes log files",
|
||||
description="View, tail, and filter agent.log / errors.log / gateway.log",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""\
|
||||
Examples:
|
||||
hermes logs Show last 50 lines of agent.log
|
||||
hermes logs -f Follow agent.log in real time
|
||||
hermes logs errors Show last 50 lines of errors.log
|
||||
hermes logs gateway -n 100 Show last 100 lines of gateway.log
|
||||
hermes logs --level WARNING Only show WARNING and above
|
||||
hermes logs --session abc123 Filter by session ID
|
||||
hermes logs --since 1h Lines from the last hour
|
||||
hermes logs --since 30m -f Follow, starting from 30 min ago
|
||||
hermes logs list List available log files with sizes
|
||||
""",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"log_name", nargs="?", default="agent",
|
||||
help="Log to view: agent (default), errors, gateway, or 'list' to show available files",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"-n", "--lines", type=int, default=50,
|
||||
help="Number of lines to show (default: 50)",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"-f", "--follow", action="store_true",
|
||||
help="Follow the log in real time (like tail -f)",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"--level", metavar="LEVEL",
|
||||
help="Minimum log level to show (DEBUG, INFO, WARNING, ERROR)",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"--session", metavar="ID",
|
||||
help="Filter lines containing this session ID substring",
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"--since", metavar="TIME",
|
||||
help="Show lines since TIME ago (e.g. 1h, 30m, 2d)",
|
||||
)
|
||||
logs_parser.set_defaults(func=cmd_logs)
|
||||
|
||||
# =========================================================================
|
||||
# Parse and execute
|
||||
# =========================================================================
|
||||
|
|
|
|||
230
hermes_logging.py
Normal file
230
hermes_logging.py
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
"""Centralized logging setup for Hermes Agent.
|
||||
|
||||
Provides a single ``setup_logging()`` entry point that both the CLI and
|
||||
gateway call early in their startup path. All log files live under
|
||||
``~/.hermes/logs/`` (profile-aware via ``get_hermes_home()``).
|
||||
|
||||
Log files produced:
|
||||
agent.log — INFO+, all agent/tool/session activity (the main log)
|
||||
errors.log — WARNING+, errors and warnings only (quick triage)
|
||||
|
||||
Both files use ``RotatingFileHandler`` with ``RedactingFormatter`` so
|
||||
secrets are never written to disk.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
# Sentinel to track whether setup_logging() has already run. The function
|
||||
# is idempotent — calling it twice is safe but the second call is a no-op
|
||||
# unless ``force=True``.
|
||||
_logging_initialized = False
|
||||
|
||||
# Default log format — includes timestamp, level, logger name, and message.
|
||||
_LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s: %(message)s"
|
||||
_LOG_FORMAT_VERBOSE = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
|
||||
# Third-party loggers that are noisy at DEBUG/INFO level.
|
||||
_NOISY_LOGGERS = (
|
||||
"openai",
|
||||
"openai._base_client",
|
||||
"httpx",
|
||||
"httpcore",
|
||||
"asyncio",
|
||||
"hpack",
|
||||
"hpack.hpack",
|
||||
"grpc",
|
||||
"modal",
|
||||
"urllib3",
|
||||
"urllib3.connectionpool",
|
||||
"websockets",
|
||||
"charset_normalizer",
|
||||
"markdown_it",
|
||||
)
|
||||
|
||||
|
||||
def setup_logging(
    *,
    hermes_home: Optional[Path] = None,
    log_level: Optional[str] = None,
    max_size_mb: Optional[int] = None,
    backup_count: Optional[int] = None,
    mode: Optional[str] = None,
    force: bool = False,
) -> Path:
    """Configure the Hermes logging subsystem.

    Safe to call multiple times — repeat calls are no-ops unless *force*
    is ``True``.

    Parameters
    ----------
    hermes_home
        Override for the Hermes home directory. Falls back to
        ``get_hermes_home()`` (profile-aware).
    log_level
        Minimum level for the ``agent.log`` file handler (``"DEBUG"``,
        ``"INFO"``, ``"WARNING"``). Defaults to ``"INFO"`` or the value
        from config.yaml ``logging.level``.
    max_size_mb
        Maximum size of each log file in megabytes before rotation.
        Defaults to 5 or config.yaml ``logging.max_size_mb``.
    backup_count
        Number of rotated backup files to keep. Defaults to 3 or
        config.yaml ``logging.backup_count``.
    mode
        Caller-context hint (``"cli"``, ``"gateway"``, ``"cron"``).
        Currently unused by this function body.
    force
        Re-run setup even if it has already been called.

    Returns
    -------
    Path
        The ``logs/`` directory where files are written.
    """
    global _logging_initialized

    base = hermes_home or get_hermes_home()
    log_dir = base / "logs"

    # Idempotency guard — a second call just reports the log directory.
    if _logging_initialized and not force:
        return log_dir

    log_dir.mkdir(parents=True, exist_ok=True)

    # config.yaml values fill in whatever the caller did not pass
    # (best-effort — config may not be loaded yet).
    cfg_level, cfg_max_size, cfg_backup = _read_logging_config()

    level = getattr(logging, (log_level or cfg_level or "INFO").upper(), logging.INFO)
    max_bytes = (max_size_mb or cfg_max_size or 5) * 1024 * 1024
    backups = backup_count or cfg_backup or 3

    # Lazy import to avoid a circular dependency at module load time.
    from agent.redact import RedactingFormatter

    root = logging.getLogger()

    # agent.log (caller-chosen level) is the main activity log;
    # errors.log (WARNING+, fixed 2MB x2) is the quick-triage log.
    for filename, handler_level, size_cap, keep in (
        ("agent.log", level, max_bytes, backups),
        ("errors.log", logging.WARNING, 2 * 1024 * 1024, 2),
    ):
        _add_rotating_handler(
            root,
            log_dir / filename,
            level=handler_level,
            max_bytes=size_cap,
            backup_count=keep,
            formatter=RedactingFormatter(_LOG_FORMAT),
        )

    # Root logger must be at least as permissive as the file handlers.
    if root.level == logging.NOTSET or root.level > level:
        root.setLevel(level)

    # Quiet chatty third-party libraries.
    for noisy in _NOISY_LOGGERS:
        logging.getLogger(noisy).setLevel(logging.WARNING)

    _logging_initialized = True
    return log_dir
|
||||
|
||||
|
||||
def setup_verbose_logging() -> None:
    """Enable DEBUG-level console logging for ``--verbose`` / ``-v`` mode.

    Called by ``AIAgent.__init__()`` when ``verbose_logging=True``.
    """
    from agent.redact import RedactingFormatter

    root_logger = logging.getLogger()

    # Idempotence: bail out if our tagged console handler is already attached.
    already_attached = any(
        isinstance(existing, logging.StreamHandler)
        and not isinstance(existing, RotatingFileHandler)
        and getattr(existing, "_hermes_verbose", False)
        for existing in root_logger.handlers
    )
    if already_attached:
        return

    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(RedactingFormatter(_LOG_FORMAT_VERBOSE, datefmt="%H:%M:%S"))
    # Marker consulted by the duplicate check above.
    console._hermes_verbose = True  # type: ignore[attr-defined]
    root_logger.addHandler(console)

    # DEBUG records only reach handlers if the root level lets them through.
    if root_logger.level > logging.DEBUG:
        root_logger.setLevel(logging.DEBUG)

    # Third-party chatter stays at WARNING; rex-deploy keeps INFO so
    # sandbox status remains visible.
    for noisy in _NOISY_LOGGERS:
        logging.getLogger(noisy).setLevel(logging.WARNING)
    logging.getLogger("rex-deploy").setLevel(logging.INFO)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _add_rotating_handler(
|
||||
logger: logging.Logger,
|
||||
path: Path,
|
||||
*,
|
||||
level: int,
|
||||
max_bytes: int,
|
||||
backup_count: int,
|
||||
formatter: logging.Formatter,
|
||||
) -> None:
|
||||
"""Add a ``RotatingFileHandler`` to *logger*, skipping if one already
|
||||
exists for the same resolved file path (idempotent).
|
||||
"""
|
||||
resolved = path.resolve()
|
||||
for existing in logger.handlers:
|
||||
if (
|
||||
isinstance(existing, RotatingFileHandler)
|
||||
and Path(getattr(existing, "baseFilename", "")).resolve() == resolved
|
||||
):
|
||||
return # already attached
|
||||
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
handler = RotatingFileHandler(
|
||||
str(path), maxBytes=max_bytes, backupCount=backup_count,
|
||||
)
|
||||
handler.setLevel(level)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
|
||||
def _read_logging_config():
|
||||
"""Best-effort read of ``logging.*`` from config.yaml.
|
||||
|
||||
Returns ``(level, max_size_mb, backup_count)`` — any may be ``None``.
|
||||
"""
|
||||
try:
|
||||
import yaml
|
||||
config_path = get_hermes_home() / "config.yaml"
|
||||
if config_path.exists():
|
||||
with open(config_path, "r", encoding="utf-8") as f:
|
||||
cfg = yaml.safe_load(f) or {}
|
||||
log_cfg = cfg.get("logging", {})
|
||||
if isinstance(log_cfg, dict):
|
||||
return (
|
||||
log_cfg.get("level"),
|
||||
log_cfg.get("max_size_mb"),
|
||||
log_cfg.get("backup_count"),
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
return (None, None, None)
|
||||
112
run_agent.py
112
run_agent.py
|
|
@ -717,77 +717,23 @@ class AIAgent:
|
|||
self._current_tool: str | None = None
|
||||
self._api_call_count: int = 0
|
||||
|
||||
# Persistent error log -- always writes WARNING+ to ~/.hermes/logs/errors.log
|
||||
# so tool failures, API errors, etc. are inspectable after the fact.
|
||||
# In gateway mode, each incoming message creates a new AIAgent instance,
|
||||
# while the root logger is process-global. Re-adding the same errors.log
|
||||
# handler would cause each warning/error line to be written multiple times.
|
||||
from logging.handlers import RotatingFileHandler
|
||||
root_logger = logging.getLogger()
|
||||
error_log_dir = _hermes_home / "logs"
|
||||
error_log_path = error_log_dir / "errors.log"
|
||||
resolved_error_log_path = error_log_path.resolve()
|
||||
has_errors_log_handler = any(
|
||||
isinstance(handler, RotatingFileHandler)
|
||||
and Path(getattr(handler, "baseFilename", "")).resolve() == resolved_error_log_path
|
||||
for handler in root_logger.handlers
|
||||
)
|
||||
from agent.redact import RedactingFormatter
|
||||
if not has_errors_log_handler:
|
||||
error_log_dir.mkdir(parents=True, exist_ok=True)
|
||||
error_file_handler = RotatingFileHandler(
|
||||
error_log_path, maxBytes=2 * 1024 * 1024, backupCount=2,
|
||||
)
|
||||
error_file_handler.setLevel(logging.WARNING)
|
||||
error_file_handler.setFormatter(RedactingFormatter(
|
||||
'%(asctime)s %(levelname)s %(name)s: %(message)s',
|
||||
))
|
||||
root_logger.addHandler(error_file_handler)
|
||||
# Centralized logging — agent.log (INFO+) and errors.log (WARNING+)
|
||||
# both live under ~/.hermes/logs/. Idempotent, so gateway mode
|
||||
# (which creates a new AIAgent per message) won't duplicate handlers.
|
||||
from hermes_logging import setup_logging, setup_verbose_logging
|
||||
setup_logging(hermes_home=_hermes_home)
|
||||
|
||||
if self.verbose_logging:
|
||||
logging.basicConfig(
|
||||
level=logging.DEBUG,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||
datefmt='%H:%M:%S'
|
||||
)
|
||||
for handler in logging.getLogger().handlers:
|
||||
handler.setFormatter(RedactingFormatter(
|
||||
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||
datefmt='%H:%M:%S',
|
||||
))
|
||||
# Keep third-party libraries at WARNING level to reduce noise
|
||||
# We have our own retry and error logging that's more informative
|
||||
logging.getLogger('openai').setLevel(logging.WARNING)
|
||||
logging.getLogger('openai._base_client').setLevel(logging.WARNING)
|
||||
logging.getLogger('httpx').setLevel(logging.WARNING)
|
||||
logging.getLogger('httpcore').setLevel(logging.WARNING)
|
||||
logging.getLogger('asyncio').setLevel(logging.WARNING)
|
||||
# Suppress Modal/gRPC related debug spam
|
||||
logging.getLogger('hpack').setLevel(logging.WARNING)
|
||||
logging.getLogger('hpack.hpack').setLevel(logging.WARNING)
|
||||
logging.getLogger('grpc').setLevel(logging.WARNING)
|
||||
logging.getLogger('modal').setLevel(logging.WARNING)
|
||||
logging.getLogger('rex-deploy').setLevel(logging.INFO) # Keep INFO for sandbox status
|
||||
setup_verbose_logging()
|
||||
logger.info("Verbose logging enabled (third-party library logs suppressed)")
|
||||
else:
|
||||
# Set logging to INFO level for important messages only
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s',
|
||||
datefmt='%H:%M:%S'
|
||||
)
|
||||
# Suppress noisy library logging
|
||||
logging.getLogger('openai').setLevel(logging.ERROR)
|
||||
logging.getLogger('openai._base_client').setLevel(logging.ERROR)
|
||||
logging.getLogger('httpx').setLevel(logging.ERROR)
|
||||
logging.getLogger('httpcore').setLevel(logging.ERROR)
|
||||
if self.quiet_mode:
|
||||
# In quiet mode (CLI default), suppress all tool/infra log
|
||||
# noise. The TUI has its own rich display for status; logger
|
||||
# INFO/WARNING messages just clutter it.
|
||||
# noise on the *console*. The TUI has its own rich display
|
||||
# for status; logger INFO/WARNING messages just clutter it.
|
||||
# File handlers (agent.log, errors.log) still capture everything.
|
||||
for quiet_logger in [
|
||||
'tools', # all tools.* (terminal, browser, web, file, etc.)
|
||||
|
||||
'run_agent', # agent runner internals
|
||||
'trajectory_compressor',
|
||||
'cron', # scheduler (only relevant in daemon mode)
|
||||
|
|
@ -5880,6 +5826,12 @@ class AIAgent:
|
|||
Returns:
|
||||
(compressed_messages, new_system_prompt) tuple
|
||||
"""
|
||||
_pre_msg_count = len(messages)
|
||||
logger.info(
|
||||
"context compression started: session=%s messages=%d tokens=~%s model=%s",
|
||||
self.session_id or "none", _pre_msg_count,
|
||||
f"{approx_tokens:,}" if approx_tokens else "unknown", self.model,
|
||||
)
|
||||
# Pre-compression memory flush: let the model save memories before they're lost
|
||||
self.flush_memories(messages, min_turns=0)
|
||||
|
||||
|
|
@ -5956,6 +5908,11 @@ class AIAgent:
|
|||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(
|
||||
"context compression done: session=%s messages=%d->%d tokens=~%s",
|
||||
self.session_id or "none", _pre_msg_count, len(compressed),
|
||||
f"{_compressed_est:,}",
|
||||
)
|
||||
return compressed, new_system_prompt
|
||||
|
||||
def _execute_tool_calls(self, assistant_message, messages: list, effective_task_id: str, api_call_count: int = 0) -> None:
|
||||
|
|
@ -6159,6 +6116,10 @@ class AIAgent:
|
|||
logger.error("_invoke_tool raised for %s: %s", function_name, tool_error, exc_info=True)
|
||||
duration = time.time() - start
|
||||
is_error, _ = _detect_tool_failure(function_name, result)
|
||||
if is_error:
|
||||
logger.info("tool %s failed (%.2fs): %s", function_name, duration, result[:200])
|
||||
else:
|
||||
logger.info("tool %s completed (%.2fs, %d chars)", function_name, duration, len(result))
|
||||
results[index] = (function_name, function_args, result, duration, is_error)
|
||||
|
||||
# Start spinner for CLI mode (skip when TUI handles tool progress)
|
||||
|
|
@ -6508,6 +6469,8 @@ class AIAgent:
|
|||
_is_error_result, _ = _detect_tool_failure(function_name, function_result)
|
||||
if _is_error_result:
|
||||
logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview)
|
||||
else:
|
||||
logger.info("tool %s completed (%.2fs, %d chars)", function_name, tool_duration, len(function_result))
|
||||
|
||||
if self.tool_progress_callback:
|
||||
try:
|
||||
|
|
@ -6885,7 +6848,17 @@ class AIAgent:
|
|||
# They are initialized in __init__ and must persist across run_conversation
|
||||
# calls so that nudge logic accumulates correctly in CLI mode.
|
||||
self.iteration_budget = IterationBudget(self.max_iterations)
|
||||
|
||||
|
||||
# Log conversation turn start for debugging/observability
|
||||
_msg_preview = (user_message[:80] + "...") if len(user_message) > 80 else user_message
|
||||
_msg_preview = _msg_preview.replace("\n", " ")
|
||||
logger.info(
|
||||
"conversation turn: session=%s model=%s provider=%s platform=%s history=%d msg=%r",
|
||||
self.session_id or "none", self.model, self.provider or "unknown",
|
||||
self.platform or "unknown", len(conversation_history or []),
|
||||
_msg_preview,
|
||||
)
|
||||
|
||||
# Initialize conversation (copy to avoid mutating the caller's list)
|
||||
messages = list(conversation_history) if conversation_history else []
|
||||
|
||||
|
|
@ -7682,6 +7655,17 @@ class AIAgent:
|
|||
self.session_cache_write_tokens += canonical_usage.cache_write_tokens
|
||||
self.session_reasoning_tokens += canonical_usage.reasoning_tokens
|
||||
|
||||
# Log API call details for debugging/observability
|
||||
_cache_pct = ""
|
||||
if canonical_usage.cache_read_tokens and prompt_tokens:
|
||||
_cache_pct = f" cache={canonical_usage.cache_read_tokens}/{prompt_tokens} ({100*canonical_usage.cache_read_tokens/prompt_tokens:.0f}%)"
|
||||
logger.info(
|
||||
"API call #%d: model=%s provider=%s in=%d out=%d total=%d latency=%.1fs%s",
|
||||
self.session_api_calls, self.model, self.provider or "unknown",
|
||||
prompt_tokens, completion_tokens, total_tokens,
|
||||
api_duration, _cache_pct,
|
||||
)
|
||||
|
||||
cost_result = estimate_usage_cost(
|
||||
self.model,
|
||||
canonical_usage,
|
||||
|
|
|
|||
288
tests/hermes_cli/test_logs.py
Normal file
288
tests/hermes_cli/test_logs.py
Normal file
|
|
@ -0,0 +1,288 @@
|
|||
"""Tests for hermes_cli/logs.py — log viewing and filtering."""
|
||||
|
||||
import os
|
||||
import textwrap
|
||||
from datetime import datetime, timedelta
|
||||
from io import StringIO
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from hermes_cli.logs import (
|
||||
LOG_FILES,
|
||||
_extract_level,
|
||||
_matches_filters,
|
||||
_parse_line_timestamp,
|
||||
_parse_since,
|
||||
_read_last_n_lines,
|
||||
list_logs,
|
||||
tail_log,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@pytest.fixture
def log_dir(tmp_path, monkeypatch):
    """Create a fake HERMES_HOME with a logs/ directory."""
    logs_dir = Path(os.environ["HERMES_HOME"]) / "logs"
    logs_dir.mkdir(parents=True, exist_ok=True)
    return logs_dir
|
||||
|
||||
|
||||
@pytest.fixture
def sample_agent_log(log_dir):
    """Write a realistic agent.log with mixed levels and sessions."""
    content = textwrap.dedent("""\
        2026-04-05 10:00:00,000 INFO run_agent: conversation turn: session=sess_aaa model=claude provider=openrouter platform=cli history=0 msg='hello'
        2026-04-05 10:00:01,000 INFO run_agent: tool terminal completed (0.50s, 200 chars)
        2026-04-05 10:00:02,000 INFO run_agent: API call #1: model=claude provider=openrouter in=1000 out=200 total=1200 latency=1.5s
        2026-04-05 10:00:03,000 WARNING run_agent: Tool web_search returned error (2.00s): timeout
        2026-04-05 10:00:04,000 INFO run_agent: conversation turn: session=sess_bbb model=gpt-5 provider=openai platform=telegram history=5 msg='fix bug'
        2026-04-05 10:00:05,000 ERROR run_agent: API call failed after 3 retries. rate limited
        2026-04-05 10:00:06,000 INFO run_agent: tool read_file completed (0.01s, 500 chars)
        2026-04-05 10:00:07,000 DEBUG run_agent: verbose internal detail
        2026-04-05 10:00:08,000 INFO credential_pool: credential pool: marking key-1 exhausted (status=429), rotating
        2026-04-05 10:00:09,000 INFO credential_pool: credential pool: rotated to key-2
        """)
    log_path = log_dir / "agent.log"
    log_path.write_text(content)
    return log_path
|
||||
|
||||
|
||||
@pytest.fixture
def sample_errors_log(log_dir):
    """Write a small errors.log."""
    content = textwrap.dedent("""\
        2026-04-05 10:00:03,000 WARNING run_agent: Tool web_search returned error (2.00s): timeout
        2026-04-05 10:00:05,000 ERROR run_agent: API call failed after 3 retries. rate limited
        """)
    log_path = log_dir / "errors.log"
    log_path.write_text(content)
    return log_path
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _parse_since
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestParseSince:
    """_parse_since() turns relative durations like '2h' into datetimes."""

    def test_hours(self):
        cutoff = _parse_since("2h")
        assert cutoff is not None
        age = (datetime.now() - cutoff).total_seconds()
        assert age == pytest.approx(7200, abs=5)

    def test_minutes(self):
        cutoff = _parse_since("30m")
        assert cutoff is not None
        age = (datetime.now() - cutoff).total_seconds()
        assert age == pytest.approx(1800, abs=5)

    def test_days(self):
        cutoff = _parse_since("1d")
        assert cutoff is not None
        age = (datetime.now() - cutoff).total_seconds()
        assert age == pytest.approx(86400, abs=5)

    def test_seconds(self):
        cutoff = _parse_since("60s")
        assert cutoff is not None
        age = (datetime.now() - cutoff).total_seconds()
        assert age == pytest.approx(60, abs=5)

    def test_invalid_returns_none(self):
        for bad in ("abc", "", "10x"):
            assert _parse_since(bad) is None

    def test_whitespace_handling(self):
        assert _parse_since(" 1h ") is not None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _parse_line_timestamp
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestParseLineTimestamp:
    """_parse_line_timestamp() pulls the leading timestamp off a log line."""

    def test_standard_format(self):
        parsed = _parse_line_timestamp("2026-04-05 10:00:00,123 INFO something")
        assert parsed is not None
        assert parsed.year == 2026
        assert parsed.hour == 10

    def test_no_timestamp(self):
        assert _parse_line_timestamp("just some text") is None

    def test_continuation_line(self):
        # Indented traceback/continuation lines carry no timestamp.
        assert _parse_line_timestamp("  at module.function (line 42)") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _extract_level
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestExtractLevel:
    """_extract_level() recognizes the level token in a formatted line."""

    def test_info(self):
        line = "2026-04-05 10:00:00 INFO run_agent: something"
        assert _extract_level(line) == "INFO"

    def test_warning(self):
        line = "2026-04-05 10:00:00 WARNING run_agent: bad"
        assert _extract_level(line) == "WARNING"

    def test_error(self):
        line = "2026-04-05 10:00:00 ERROR run_agent: crash"
        assert _extract_level(line) == "ERROR"

    def test_debug(self):
        line = "2026-04-05 10:00:00 DEBUG run_agent: detail"
        assert _extract_level(line) == "DEBUG"

    def test_no_level(self):
        assert _extract_level("just a plain line") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _matches_filters
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestMatchesFilters:
    """_matches_filters() combines level, session, and since predicates."""

    def test_no_filters_always_matches(self):
        assert _matches_filters("any line") is True

    def test_level_filter_passes(self):
        line = "2026-04-05 10:00:00 WARNING something"
        assert _matches_filters(line, min_level="WARNING") is True

    def test_level_filter_rejects(self):
        line = "2026-04-05 10:00:00 INFO something"
        assert _matches_filters(line, min_level="WARNING") is False

    def test_session_filter_passes(self):
        line = "session=sess_aaa model=claude"
        assert _matches_filters(line, session_filter="sess_aaa") is True

    def test_session_filter_rejects(self):
        line = "session=sess_aaa model=claude"
        assert _matches_filters(line, session_filter="sess_bbb") is False

    def test_since_filter_passes(self):
        # A line stamped far in the future always clears the cutoff.
        line = "2099-01-01 00:00:00 INFO future"
        assert _matches_filters(line, since=datetime.now()) is True

    def test_since_filter_rejects(self):
        line = "2020-01-01 00:00:00 INFO past"
        assert _matches_filters(line, since=datetime.now()) is False

    def test_combined_filters(self):
        line = "2099-01-01 00:00:00 WARNING run_agent: session=abc error"
        assert _matches_filters(
            line,
            min_level="WARNING",
            session_filter="abc",
            since=datetime.now(),
        ) is True
        # Same line, wrong session -> rejected.
        assert _matches_filters(
            line, min_level="WARNING", session_filter="xyz",
        ) is False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _read_last_n_lines
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestReadLastNLines:
    """_read_last_n_lines() tails a file without loading more than needed."""

    def test_reads_correct_count(self, sample_agent_log):
        assert len(_read_last_n_lines(sample_agent_log, 3)) == 3

    def test_reads_all_when_fewer(self, sample_agent_log):
        # Asking for more than the file holds returns everything (10 lines).
        assert len(_read_last_n_lines(sample_agent_log, 100)) == 10

    def test_empty_file(self, log_dir):
        empty_path = log_dir / "empty.log"
        empty_path.write_text("")
        assert _read_last_n_lines(empty_path, 10) == []

    def test_last_line_content(self, sample_agent_log):
        tail = _read_last_n_lines(sample_agent_log, 1)
        assert "rotated to key-2" in tail[0]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# tail_log
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestTailLog:
    """tail_log() prints a header plus the filtered tail of a log file."""

    def test_basic_tail(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=3)
        out = capsys.readouterr().out
        assert "agent.log" in out
        # One header line plus the three requested content lines.
        assert len(out.strip().split("\n")) == 4

    def test_level_filter(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=50, level="ERROR")
        out = capsys.readouterr().out
        assert "level>=ERROR" in out
        body = [line for line in out.strip().split("\n") if not line.startswith("---")]
        assert len(body) == 1
        assert "API call failed" in body[0]

    def test_session_filter(self, sample_agent_log, capsys):
        tail_log("agent", num_lines=50, session="sess_bbb")
        out = capsys.readouterr().out
        body = [line for line in out.strip().split("\n") if not line.startswith("---")]
        assert len(body) == 1
        assert "sess_bbb" in body[0]

    def test_errors_log(self, sample_errors_log, capsys):
        tail_log("errors", num_lines=10)
        out = capsys.readouterr().out
        assert "errors.log" in out
        assert "WARNING" in out or "ERROR" in out

    def test_unknown_log_exits(self):
        with pytest.raises(SystemExit):
            tail_log("nonexistent")

    def test_missing_file_exits(self, log_dir):
        # log_dir is freshly created, so agent.log does not exist yet.
        with pytest.raises(SystemExit):
            tail_log("agent")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# list_logs
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestListLogs:
    """list_logs() prints each file with its size and age."""

    def test_lists_files(self, sample_agent_log, sample_errors_log, capsys):
        list_logs()
        out = capsys.readouterr().out
        assert "agent.log" in out
        assert "errors.log" in out

    def test_empty_dir(self, log_dir, capsys):
        list_logs()
        assert "no log files yet" in capsys.readouterr().out

    def test_shows_sizes(self, sample_agent_log, capsys):
        list_logs()
        out = capsys.readouterr().out
        # The sample file is tiny, so the size column renders bytes or KB.
        assert "B" in out or "KB" in out
|
||||
314
tests/test_hermes_logging.py
Normal file
314
tests/test_hermes_logging.py
Normal file
|
|
@ -0,0 +1,314 @@
|
|||
"""Tests for hermes_logging — centralized logging setup."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
import hermes_logging
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def _reset_logging_state():
    """Reset the module-level sentinel and strip any root-logger handlers
    added by setup_logging() during the test, so state never leaks."""
    hermes_logging._logging_initialized = False
    root = logging.getLogger()
    preexisting = list(root.handlers)
    yield
    # Restore — drop only the handlers the test itself attached.
    for handler in list(root.handlers):
        if handler not in preexisting:
            root.removeHandler(handler)
            handler.close()
    hermes_logging._logging_initialized = False
|
||||
|
||||
|
||||
@pytest.fixture
def hermes_home(tmp_path, monkeypatch):
    """Provide an isolated HERMES_HOME for logging tests.

    Uses the same tmp_path as the autouse _isolate_hermes_home from conftest,
    reading it back from the env var to avoid double-mkdir conflicts.
    """
    return Path(os.environ["HERMES_HOME"])
|
||||
|
||||
|
||||
class TestSetupLogging:
    """setup_logging() creates agent.log + errors.log with RotatingFileHandler."""

    @staticmethod
    def _rotating_handlers(basename):
        # All RotatingFileHandlers on the root logger whose backing file
        # name contains *basename* (e.g. "agent.log").
        return [
            h for h in logging.getLogger().handlers
            if isinstance(h, RotatingFileHandler)
            and basename in getattr(h, "baseFilename", "")
        ]

    def test_creates_log_directory(self, hermes_home):
        log_dir = hermes_logging.setup_logging(hermes_home=hermes_home)
        assert log_dir == hermes_home / "logs"
        assert log_dir.is_dir()

    def test_creates_agent_log_handler(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        handlers = self._rotating_handlers("agent.log")
        assert len(handlers) == 1
        assert handlers[0].level == logging.INFO

    def test_creates_errors_log_handler(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        handlers = self._rotating_handlers("errors.log")
        assert len(handlers) == 1
        assert handlers[0].level == logging.WARNING

    def test_idempotent_no_duplicate_handlers(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.setup_logging(hermes_home=hermes_home)  # second call — should be no-op
        assert len(self._rotating_handlers("agent.log")) == 1

    def test_force_reinitializes(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        # force=True re-runs setup, but _add_rotating_handler dedupes by
        # resolved path, so no duplicate handler appears either.
        hermes_logging.setup_logging(hermes_home=hermes_home, force=True)
        assert len(self._rotating_handlers("agent.log")) == 1

    def test_custom_log_level(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home, log_level="DEBUG")
        assert self._rotating_handlers("agent.log")[0].level == logging.DEBUG

    def test_custom_max_size_and_backup(self, hermes_home):
        hermes_logging.setup_logging(
            hermes_home=hermes_home, max_size_mb=10, backup_count=5
        )
        handler = self._rotating_handlers("agent.log")[0]
        assert handler.maxBytes == 10 * 1024 * 1024
        assert handler.backupCount == 5

    def test_suppresses_noisy_loggers(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        for noisy in ("openai", "httpx", "httpcore"):
            assert logging.getLogger(noisy).level >= logging.WARNING

    def test_writes_to_agent_log(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        logging.getLogger("test_hermes_logging.write_test").info(
            "test message for agent.log"
        )
        # Flush handlers before reading the file back.
        for handler in logging.getLogger().handlers:
            handler.flush()
        agent_log = hermes_home / "logs" / "agent.log"
        assert agent_log.exists()
        assert "test message for agent.log" in agent_log.read_text()

    def test_warnings_appear_in_both_logs(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        logging.getLogger("test_hermes_logging.warning_test").warning(
            "this is a warning"
        )
        for handler in logging.getLogger().handlers:
            handler.flush()
        logs = hermes_home / "logs"
        assert "this is a warning" in (logs / "agent.log").read_text()
        assert "this is a warning" in (logs / "errors.log").read_text()

    def test_info_not_in_errors_log(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        logging.getLogger("test_hermes_logging.info_test").info("info only message")
        for handler in logging.getLogger().handlers:
            handler.flush()
        errors_log = hermes_home / "logs" / "errors.log"
        if errors_log.exists():
            assert "info only message" not in errors_log.read_text()

    def test_reads_config_yaml(self, hermes_home):
        """setup_logging reads logging.level from config.yaml."""
        import yaml
        config = {"logging": {"level": "DEBUG", "max_size_mb": 2, "backup_count": 1}}
        (hermes_home / "config.yaml").write_text(yaml.dump(config))

        hermes_logging.setup_logging(hermes_home=hermes_home)

        handler = self._rotating_handlers("agent.log")[0]
        assert handler.level == logging.DEBUG
        assert handler.maxBytes == 2 * 1024 * 1024
        assert handler.backupCount == 1

    def test_explicit_params_override_config(self, hermes_home):
        """Explicit function params take precedence over config.yaml."""
        import yaml
        config = {"logging": {"level": "DEBUG"}}
        (hermes_home / "config.yaml").write_text(yaml.dump(config))

        hermes_logging.setup_logging(hermes_home=hermes_home, log_level="WARNING")

        assert self._rotating_handlers("agent.log")[0].level == logging.WARNING
|
||||
|
||||
|
||||
class TestSetupVerboseLogging:
    """setup_verbose_logging() adds a DEBUG-level console handler."""

    @staticmethod
    def _verbose_handlers():
        # Console handlers tagged by setup_verbose_logging(); file handlers
        # subclass StreamHandler, so RotatingFileHandler is excluded explicitly.
        return [
            h for h in logging.getLogger().handlers
            if isinstance(h, logging.StreamHandler)
            and not isinstance(h, RotatingFileHandler)
            and getattr(h, "_hermes_verbose", False)
        ]

    def test_adds_stream_handler(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.setup_verbose_logging()
        handlers = self._verbose_handlers()
        assert len(handlers) == 1
        assert handlers[0].level == logging.DEBUG

    def test_idempotent(self, hermes_home):
        hermes_logging.setup_logging(hermes_home=hermes_home)
        hermes_logging.setup_verbose_logging()
        hermes_logging.setup_verbose_logging()  # second call is a no-op
        assert len(self._verbose_handlers()) == 1
|
||||
|
||||
|
||||
class TestAddRotatingHandler:
    """_add_rotating_handler() is idempotent and creates the directory."""

    @staticmethod
    def _cleanup(target_logger):
        # Detach and close rotating handlers so tmp files are released.
        for handler in list(target_logger.handlers):
            if isinstance(handler, RotatingFileHandler):
                target_logger.removeHandler(handler)
                handler.close()

    def test_creates_directory(self, tmp_path):
        target = tmp_path / "subdir" / "test.log"
        test_logger = logging.getLogger("_test_rotating")

        hermes_logging._add_rotating_handler(
            test_logger, target,
            level=logging.INFO, max_bytes=1024, backup_count=1,
            formatter=logging.Formatter("%(message)s"),
        )

        assert target.parent.is_dir()
        self._cleanup(test_logger)

    def test_no_duplicate_for_same_path(self, tmp_path):
        target = tmp_path / "test.log"
        test_logger = logging.getLogger("_test_rotating_dup")

        # Adding twice for the same path must attach exactly one handler.
        for _ in range(2):
            hermes_logging._add_rotating_handler(
                test_logger, target,
                level=logging.INFO, max_bytes=1024, backup_count=1,
                formatter=logging.Formatter("%(message)s"),
            )

        rotating = [
            h for h in test_logger.handlers
            if isinstance(h, RotatingFileHandler)
        ]
        assert len(rotating) == 1
        self._cleanup(test_logger)
|
||||
|
||||
|
||||
class TestReadLoggingConfig:
    """_read_logging_config() reads from config.yaml."""

    def test_returns_none_when_no_config(self, hermes_home):
        level, max_size, backup = hermes_logging._read_logging_config()
        # With no config.yaml present, every field comes back as None.
        assert level is None and max_size is None and backup is None

    def test_reads_logging_section(self, hermes_home):
        import yaml

        settings = {
            "logging": {"level": "DEBUG", "max_size_mb": 10, "backup_count": 5}
        }
        (hermes_home / "config.yaml").write_text(yaml.dump(settings))

        level, max_size, backup = hermes_logging._read_logging_config()
        assert (level, max_size, backup) == ("DEBUG", 10, 5)

    def test_handles_missing_logging_section(self, hermes_home):
        import yaml

        # A config.yaml without a logging: section must not blow up.
        (hermes_home / "config.yaml").write_text(yaml.dump({"model": "test"}))

        level, _max_size, _backup = hermes_logging._read_logging_config()
        assert level is None
|
||||
|
|
@ -29,6 +29,8 @@ import uuid
|
|||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
|
@ -43,12 +45,12 @@ class DebugSession:
|
|||
self.tool_name = tool_name
|
||||
self.enabled = os.getenv(env_var, "false").lower() == "true"
|
||||
self.session_id = str(uuid.uuid4()) if self.enabled else ""
|
||||
self.log_dir = Path("./logs")
|
||||
self.log_dir = get_hermes_home() / "logs"
|
||||
self._calls: list[Dict[str, Any]] = []
|
||||
self._start_time = datetime.datetime.now().isoformat() if self.enabled else ""
|
||||
|
||||
if self.enabled:
|
||||
self.log_dir.mkdir(exist_ok=True)
|
||||
self.log_dir.mkdir(parents=True, exist_ok=True)
|
||||
logger.debug("%s debug mode enabled - Session ID: %s",
|
||||
tool_name, self.session_id)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue